From: Kirill Shcherbatov <kshcherbatov@tarantool.org>
To: tarantool-patches@freelists.org
Cc: v.shpilevoy@tarantool.org,
Kirill Shcherbatov <kshcherbatov@tarantool.org>
Subject: [tarantool-patches] [PATCH v2 3/5] box: introduce path field in key_part
Date: Wed, 15 Aug 2018 15:15:01 +0300
Message-ID: <776c2c4fe710320a4fe117fc96683efbd30b5c74.1534332920.git.kshcherbatov@tarantool.org>
In-Reply-To: <cover.1534332920.git.kshcherbatov@tarantool.org>
As we need to store user-defined JSON paths in key_part and
key_part_def, introduce the path and path_len fields. A JSON path
is verified and transformed to its canonical form when the index
msgpack is unpacked.
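
For illustration only, here is a minimal sketch of the
canonicalization loop, built on the json/path.h API this patch
uses; error handling is omitted and the sample path is
hypothetical:

	#include <stdio.h>	/* sprintf() */
	#include "json/path.h"

	static void
	json_path_canonicalize(const char *path, uint32_t path_len,
			       char *buf)
	{
		struct json_path_parser parser;
		struct json_path_node node;
		/* buf needs 3 * path_len + 1 bytes, worst case. */
		char *p = buf;
		json_path_parser_create(&parser, path, path_len);
		while (json_path_next(&parser, &node) == 0 &&
		       node.type != JSON_PATH_END) {
			if (node.type == JSON_PATH_NUM)
				p += sprintf(p, "[%u]",
					     (uint32_t) node.num);
			else if (node.type == JSON_PATH_STR)
				p += sprintf(p, "[\"%.*s\"]",
					     node.len, node.str);
		}
		*p = '\0';
	}

E.g. a hypothetical "[3].FIO.fname" canonicalizes to
[3]["FIO"]["fname"].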
The path string is stored as a part of the key_def allocation:
+-------+---------+-----+---------+-------+-----+-------+
|key_def|key_part1| ... |key_partN| path1 | ... | pathN |
+-------+---------+-----+---------+-------+-----+-------+
             |                        ^
             +-> path ________________|
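
A minimal sketch of the size computation behind this layout (it
mirrors key_def_new_with_parts() in this patch; NULL checks are
omitted):

	size_t sz = key_def_sizeof(part_count);
	for (uint32_t i = 0; i < part_count; i++) {
		if (parts[i].path != NULL)
			sz += strlen(parts[i].path) + 1;
	}
	struct key_def *def = (struct key_def *) calloc(1, sz);
	/* Path bytes follow the fixed-size parts array. */
	char *data = (char *) def + key_def_sizeof(part_count);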
Because the field names specified in a space format can be changed,
a key_part path persisted in Tarantool must always start with a
first-level field access via an array index (not a field name).
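
The decoder enforces this rule: the first path lexeme must be a
numeric index that matches the part's fieldno. A condensed sketch
of that check from key_def_normalize_json_path() below:

	int rc = json_path_next(&parser, &node);
	if (rc != 0 || node.type != JSON_PATH_NUM ||
	    node.num - TUPLE_INDEX_BASE != part->fieldno) {
		/* diag_set(ClientError, ER_WRONG_INDEX_OPTIONS, ...); */
		return -1;
	}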
Part of #1012.
---
src/box/index_def.c | 10 +-
src/box/key_def.c | 276 ++++++++++++++++++++++++++++++++++++++-----
src/box/key_def.h | 21 +++-
src/box/lua/space.cc | 5 +
src/box/memtx_engine.c | 5 +
src/box/schema.cc | 12 +-
src/box/tuple_compare.cc | 2 +
src/box/tuple_extract_key.cc | 1 +
src/box/tuple_hash.cc | 1 +
src/box/vinyl.c | 5 +
src/box/vy_log.c | 3 +-
11 files changed, 300 insertions(+), 41 deletions(-)
diff --git a/src/box/index_def.c b/src/box/index_def.c
index 9cda63c..f67b952 100644
--- a/src/box/index_def.c
+++ b/src/box/index_def.c
@@ -209,8 +209,14 @@ index_def_is_valid(struct index_def *index_def, const char *space_name)
* Courtesy to a user who could have made
* a typo.
*/
- if (index_def->key_def->parts[i].fieldno ==
- index_def->key_def->parts[j].fieldno) {
+ struct key_part *part_a = &index_def->key_def->parts[i];
+ struct key_part *part_b = &index_def->key_def->parts[j];
+ if ((part_a->fieldno == part_b->fieldno &&
+ part_a->path == NULL && part_b->path == NULL) ||
+ (part_a->path_len != 0 &&
+ part_a->path_len == part_b->path_len &&
+ memcmp(part_a->path, part_b->path,
+ part_a->path_len) == 0)) {
diag_set(ClientError, ER_MODIFY_INDEX,
index_def->name, space_name,
"same key part is indexed twice");
diff --git a/src/box/key_def.c b/src/box/key_def.c
index 8a4262b..b00e46d 100644
--- a/src/box/key_def.c
+++ b/src/box/key_def.c
@@ -35,12 +35,15 @@
#include "column_mask.h"
#include "schema_def.h"
#include "coll_id_cache.h"
+#include "fiber.h"
+#include "json/path.h"
static const struct key_part_def key_part_def_default = {
0,
field_type_MAX,
COLL_NONE,
false,
+ NULL
};
static int64_t
@@ -53,6 +56,7 @@ part_type_by_name_wrapper(const char *str, uint32_t len)
#define PART_OPT_FIELD "field"
#define PART_OPT_COLLATION "collation"
#define PART_OPT_NULLABILITY "is_nullable"
+#define PART_OPT_PATH "path"
const struct opt_def part_def_reg[] = {
OPT_DEF_ENUM(PART_OPT_TYPE, field_type, struct key_part_def, type,
@@ -61,6 +65,7 @@ const struct opt_def part_def_reg[] = {
OPT_DEF(PART_OPT_COLLATION, OPT_UINT32, struct key_part_def, coll_id),
OPT_DEF(PART_OPT_NULLABILITY, OPT_BOOL, struct key_part_def,
is_nullable),
+ OPT_DEF(PART_OPT_PATH, OPT_STRPTR, struct key_part_def, path),
OPT_END,
};
@@ -97,12 +102,22 @@ struct key_def *
key_def_dup(const struct key_def *src)
{
size_t sz = key_def_sizeof(src->part_count);
+ const struct key_part *parts = src->parts;
+ const struct key_part *parts_end = parts + src->part_count;
+ for (; parts < parts_end; parts++)
+ sz += parts->path != NULL ? parts->path_len + 1 : 0;
struct key_def *res = (struct key_def *)malloc(sz);
if (res == NULL) {
diag_set(OutOfMemory, sz, "malloc", "res");
return NULL;
}
memcpy(res, src, sz);
+ for (uint32_t i = 0; i < src->part_count; i++) {
+ if (src->parts[i].path == NULL)
+ continue;
+ size_t path_offset = src->parts[i].path - (char *)src;
+ res->parts[i].path = (char *)res + path_offset;
+ }
return res;
}
@@ -110,8 +125,17 @@ void
key_def_swap(struct key_def *old_def, struct key_def *new_def)
{
assert(old_def->part_count == new_def->part_count);
- for (uint32_t i = 0; i < new_def->part_count; i++)
- SWAP(old_def->parts[i], new_def->parts[i]);
+ for (uint32_t i = 0; i < new_def->part_count; i++) {
+ if (old_def->parts[i].path == NULL) {
+ SWAP(old_def->parts[i], new_def->parts[i]);
+ } else {
+ size_t path_offset =
+ old_def->parts[i].path - (char *)old_def;
+ SWAP(old_def->parts[i], new_def->parts[i]);
+ old_def->parts[i].path = (char *)old_def + path_offset;
+ new_def->parts[i].path = (char *)new_def + path_offset;
+ }
+ }
SWAP(*old_def, *new_def);
}
@@ -131,9 +155,9 @@ key_def_set_cmp(struct key_def *def)
}
struct key_def *
-key_def_new(uint32_t part_count)
+key_def_new(uint32_t part_count, size_t extra_size)
{
- size_t sz = key_def_sizeof(part_count);
+ size_t sz = key_def_sizeof(part_count) + extra_size;
/** Use calloc() to zero comparator function pointers. */
struct key_def *key_def = (struct key_def *) calloc(1, sz);
if (key_def == NULL) {
@@ -148,10 +172,13 @@ key_def_new(uint32_t part_count)
struct key_def *
key_def_new_with_parts(struct key_part_def *parts, uint32_t part_count)
{
- struct key_def *def = key_def_new(part_count);
+ size_t sz = 0;
+ for (uint32_t i = 0; i < part_count; i++)
+ sz += parts[i].path != NULL ? strlen(parts[i].path) + 1 : 0;
+ struct key_def *def = key_def_new(part_count, sz);
if (def == NULL)
return NULL;
-
+ char *data = (char *)def + key_def_sizeof(part_count);
for (uint32_t i = 0; i < part_count; i++) {
struct key_part_def *part = &parts[i];
struct coll *coll = NULL;
@@ -165,14 +192,22 @@ key_def_new_with_parts(struct key_part_def *parts, uint32_t part_count)
}
coll = coll_id->coll;
}
+ uint32_t path_len = 0;
+ if (part->path != NULL) {
+ path_len = strlen(part->path);
+ def->parts[i].path = data;
+ data += path_len + 1;
+ }
key_def_set_part(def, i, part->fieldno, part->type,
- part->is_nullable, coll, part->coll_id);
+ part->is_nullable, coll, part->coll_id,
+ part->path, path_len);
}
return def;
}
-void
-key_def_dump_parts(const struct key_def *def, struct key_part_def *parts)
+int
+key_def_dump_parts(struct region *pool, const struct key_def *def,
+ struct key_part_def *parts)
{
for (uint32_t i = 0; i < def->part_count; i++) {
const struct key_part *part = &def->parts[i];
@@ -181,13 +216,26 @@ key_def_dump_parts(const struct key_def *def, struct key_part_def *parts)
part_def->type = part->type;
part_def->is_nullable = part->is_nullable;
part_def->coll_id = part->coll_id;
+ if (part->path != NULL) {
+ part_def->path = region_alloc(pool, part->path_len + 1);
+ if (part_def->path == NULL) {
+ diag_set(OutOfMemory, part->path_len + 1,
+ "region_alloc", "part_def->path");
+ return -1;
+ }
+ memcpy(part_def->path, part->path, part->path_len);
+ part_def->path[part->path_len] = '\0';
+ } else {
+ part_def->path = NULL;
+ }
}
+ return 0;
}
box_key_def_t *
box_key_def_new(uint32_t *fields, uint32_t *types, uint32_t part_count)
{
- struct key_def *key_def = key_def_new(part_count);
+ struct key_def *key_def = key_def_new(part_count, 0);
if (key_def == NULL)
return key_def;
@@ -195,7 +243,7 @@ box_key_def_new(uint32_t *fields, uint32_t *types, uint32_t part_count)
key_def_set_part(key_def, item, fields[item],
(enum field_type)types[item],
key_part_def_default.is_nullable, NULL,
- COLL_NONE);
+ COLL_NONE, NULL, 0);
}
return key_def;
}
@@ -241,6 +289,11 @@ key_part_cmp(const struct key_part *parts1, uint32_t part_count1,
if (part1->is_nullable != part2->is_nullable)
return part1->is_nullable <
part2->is_nullable ? -1 : 1;
+ /* Compare paths in lexicographic order. */
+ uint32_t len = MIN(part1->path_len, part2->path_len);
+ int rc = 0;
+ if ((rc = memcmp(part1->path, part2->path, len)) != 0)
+ return rc;
}
return part_count1 < part_count2 ? -1 : part_count1 > part_count2;
}
@@ -248,11 +301,12 @@ key_part_cmp(const struct key_part *parts1, uint32_t part_count1,
void
key_def_set_part(struct key_def *def, uint32_t part_no, uint32_t fieldno,
enum field_type type, bool is_nullable, struct coll *coll,
- uint32_t coll_id)
+ uint32_t coll_id, char *path, uint32_t path_len)
{
assert(part_no < def->part_count);
assert(type < field_type_MAX);
def->is_nullable |= is_nullable;
+ def->has_json_paths |= (path != NULL);
def->parts[part_no].is_nullable = is_nullable;
def->parts[part_no].fieldno = fieldno;
def->parts[part_no].type = type;
@@ -260,6 +314,15 @@ key_def_set_part(struct key_def *def, uint32_t part_no, uint32_t fieldno,
def->parts[part_no].coll_id = coll_id;
def->parts[part_no].slot_cache = TUPLE_OFFSET_SLOT_NIL;
def->parts[part_no].format_epoch = 0;
+ if (path != NULL) {
+ def->parts[part_no].path_len = path_len;
+ assert(def->parts[part_no].path != NULL);
+ memcpy(def->parts[part_no].path, path, path_len);
+ def->parts[part_no].path[path_len] = '\0';
+ } else {
+ def->parts[part_no].path_len = 0;
+ def->parts[part_no].path = NULL;
+ }
column_mask_set_fieldno(&def->column_mask, fieldno);
/**
* When all parts are set, initialize the tuple
@@ -304,8 +367,15 @@ key_def_snprint_parts(char *buf, int size, const struct key_part_def *parts,
for (uint32_t i = 0; i < part_count; i++) {
const struct key_part_def *part = &parts[i];
assert(part->type < field_type_MAX);
- SNPRINT(total, snprintf, buf, size, "%d, '%s'",
- (int)part->fieldno, field_type_strs[part->type]);
+ if (part->path != NULL) {
+ SNPRINT(total, snprintf, buf, size, "%d, '%s', '%s'",
+ (int) part->fieldno, part->path,
+ field_type_strs[part->type]);
+ } else {
+ SNPRINT(total, snprintf, buf, size, "%d, '%s'",
+ (int) part->fieldno,
+ field_type_strs[part->type]);
+ }
if (i < part_count - 1)
SNPRINT(total, snprintf, buf, size, ", ");
}
@@ -324,6 +394,8 @@ key_def_sizeof_parts(const struct key_part_def *parts, uint32_t part_count)
count++;
if (part->is_nullable)
count++;
+ if (part->path != NULL)
+ count++;
size += mp_sizeof_map(count);
size += mp_sizeof_str(strlen(PART_OPT_FIELD));
size += mp_sizeof_uint(part->fieldno);
@@ -338,6 +410,10 @@ key_def_sizeof_parts(const struct key_part_def *parts, uint32_t part_count)
size += mp_sizeof_str(strlen(PART_OPT_NULLABILITY));
size += mp_sizeof_bool(part->is_nullable);
}
+ if (part->path != NULL) {
+ size += mp_sizeof_str(strlen(PART_OPT_PATH));
+ size += mp_sizeof_str(strlen(part->path));
+ }
}
return size;
}
@@ -351,6 +427,8 @@ key_def_encode_parts(char *data, const struct key_part_def *parts,
int count = 2;
if (part->coll_id != COLL_NONE)
count++;
+ if (part->path != NULL)
+ count++;
if (part->is_nullable)
count++;
data = mp_encode_map(data, count);
@@ -372,6 +450,12 @@ key_def_encode_parts(char *data, const struct key_part_def *parts,
strlen(PART_OPT_NULLABILITY));
data = mp_encode_bool(data, part->is_nullable);
}
+ if (part->path != NULL) {
+ data = mp_encode_str(data, PART_OPT_PATH,
+ strlen(PART_OPT_PATH));
+ data = mp_encode_str(data, part->path,
+ strlen(part->path));
+ }
}
return data;
}
@@ -432,10 +516,111 @@ key_def_decode_parts_166(struct key_part_def *parts, uint32_t part_count,
fields[part->fieldno].is_nullable :
key_part_def_default.is_nullable);
part->coll_id = COLL_NONE;
+ part->path = NULL;
}
return 0;
}
+/**
+ * Verify key_part JSON path and convert to canonical form.
+ *
+ * @param region to make allocations.
+ * @param part with path to update.
+ * @param path_extra allocated space to reuse if possible.
+ * @param path_extra_size size of @path_extra.
+ *
+ * @retval -1 on error.
+ * @retval 0 on success.
+ */
+static int
+key_def_normalize_json_path(struct region *region, struct key_part_def *part,
+ char **path_extra, uint32_t *path_extra_size)
+{
+ const char *err_msg = NULL;
+
+ uint32_t allocated_size = *path_extra_size;
+ char *path = *path_extra;
+
+ uint32_t path_len = strlen(part->path);
+ struct json_path_parser parser;
+ struct json_path_node node;
+ json_path_parser_create(&parser, part->path, path_len);
+ /*
+ * The worst-case expansion is .a -> ["a"],
+ * i.e. 3 * path_len + 1 bytes are enough.
+ */
+ uint32_t new_path_size = 3 * path_len + 1;
+ if (new_path_size >= allocated_size) {
+ path = region_alloc(region, new_path_size);
+ if (path == NULL) {
+ diag_set(OutOfMemory, new_path_size,
+ "region_alloc", "path");
+ return -1;
+ }
+ allocated_size = new_path_size;
+ }
+ assert(path != NULL);
+ part->path = path;
+ int rc = json_path_next(&parser, &node);
+ if (rc != 0)
+ goto error_invalid_json;
+ if (node.type != JSON_PATH_NUM) {
+ diag_set(ClientError, ER_WRONG_INDEX_OPTIONS,
+ part->fieldno,
+ "invalid JSON path: first part should "
+ "be defined as array index");
+ return -1;
+ }
+ if (node.num - TUPLE_INDEX_BASE != part->fieldno) {
+ diag_set(ClientError, ER_WRONG_INDEX_OPTIONS,
+ part->fieldno,
+ "invalid JSON path: first part refers "
+ "to invalid field");
+ return -1;
+ }
+ uint32_t lexemes = 0;
+ do {
+ if (node.type == JSON_PATH_NUM) {
+ path += sprintf(path, "[%u]", (uint32_t) node.num);
+ } else if (node.type == JSON_PATH_STR) {
+ path += sprintf(path, "[\"%.*s\"]", node.len, node.str);
+ } else {
+ unreachable();
+ }
+ lexemes++;
+ } while ((rc = json_path_next(&parser, &node)) == 0 &&
+ node.type != JSON_PATH_END);
+ if (rc != 0 || node.type != JSON_PATH_END)
+ goto error_invalid_json;
+ if (lexemes == 1) {
+ /* JSON index is useless. */
+ path = part->path;
+ part->path = NULL;
+ } else {
+ /* Skip terminating zero. */
+ path++;
+ /* Account constructed string size. */
+ allocated_size -= path - part->path;
+ }
+ /* Going to try to reuse extra allocation next time. */
+ if ((uint32_t)parser.src_len > path_len) {
+ *path_extra = path;
+ *path_extra_size = allocated_size;
+ } else {
+ *path_extra = (char *)parser.src;
+ *path_extra_size = parser.src_len;
+ }
+ return 0;
+
+error_invalid_json:
+ err_msg = tt_sprintf("invalid JSON path '%.*s': path has invalid "
+ "structure (error at position %d)", parser.src_len,
+ parser.src, parser.symbol_count);
+ diag_set(ClientError, ER_WRONG_INDEX_OPTIONS,
+ part->fieldno + TUPLE_INDEX_BASE, err_msg);
+ return -1;
+}
+
int
key_def_decode_parts(struct key_part_def *parts, uint32_t part_count,
const char **data, const struct field_def *fields,
@@ -445,8 +630,11 @@ key_def_decode_parts(struct key_part_def *parts, uint32_t part_count,
return key_def_decode_parts_166(parts, part_count, data,
fields, field_count);
}
- for (uint32_t i = 0; i < part_count; i++) {
- struct key_part_def *part = &parts[i];
+ char *path = NULL;
+ uint32_t allocated_size = 0;
+ struct key_part_def *part = parts;
+ struct region *region = &fiber()->gc;
+ for (uint32_t i = 0; i < part_count; i++, part++) {
if (mp_typeof(**data) != MP_MAP) {
diag_set(ClientError, ER_WRONG_INDEX_OPTIONS,
i + TUPLE_INDEX_BASE,
@@ -456,7 +644,7 @@ key_def_decode_parts(struct key_part_def *parts, uint32_t part_count,
*part = key_part_def_default;
if (opts_decode(part, part_def_reg, data,
ER_WRONG_INDEX_OPTIONS, i + TUPLE_INDEX_BASE,
- NULL) != 0)
+ region) != 0)
return -1;
if (part->type == field_type_MAX) {
diag_set(ClientError, ER_WRONG_INDEX_OPTIONS,
@@ -473,6 +661,10 @@ key_def_decode_parts(struct key_part_def *parts, uint32_t part_count,
"string and scalar parts");
return -1;
}
+ if (part->path != NULL &&
+ key_def_normalize_json_path(region, part, &path,
+ &allocated_size) != 0)
+ return -1;
}
return 0;
}
@@ -497,6 +689,7 @@ key_def_decode_parts_160(struct key_part_def *parts, uint32_t part_count,
fields[part->fieldno].is_nullable :
key_part_def_default.is_nullable);
part->coll_id = COLL_NONE;
+ part->path = NULL;
}
return 0;
}
@@ -533,18 +726,29 @@ key_def_merge(const struct key_def *first, const struct key_def *second)
* Find and remove part duplicates, i.e. parts counted
* twice since they are present in both key defs.
*/
- const struct key_part *part = second->parts;
- const struct key_part *end = part + second->part_count;
+ size_t sz = 0;
+ const struct key_part *part = first->parts;
+ const struct key_part *end = part + first->part_count;
+ for (; part != end; part++) {
+ if (part->path != NULL)
+ sz += part->path_len + 1;
+ }
+ part = second->parts;
+ end = part + second->part_count;
for (; part != end; part++) {
- if (key_def_find(first, part->fieldno))
+ const struct key_part *duplicate =
+ key_def_find(first, part->fieldno);
+ if (duplicate != NULL &&
+ part->path_len == duplicate->path_len &&
+ memcmp(part->path, duplicate->path, part->path_len) == 0)
--new_part_count;
+ else if (part->path != NULL)
+ sz += part->path_len + 1;
}
-
- struct key_def *new_def;
- new_def = (struct key_def *)calloc(1, key_def_sizeof(new_part_count));
+ sz += key_def_sizeof(new_part_count);
+ struct key_def *new_def = (struct key_def *)calloc(1, sz);
if (new_def == NULL) {
- diag_set(OutOfMemory, key_def_sizeof(new_part_count), "malloc",
- "new_def");
+ diag_set(OutOfMemory, sz, "calloc", "new_def");
return NULL;
}
new_def->part_count = new_part_count;
@@ -552,24 +756,40 @@ key_def_merge(const struct key_def *first, const struct key_def *second)
new_def->is_nullable = first->is_nullable || second->is_nullable;
new_def->has_optional_parts = first->has_optional_parts ||
second->has_optional_parts;
+ /* Path data write position in the new key_def. */
+ char *data = (char *)new_def + key_def_sizeof(new_part_count);
/* Write position in the new key def. */
uint32_t pos = 0;
/* Append first key def's parts to the new index_def. */
part = first->parts;
end = part + first->part_count;
for (; part != end; part++) {
+ if (part->path != NULL) {
+ new_def->parts[pos].path = data;
+ data += part->path_len + 1;
+ }
key_def_set_part(new_def, pos++, part->fieldno, part->type,
- part->is_nullable, part->coll, part->coll_id);
+ part->is_nullable, part->coll, part->coll_id,
+ part->path, part->path_len);
}
/* Set-append second key def's part to the new key def. */
part = second->parts;
end = part + second->part_count;
for (; part != end; part++) {
- if (key_def_find(first, part->fieldno))
+ const struct key_part *duplicate =
+ key_def_find(first, part->fieldno);
+ if (duplicate != NULL &&
+ part->path_len == duplicate->path_len &&
+ memcmp(part->path, duplicate->path, part->path_len) == 0)
continue;
+ if (part->path != NULL) {
+ new_def->parts[pos].path = data;
+ data += part->path_len + 1;
+ }
key_def_set_part(new_def, pos++, part->fieldno, part->type,
- part->is_nullable, part->coll, part->coll_id);
+ part->is_nullable, part->coll, part->coll_id,
+ part->path, part->path_len);
}
return new_def;
}
diff --git a/src/box/key_def.h b/src/box/key_def.h
index 42c054c..b6d6259 100644
--- a/src/box/key_def.h
+++ b/src/box/key_def.h
@@ -54,6 +54,8 @@ struct key_part_def {
uint32_t coll_id;
/** True if a key part can store NULLs. */
bool is_nullable;
+ /** JSON path to data. */
+ char *path;
};
/**
@@ -78,6 +80,10 @@ struct key_part {
uint64_t format_epoch;
/** Cache for corresponding tuple_format slot_offset. */
int32_t slot_cache;
+ /** JSON path to data. */
+ char *path;
+ /** JSON path length. */
+ uint32_t path_len;
};
struct key_def;
@@ -137,6 +143,10 @@ struct key_def {
* fields assumed to be MP_NIL.
*/
bool has_optional_parts;
+ /**
+ * True, if some key part contain JSON path.
+ */
+ bool has_json_paths;
/** Key fields mask. @sa column_mask.h for details. */
uint64_t column_mask;
/** The size of the 'parts' array. */
@@ -234,7 +244,7 @@ key_def_sizeof(uint32_t part_count)
* Allocate a new key_def with the given part count.
*/
struct key_def *
-key_def_new(uint32_t part_count);
+key_def_new(uint32_t part_count, size_t extra_size);
/**
* Allocate a new key_def with the given part count
@@ -246,8 +256,9 @@ key_def_new_with_parts(struct key_part_def *parts, uint32_t part_count);
/**
* Dump part definitions of the given key def.
*/
-void
-key_def_dump_parts(const struct key_def *def, struct key_part_def *parts);
+int
+key_def_dump_parts(struct region *pool, const struct key_def *def,
+ struct key_part_def *parts);
/**
* Set a single key part in a key def.
@@ -256,7 +267,7 @@ key_def_dump_parts(const struct key_def *def, struct key_part_def *parts);
void
key_def_set_part(struct key_def *def, uint32_t part_no, uint32_t fieldno,
enum field_type type, bool is_nullable, struct coll *coll,
- uint32_t coll_id);
+ uint32_t coll_id, char *path, uint32_t path_len);
/**
* Update 'has_optional_parts' of @a key_def with correspondence
@@ -370,6 +381,8 @@ key_validate_parts(const struct key_def *key_def, const char *key,
static inline bool
key_def_is_sequential(const struct key_def *key_def)
{
+ if (key_def->has_json_paths)
+ return false;
for (uint32_t part_id = 0; part_id < key_def->part_count; part_id++) {
if (key_def->parts[part_id].fieldno != part_id)
return false;
diff --git a/src/box/lua/space.cc b/src/box/lua/space.cc
index 580e0ea..98bb969 100644
--- a/src/box/lua/space.cc
+++ b/src/box/lua/space.cc
@@ -295,6 +295,11 @@ lbox_fillspace(struct lua_State *L, struct space *space, int i)
lua_pushnumber(L, part->fieldno + TUPLE_INDEX_BASE);
lua_setfield(L, -2, "fieldno");
+ if (part->path != NULL) {
+ lua_pushstring(L, part->path);
+ lua_setfield(L, -2, "path");
+ }
+
lua_pushboolean(L, part->is_nullable);
lua_setfield(L, -2, "is_nullable");
diff --git a/src/box/memtx_engine.c b/src/box/memtx_engine.c
index f5ace92..827ad01 100644
--- a/src/box/memtx_engine.c
+++ b/src/box/memtx_engine.c
@@ -1320,6 +1320,11 @@ memtx_index_def_change_requires_rebuild(struct index *index,
return true;
if (old_part->coll != new_part->coll)
return true;
+ if (old_part->path_len != new_part->path_len)
+ return true;
+ if (memcmp(old_part->path, new_part->path,
+ old_part->path_len) != 0)
+ return true;
}
return false;
}
diff --git a/src/box/schema.cc b/src/box/schema.cc
index 433f52c..94c72ff 100644
--- a/src/box/schema.cc
+++ b/src/box/schema.cc
@@ -285,19 +285,19 @@ schema_init()
* (and re-created) first.
*/
/* _schema - key/value space with schema description */
- struct key_def *key_def = key_def_new(1); /* part count */
+ struct key_def *key_def = key_def_new(1, 0);
if (key_def == NULL)
diag_raise();
auto key_def_guard = make_scoped_guard([&] { key_def_delete(key_def); });
key_def_set_part(key_def, 0 /* part no */, 0 /* field no */,
- FIELD_TYPE_STRING, false, NULL, COLL_NONE);
+ FIELD_TYPE_STRING, false, NULL, COLL_NONE, NULL, 0);
sc_space_new(BOX_SCHEMA_ID, "_schema", key_def, &on_replace_schema,
NULL);
/* _space - home for all spaces. */
key_def_set_part(key_def, 0 /* part no */, 0 /* field no */,
- FIELD_TYPE_UNSIGNED, false, NULL, COLL_NONE);
+ FIELD_TYPE_UNSIGNED, false, NULL, COLL_NONE, NULL, 0);
/* _collation - collation description. */
sc_space_new(BOX_COLLATION_ID, "_collation", key_def,
@@ -340,15 +340,15 @@ schema_init()
NULL);
key_def_delete(key_def);
- key_def = key_def_new(2); /* part count */
+ key_def = key_def_new(2, 0);
if (key_def == NULL)
diag_raise();
/* space no */
key_def_set_part(key_def, 0 /* part no */, 0 /* field no */,
- FIELD_TYPE_UNSIGNED, false, NULL, COLL_NONE);
+ FIELD_TYPE_UNSIGNED, false, NULL, COLL_NONE, NULL, 0);
/* index no */
key_def_set_part(key_def, 1 /* part no */, 1 /* field no */,
- FIELD_TYPE_UNSIGNED, false, NULL, COLL_NONE);
+ FIELD_TYPE_UNSIGNED, false, NULL, COLL_NONE, NULL, 0);
sc_space_new(BOX_INDEX_ID, "_index", key_def,
&alter_space_on_replace_index, &on_stmt_begin_index);
}
diff --git a/src/box/tuple_compare.cc b/src/box/tuple_compare.cc
index f07b695..923e71c 100644
--- a/src/box/tuple_compare.cc
+++ b/src/box/tuple_compare.cc
@@ -462,6 +462,7 @@ static inline int
tuple_compare_slowpath(const struct tuple *tuple_a, const struct tuple *tuple_b,
const struct key_def *key_def)
{
+ assert(!is_flat == key_def->has_json_paths);
assert(!has_optional_parts || is_nullable);
assert(is_nullable == key_def->is_nullable);
assert(has_optional_parts == key_def->has_optional_parts);
@@ -601,6 +602,7 @@ tuple_compare_with_key_slowpath(const struct tuple *tuple, const char *key,
uint32_t part_count,
const struct key_def *key_def)
{
+ assert(!is_flat == key_def->has_json_paths);
assert(!has_optional_parts || is_nullable);
assert(is_nullable == key_def->is_nullable);
assert(has_optional_parts == key_def->has_optional_parts);
diff --git a/src/box/tuple_extract_key.cc b/src/box/tuple_extract_key.cc
index d95ee8d..0301186 100644
--- a/src/box/tuple_extract_key.cc
+++ b/src/box/tuple_extract_key.cc
@@ -96,6 +96,7 @@ static char *
tuple_extract_key_slowpath(const struct tuple *tuple,
const struct key_def *key_def, uint32_t *key_size)
{
+ assert(!is_flat == key_def->has_json_paths);
assert(!has_optional_parts || key_def->is_nullable);
assert(has_optional_parts == key_def->has_optional_parts);
assert(contains_sequential_parts ==
diff --git a/src/box/tuple_hash.cc b/src/box/tuple_hash.cc
index 272e814..ae0bb8e 100644
--- a/src/box/tuple_hash.cc
+++ b/src/box/tuple_hash.cc
@@ -333,6 +333,7 @@ template <bool has_optional_parts, bool is_flat>
uint32_t
tuple_hash_slowpath(const struct tuple *tuple, const struct key_def *key_def)
{
+ assert(!is_flat == key_def->has_json_paths);
assert(has_optional_parts == key_def->has_optional_parts);
uint32_t h = HASH_SEED;
uint32_t carry = 0;
diff --git a/src/box/vinyl.c b/src/box/vinyl.c
index 7f77963..23abc6b 100644
--- a/src/box/vinyl.c
+++ b/src/box/vinyl.c
@@ -998,6 +998,11 @@ vinyl_index_def_change_requires_rebuild(struct index *index,
return true;
if (!field_type1_contains_type2(new_part->type, old_part->type))
return true;
+ if (old_part->path_len != new_part->path_len)
+ return true;
+ if (memcmp(old_part->path, new_part->path,
+ old_part->path_len) != 0)
+ return true;
}
return false;
}
diff --git a/src/box/vy_log.c b/src/box/vy_log.c
index 3843cad..b1c6659 100644
--- a/src/box/vy_log.c
+++ b/src/box/vy_log.c
@@ -711,7 +711,8 @@ vy_log_record_dup(struct region *pool, const struct vy_log_record *src)
"struct key_part_def");
goto err;
}
- key_def_dump_parts(src->key_def, dst->key_parts);
+ if (key_def_dump_parts(pool, src->key_def, dst->key_parts) != 0)
+ goto err;
dst->key_part_count = src->key_def->part_count;
dst->key_def = NULL;
}
--
2.7.4