From: Kirill Shcherbatov
Subject: [tarantool-patches] [PATCH v2 4/5] box: introduce path_hash and tuple_field tree
Date: Wed, 15 Aug 2018 15:15:02 +0300
To: tarantool-patches@freelists.org
Cc: v.shpilevoy@tarantool.org, Kirill Shcherbatov

To work with JSON-defined indexes this patch introduces a per-format JSON
path hashtable, format->path_hash, and a tree of intermediate path fields
attached to the format root fields.

Example:

: format->path_hash
  [2].FIO.fname -> field "fname" {type=str, off_slot=-1}
  [2].FIO.sname -> field "sname" {type=str, off_slot=-2}

: format->field[2] {type = map}
              |
       FIO {type = map}
          ____|____
         |         |
     "fname"    "sname"
  {type=str,    {type=str,
  off_slot=-1}  off_slot=-2}

Leaf fields that are used in an index have an initialized offset_slot.
On new tuple creation we walk the field tree and use the leaf records to
initialize the tuple field_map. On tuple data access by index the
path_hash hashtable is consulted instead (when the cached offset_slot is
invalid).

All path strings are stored at the end of the format allocation; both the
JSON-tree fields and the format->path_hash entries point into this
storage:

+------------+------------+-------+------------+-------+
|tuple_format|tuple_field1|  ...  |tuple_fieldN| pathK |
+------------+------------+-------+------------+-------+

A new routine, tuple_format_add_json_path(), constructs all the internal
structures for a JSON path on format creation and duplication.

Part of #1012.
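For review convenience, below is a minimal sketch (not part of the patch)
of the fast access path that the hashtable enables. It reuses the names
introduced in the diff (json_path_hash_get(), format->path_hash,
offset_slot, field_map) and omits the epoch-based slot cache and the
legacy-tuple fallback that tuple_field_by_part() performs via
tuple_field_raw_by_path():

    /*
     * Sketch: resolve a JSON-path key part to raw tuple data.
     * Fast path only: look the path up in format->path_hash and use
     * the leaf field's offset_slot to index the tuple field_map.
     */
    static const char *
    example_raw_by_json_path(const struct tuple_format *format,
                             const char *data, const uint32_t *field_map,
                             const char *path, uint32_t path_len)
    {
            struct mh_strnptr_node_t *node =
                    json_path_hash_get(format->path_hash, path, path_len,
                                       mh_strn_hash(path, path_len));
            if (node == NULL)
                    return NULL;
            struct tuple_field *leaf = node->val;
            int32_t slot = leaf->offset_slot;
            if (slot == TUPLE_OFFSET_SLOT_NIL || field_map[slot] == 0)
                    return NULL; /* legacy tuple: decode the path instead */
            return data + field_map[slot];
    }

example_raw_by_json_path() is a hypothetical helper name used only for
illustration; the real lookup lives in tuple_field_by_part() in
src/box/tuple_format.c below.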
--- src/box/errcode.h | 2 +- src/box/key_def.c | 3 + src/box/key_def.h | 2 + src/box/tuple.c | 25 +- src/box/tuple_compare.cc | 41 ++- src/box/tuple_extract_key.cc | 137 +++++++++-- src/box/tuple_format.c | 575 +++++++++++++++++++++++++++++++++++++++---- src/box/tuple_format.h | 84 ++++++- src/box/tuple_hash.cc | 17 +- src/box/vy_lsm.c | 43 ++++ src/box/vy_point_lookup.c | 2 - src/box/vy_stmt.c | 124 ++++++++-- test/box/misc.result | 51 ++-- test/engine/tuple.result | 271 ++++++++++++++++++++ test/engine/tuple.test.lua | 80 ++++++ 15 files changed, 1314 insertions(+), 143 deletions(-) diff --git a/src/box/errcode.h b/src/box/errcode.h index 3d5f66a..8cbd59d 100644 --- a/src/box/errcode.h +++ b/src/box/errcode.h @@ -107,7 +107,7 @@ struct errcode_record { /* 52 */_(ER_FUNCTION_EXISTS, "Function '%s' already exists") \ /* 53 */_(ER_BEFORE_REPLACE_RET, "Invalid return value of space:before_replace trigger: expected tuple or nil, got %s") \ /* 54 */_(ER_FUNCTION_MAX, "A limit on the total number of functions has been reached: %u") \ - /* 55 */_(ER_UNUSED4, "") \ + /* 55 */_(ER_DATA_MISMATCH_INDEX_PART, "Tuple doesn't math document structure defined as index") \ /* 56 */_(ER_USER_MAX, "A limit on the total number of users has been reached: %u") \ /* 57 */_(ER_NO_SUCH_ENGINE, "Space engine '%s' does not exist") \ /* 58 */_(ER_RELOAD_CFG, "Can't set option '%s' dynamically") \ diff --git a/src/box/key_def.c b/src/box/key_def.c index b00e46d..69ae6af 100644 --- a/src/box/key_def.c +++ b/src/box/key_def.c @@ -36,6 +36,7 @@ #include "schema_def.h" #include "coll_id_cache.h" #include "fiber.h" +#include "assoc.h" #include "json/path.h" static const struct key_part_def key_part_def_default = { @@ -319,9 +320,11 @@ key_def_set_part(struct key_def *def, uint32_t part_no, uint32_t fieldno, assert(def->parts[part_no].path != NULL); memcpy(def->parts[part_no].path, path, path_len); def->parts[part_no].path[path_len] = '\0'; + def->parts[part_no].path_hash = mh_strn_hash(path, path_len); } else { def->parts[part_no].path_len = 0; def->parts[part_no].path = NULL; + def->parts[part_no].path_hash = 0; } column_mask_set_fieldno(&def->column_mask, fieldno); /** diff --git a/src/box/key_def.h b/src/box/key_def.h index b6d6259..de732ca 100644 --- a/src/box/key_def.h +++ b/src/box/key_def.h @@ -84,6 +84,8 @@ struct key_part { char *path; /** JSON path length. */ uint32_t path_len; + /** JSON path hash. 
*/ + uint32_t path_hash; }; struct key_def; diff --git a/src/box/tuple.c b/src/box/tuple.c index d7dbad3..130acf7 100644 --- a/src/box/tuple.c +++ b/src/box/tuple.c @@ -135,6 +135,18 @@ runtime_tuple_delete(struct tuple_format *format, struct tuple *tuple) smfree(&runtime_alloc, tuple, total); } +static int +tuple_validate_json_path_data(const struct tuple_field *field, uint32_t idx, + const char *tuple, const char *offset, void *ctx) +{ + (void)tuple; + (void)ctx; + if (key_mp_type_validate(field->type, mp_typeof(*offset), + ER_KEY_PART_TYPE, idx, field->is_nullable) != 0) + return -1; + return 0; +} + int tuple_validate_raw(struct tuple_format *format, const char *tuple) { @@ -159,14 +171,23 @@ tuple_validate_raw(struct tuple_format *format, const char *tuple) /* Check field types */ struct tuple_field *field = &format->fields[0]; + const char *pos = tuple; uint32_t i = 0; uint32_t defined_field_count = MIN(field_count, format->field_count); for (; i < defined_field_count; ++i, ++field) { - if (key_mp_type_validate(field->type, mp_typeof(*tuple), + if (key_mp_type_validate(field->type, mp_typeof(*pos), ER_FIELD_TYPE, i + TUPLE_INDEX_BASE, field->is_nullable)) return -1; - mp_next(&tuple); + /* Check all JSON paths. */ + if (field->array != NULL) { + json_field_tree_routine func = + tuple_validate_json_path_data; + if (json_field_tree_exec_routine(field, i, tuple, pos, + func, NULL) != 0) + return -1; + } + mp_next(&pos); } return 0; } diff --git a/src/box/tuple_compare.cc b/src/box/tuple_compare.cc index 923e71c..490b528 100644 --- a/src/box/tuple_compare.cc +++ b/src/box/tuple_compare.cc @@ -469,7 +469,8 @@ tuple_compare_slowpath(const struct tuple *tuple_a, const struct tuple *tuple_b, const struct key_part *part = key_def->parts; const char *tuple_a_raw = tuple_data(tuple_a); const char *tuple_b_raw = tuple_data(tuple_b); - if (key_def->part_count == 1 && part->fieldno == 0) { + if (key_def->part_count == 1 && part->fieldno == 0 && + part->path == NULL) { /* * First field can not be optional - empty tuples * can not exist. @@ -1060,13 +1061,19 @@ tuple_compare_create(const struct key_def *def) else return tuple_compare_sequential; } else if (def->has_optional_parts) { - return tuple_compare_slowpath; + if (def->has_json_paths) + return tuple_compare_slowpath; + else + return tuple_compare_slowpath; } else { - return tuple_compare_slowpath; + if (def->has_json_paths) + return tuple_compare_slowpath; + else + return tuple_compare_slowpath; } } assert(! def->has_optional_parts); - if (!key_def_has_collation(def)) { + if (!key_def_has_collation(def) && !def->has_json_paths) { /* Precalculated comparators don't use collation */ for (uint32_t k = 0; k < sizeof(cmp_arr) / sizeof(cmp_arr[0]); k++) { @@ -1084,6 +1091,8 @@ tuple_compare_create(const struct key_def *def) } if (key_def_is_sequential(def)) return tuple_compare_sequential; + else if (def->has_json_paths) + return tuple_compare_slowpath; else return tuple_compare_slowpath; } @@ -1280,13 +1289,29 @@ tuple_compare_with_key_create(const struct key_def *def) false>; } } else if (def->has_optional_parts) { - return tuple_compare_with_key_slowpath; + if (def->has_json_paths) { + return tuple_compare_with_key_slowpath; + } else { + return tuple_compare_with_key_slowpath; + } } else { - return tuple_compare_with_key_slowpath; + if (def->has_json_paths) { + return tuple_compare_with_key_slowpath; + } else { + return tuple_compare_with_key_slowpath; + } } } assert(! 
def->has_optional_parts); - if (!key_def_has_collation(def)) { + if (!key_def_has_collation(def) && !def->has_json_paths) { /* Precalculated comparators don't use collation */ for (uint32_t k = 0; k < sizeof(cmp_wk_arr) / sizeof(cmp_wk_arr[0]); @@ -1307,6 +1332,8 @@ tuple_compare_with_key_create(const struct key_def *def) } if (key_def_is_sequential(def)) return tuple_compare_with_key_sequential; + else if (def->has_json_paths) + return tuple_compare_with_key_slowpath; else return tuple_compare_with_key_slowpath; } diff --git a/src/box/tuple_extract_key.cc b/src/box/tuple_extract_key.cc index 0301186..4c0e201 100644 --- a/src/box/tuple_extract_key.cc +++ b/src/box/tuple_extract_key.cc @@ -1,15 +1,25 @@ #include "tuple_extract_key.h" #include "tuple.h" #include "fiber.h" +#include "json/path.h" enum { MSGPACK_NULL = 0xc0 }; +static bool +key_def_parts_are_sequential(const struct key_def *def, int i) +{ + uint32_t fieldno1 = def->parts[i].fieldno + 1; + uint32_t fieldno2 = def->parts[i + 1].fieldno; + return fieldno1 == fieldno2 && def->parts[i].path == NULL && + def->parts[i + 1].path == NULL; +} + /** True, if a key con contain two or more parts in sequence. */ static bool key_def_contains_sequential_parts(const struct key_def *def) { for (uint32_t i = 0; i < def->part_count - 1; ++i) { - if (def->parts[i].fieldno + 1 == def->parts[i + 1].fieldno) + if (key_def_parts_are_sequential(def, i)) return true; } return false; @@ -132,8 +142,7 @@ tuple_extract_key_slowpath(const struct tuple *tuple, * minimize tuple_field_raw() calls. */ for (; i < part_count - 1; i++) { - if (key_def->parts[i].fieldno + 1 != - key_def->parts[i + 1].fieldno) { + if (!key_def_parts_are_sequential(key_def, i)) { /* * End of sequential part. */ @@ -180,8 +189,7 @@ tuple_extract_key_slowpath(const struct tuple *tuple, * minimize tuple_field_raw() calls. */ for (; i < part_count - 1; i++) { - if (key_def->parts[i].fieldno + 1 != - key_def->parts[i + 1].fieldno) { + if (!key_def_parts_are_sequential(key_def, i)) { /* * End of sequential part. */ @@ -214,12 +222,13 @@ tuple_extract_key_slowpath(const struct tuple *tuple, * General-purpose version of tuple_extract_key_raw() * @copydoc tuple_extract_key_raw() */ -template +template static char * tuple_extract_key_slowpath_raw(const char *data, const char *data_end, const struct key_def *key_def, uint32_t *key_size) { + assert(!is_flat == key_def->has_json_paths); assert(!has_optional_parts || key_def->is_nullable); assert(has_optional_parts == key_def->has_optional_parts); assert(mp_sizeof_nil() == 1); @@ -247,11 +256,11 @@ tuple_extract_key_slowpath_raw(const char *data, const char *data_end, uint32_t fieldno = key_def->parts[i].fieldno; uint32_t null_count = 0; for (; i < key_def->part_count - 1; i++) { - if (key_def->parts[i].fieldno + 1 != - key_def->parts[i + 1].fieldno) + if (!key_def_parts_are_sequential(key_def, i)) break; } - uint32_t end_fieldno = key_def->parts[i].fieldno; + const struct key_part *part = &key_def->parts[i]; + uint32_t end_fieldno = part->fieldno; if (fieldno < current_fieldno) { /* Rewind. */ @@ -293,6 +302,38 @@ tuple_extract_key_slowpath_raw(const char *data, const char *data_end, current_fieldno++; } } + const char *field_last, *field_end_last; + if (!is_flat && part->path != NULL) { + field_last = field; + field_end_last = field_end; + struct json_path_parser parser; + struct json_path_node node; + json_path_parser_create(&parser, part->path, + part->path_len); + /* Skip fieldno. 
*/ + int rc = json_path_next(&parser, &node); + assert(rc == 0); + while ((rc = json_path_next(&parser, &node)) == 0 && + node.type != JSON_PATH_END) { + switch(node.type) { + case JSON_PATH_NUM: + rc = tuple_field_go_to_index(&field, + node.num); + break; + case JSON_PATH_STR: + rc = tuple_field_go_to_key(&field, + node.str, + node.len); + break; + default: + unreachable(); + } + assert(rc == 0); + } + assert(rc == 0 && node.type == JSON_PATH_END); + field_end = field; + mp_next(&field_end); + } memcpy(key_buf, field, field_end - field); key_buf += field_end - field; if (has_optional_parts && null_count != 0) { @@ -301,6 +342,10 @@ tuple_extract_key_slowpath_raw(const char *data, const char *data_end, } else { assert(key_buf - key <= data_end - data); } + if (!is_flat && part->path != NULL) { + field = field_last; + field_end = field_end_last; + } } if (key_size != NULL) *key_size = (uint32_t)(key_buf - key); @@ -330,32 +375,74 @@ tuple_extract_key_set(struct key_def *key_def) if (key_def->has_optional_parts) { assert(key_def->is_nullable); if (key_def_contains_sequential_parts(key_def)) { - key_def->tuple_extract_key = - tuple_extract_key_slowpath; + if (key_def->has_json_paths) { + key_def->tuple_extract_key = + tuple_extract_key_slowpath; + } else { + key_def->tuple_extract_key = + tuple_extract_key_slowpath; + } } else { - key_def->tuple_extract_key = - tuple_extract_key_slowpath; + if (key_def->has_json_paths) { + key_def->tuple_extract_key = + tuple_extract_key_slowpath; + } else { + key_def->tuple_extract_key = + tuple_extract_key_slowpath; + } } } else { if (key_def_contains_sequential_parts(key_def)) { - key_def->tuple_extract_key = - tuple_extract_key_slowpath; + if (key_def->has_json_paths) { + key_def->tuple_extract_key = + tuple_extract_key_slowpath; + } else { + key_def->tuple_extract_key = + tuple_extract_key_slowpath; + } } else { - key_def->tuple_extract_key = - tuple_extract_key_slowpath; + if (key_def->has_json_paths) { + key_def->tuple_extract_key = + tuple_extract_key_slowpath; + } else { + key_def->tuple_extract_key = + tuple_extract_key_slowpath; + } } } } if (key_def->has_optional_parts) { assert(key_def->is_nullable); - key_def->tuple_extract_key_raw = - tuple_extract_key_slowpath_raw; + if (key_def->has_json_paths) { + key_def->tuple_extract_key_raw = + tuple_extract_key_slowpath_raw; + } else { + key_def->tuple_extract_key_raw = + tuple_extract_key_slowpath_raw; + } } else { - key_def->tuple_extract_key_raw = - tuple_extract_key_slowpath_raw; + if (key_def->has_json_paths) { + key_def->tuple_extract_key_raw = + tuple_extract_key_slowpath_raw; + } else { + key_def->tuple_extract_key_raw = + tuple_extract_key_slowpath_raw; + } } } diff --git a/src/box/tuple_format.c b/src/box/tuple_format.c index a9fddc0..1821950 100644 --- a/src/box/tuple_format.c +++ b/src/box/tuple_format.c @@ -30,6 +30,7 @@ */ #include "json/path.h" #include "tuple_format.h" +#include "assoc.h" /** Global table of tuple formats */ struct tuple_format **tuple_formats; @@ -38,9 +39,336 @@ static intptr_t recycled_format_ids = FORMAT_ID_NIL; static uint32_t formats_size = 0, formats_capacity = 0; static const struct tuple_field tuple_field_default = { - FIELD_TYPE_ANY, TUPLE_OFFSET_SLOT_NIL, false, false, + FIELD_TYPE_ANY, TUPLE_OFFSET_SLOT_NIL, false, false, {{NULL, 0}} }; +struct mh_strnptr_node_t * +json_path_hash_get(struct mh_strnptr_t *hashtable, const char *path, + uint32_t path_len, uint32_t path_hash) +{ + assert(hashtable != NULL); + struct mh_strnptr_key_t key = {path, path_len, 
path_hash}; + mh_int_t rc = mh_strnptr_find(hashtable, &key, NULL); + if (rc == mh_end(hashtable)) + return NULL; + return mh_strnptr_node(hashtable, rc); +} + +/** + * Create a new hashtable object. + * @param[out] hashtable pointer to object to create. + * @param records count of records to reserve. + * @retval -1 on error. + * @retval 0 on success. + */ +static int +json_path_hash_create(struct mh_strnptr_t **hashtable, uint32_t records) +{ + struct mh_strnptr_t *ret = mh_strnptr_new(); + if (ret == NULL) { + diag_set(OutOfMemory, sizeof(*hashtable), "mh_strnptr_new", + "hashtable"); + return -1; + } + if (mh_strnptr_reserve(ret, records, NULL) != 0) { + mh_strnptr_delete(ret); + diag_set(OutOfMemory, records, "mh_strnptr_reserve", + "hashtable"); + return -1; + } + *hashtable = ret; + return 0; +} +/** + * Delete @hashtable object. + * @param hashtable pointer to object to delete. + */ +static void +json_path_hash_delete(struct mh_strnptr_t *hashtable) +{ + if (hashtable == NULL) + return; + while (mh_size(hashtable)) { + mh_int_t n = mh_first(hashtable); + mh_strnptr_del(hashtable, n, NULL); + } + mh_strnptr_delete(hashtable); +} + +/** + * Insert a new record to hashtable. + * @param hashtable storage to insert new record. + * @param path string. + * @param path_len length of @path. + * @param tuple_field value to store in @hashtable. + * @retval -1 on error. + * @retval 0 on success. + */ +static int +json_path_hash_insert(struct mh_strnptr_t *hashtable, const char *path, + uint32_t path_len, struct tuple_field *field) +{ + assert(hashtable != NULL); + /* Test if record already present in hash. */ + uint32_t path_hash = mh_strn_hash(path, path_len); + struct mh_strnptr_node_t name_node = + {path, path_len, path_hash, field}; + mh_int_t rc = mh_strnptr_put(hashtable, &name_node, NULL, NULL); + if (rc == mh_end(hashtable)) { + diag_set(OutOfMemory, sizeof(*hashtable), "mh_strnptr_put", + "hashtable"); + return -1; + } + return 0; +} + +/** + * Construct field tree level for JSON path part. + * + * @param[in, out] tuple_field pointer to record to start with + * would be changed to record that math + * @part lexeme. + * @param fieldno number of root space field. + * @param part JSON path lexeme to represent in field tree. + * @retval -1 on error. + * @retval 0 on success. + */ +static int +json_field_tree_append(struct tuple_field **field_subtree, uint32_t fieldno, + struct json_path_node *part) +{ + enum field_type type; + struct tuple_field *field = *field_subtree; + switch (part->type) { + case JSON_PATH_NUM: { + type = FIELD_TYPE_ARRAY; + if (field->type != FIELD_TYPE_ANY && field->type != type) + goto error_type_mistmatch; + field->type = type; + /* Create or resize field array if required. */ + if (field->array == NULL || part->num >= field->array_size) { + struct tuple_field **array = + realloc(field->array, + part->num * sizeof(struct tuple_field *)); + if (array == NULL) { + diag_set(OutOfMemory, + sizeof(struct tuple_field *), "realloc", + "array"); + return -1; + } + if (field->array == NULL) { + memset(array, 0, part->num * + sizeof(struct tuple_field *)); + } else { + memset(&array[field->array_size], 0, + (part->num - field->array_size) * + sizeof(struct tuple_field *)); + } + field->array = array; + field->array_size = part->num; + } + /* Record already exists. 
No actions required */ + if (field->array[part->num - TUPLE_INDEX_BASE] != NULL) { + *field_subtree = + field->array[part->num - TUPLE_INDEX_BASE]; + return 0; + } + break; + } + case JSON_PATH_STR: { + type = FIELD_TYPE_MAP; + if (field->type != FIELD_TYPE_ANY && field->type != type) + goto error_type_mistmatch; + field->type = type; + if (field->map == NULL && + json_path_hash_create(&field->map, 1) != 0) + return -1; + struct mh_strnptr_node_t *node = + json_path_hash_get(field->map, part->str, part->len, + mh_strn_hash(part->str, part->len)); + if (node != NULL) { + *field_subtree = node->val; + return 0; + } + break; + } + default: + unreachable(); + } + + /* Construct and insert a new record. */ + struct tuple_field *new_field = malloc(sizeof(struct tuple_field)); + if (new_field == NULL) { + diag_set(OutOfMemory, sizeof(struct tuple_field), "malloc", + "new_field"); + return -1; + } + *new_field = tuple_field_default; + if (field->type == FIELD_TYPE_MAP) { + if (json_path_hash_insert(field->map, part->str, part->len, + new_field) != 0) { + free(new_field); + return -1; + } + } else if (field->type == FIELD_TYPE_ARRAY) { + field->array[part->num - TUPLE_INDEX_BASE] = new_field; + } + *field_subtree = new_field; + + return 0; + +error_type_mistmatch: + diag_set(ClientError, ER_INDEX_PART_TYPE_MISMATCH, + tt_sprintf("%d", fieldno + TUPLE_INDEX_BASE), + field_type_strs[type], field_type_strs[field->type]); + return -1; +} + +/** + * Delete @field_subtree object. + * @param field_subtree to delete. + */ +static void +json_field_tree_delete(struct tuple_field *field_subtree) +{ + if (field_subtree->type == FIELD_TYPE_MAP && + field_subtree->map != NULL) { + mh_int_t i; + mh_foreach(field_subtree->map, i) { + struct tuple_field *field = + mh_strnptr_node(field_subtree->map, i)->val; + if (field == NULL) + continue; + json_field_tree_delete(field); + free(field); + } + json_path_hash_delete(field_subtree->map); + } else if (field_subtree->type == FIELD_TYPE_ARRAY && + field_subtree->array != NULL) { + for (uint32_t i = 0; i < field_subtree->array_size; i++) { + struct tuple_field *field = field_subtree->array[i]; + if (field == NULL) + continue; + json_field_tree_delete(field_subtree->array[i]); + free(field_subtree->array[i]); + } + free(field_subtree->array); + } +} + +int +json_field_tree_exec_routine(const struct tuple_field *field, uint32_t idx, + const char *tuple, const char *offset, + json_field_tree_routine routine, void *routine_ctx) +{ + int rc = 0; + if (field->type == FIELD_TYPE_MAP) { + mh_int_t i; + mh_foreach(field->map, i) { + struct mh_strnptr_node_t *node = + mh_strnptr_node(field->map, i); + const char *raw = offset; + if (tuple_field_go_to_key(&raw, node->str, + node->len) != 0) { + diag_set(ClientError, + ER_DATA_MISMATCH_INDEX_PART); + return -1; + } + if (json_field_tree_exec_routine(node->val, idx, + tuple, raw, routine, + routine_ctx) != 0) + return -1; + } + } else if (field->type == FIELD_TYPE_ARRAY) { + assert(mp_typeof(*offset) == MP_ARRAY); + uint32_t count = mp_decode_array(&offset); + if (count < field->array_size) { + diag_set(ClientError, ER_DATA_MISMATCH_INDEX_PART); + return -1; + } + for (uint32_t i = 0; i < field->array_size; + i++, mp_next(&offset)) { + if (field->array[i] == NULL) + continue; + if (json_field_tree_exec_routine(field->array[i], idx, + tuple, offset, routine, + routine_ctx) != 0) + return -1; + } + } else { + rc = routine(field, idx, tuple, offset, routine_ctx); + } + return rc; +} + +/** + * Add new JSON @path to @format. 
+ * @param format to modify. + * @param path string to add. + * @param path_len length of @path. + * @param field_type type of field by @path. + * @param[out] leaf_field pointer to leaf field. + * @retval -1 on error. + * @retval 0 on success. + */ +static int +tuple_format_add_json_path(struct tuple_format *format, const char *path, + uint32_t path_len, enum field_type type, + struct tuple_field **leaf_field) +{ + assert(format->path_hash != NULL); + /* + * Get root field by index. + * Path is specified in canonical form: [i]... + */ + int rc = 0; + struct json_path_parser parser; + struct json_path_node node; + json_path_parser_create(&parser, path, path_len); + rc = json_path_next(&parser, &node); + assert(rc == 0 && node.type == JSON_PATH_NUM); + assert(node.num < format->field_count + 1); + + /* Test if path is already registered. */ + struct mh_strnptr_node_t *leaf_node = NULL; + uint32_t hash = mh_strn_hash(path, path_len); + if ((leaf_node = json_path_hash_get(format->path_hash, path, + path_len, hash)) != NULL) { + struct tuple_field *field = leaf_node->val; + if (field->type != type) { + const char *err = + tt_sprintf("JSON path '%.*s' has been already " + "constructed for '%s' leaf record", + path_len, path, + field_type_strs[field->type]); + diag_set(ClientError, ER_WRONG_INDEX_OPTIONS, + node.num, err); + return -1; + } + *leaf_field = field; + return 0; + } + + /* Build data path tree. */ + uint32_t root_fieldno = node.num - TUPLE_INDEX_BASE; + struct tuple_field *field = &format->fields[root_fieldno]; + while ((rc = json_path_next(&parser, &node)) == 0 && + node.type != JSON_PATH_END) { + if (json_field_tree_append(&field, root_fieldno, &node) != 0) + return -1; + } + assert(rc == 0 && node.type == JSON_PATH_END); + + /* Leaf record is a new object as JSON path unique. */ + field->type = type; + if (json_path_hash_insert(format->path_hash, path, path_len, + field) != 0) + return -1; + + *leaf_field = field; + return 0; +} + /** * Extract all available type info from keys and field * definitions. @@ -63,12 +391,17 @@ tuple_format_create(struct tuple_format *format, struct key_def * const *keys, format->fields[i].type = fields[i].type; format->fields[i].offset_slot = TUPLE_OFFSET_SLOT_NIL; format->fields[i].is_nullable = fields[i].is_nullable; + /* Don't need to init format->fields[i].map. */ + format->fields[i].array = NULL; + format->fields[i].array_size = 0; } /* Initialize remaining fields */ for (uint32_t i = field_count; i < format->field_count; i++) format->fields[i] = tuple_field_default; int current_slot = 0; + char *data = (char *)format + sizeof(struct tuple_format) + + format->field_count * sizeof(struct tuple_field); /* extract field type info */ for (uint16_t key_no = 0; key_no < key_count; ++key_no) { @@ -101,10 +434,12 @@ tuple_format_create(struct tuple_format *format, struct key_def * const *keys, * used in tuple_format. */ if (field_type1_contains_type2(field->type, - part->type)) { + part->type) && + part->path == NULL) { field->type = part->type; } else if (! field_type1_contains_type2(part->type, - field->type)) { + field->type) && + part->path == NULL) { const char *name; int fieldno = part->fieldno + TUPLE_INDEX_BASE; if (part->fieldno >= field_count) { @@ -131,9 +466,22 @@ tuple_format_create(struct tuple_format *format, struct key_def * const *keys, * First field is always simply accessible, * so we don't store an offset for it. 
*/ - if (field->offset_slot == TUPLE_OFFSET_SLOT_NIL && + if (part->path != NULL) { + assert(is_sequential == false); + memcpy(data, part->path, part->path_len); + data[part->path_len] = '\0'; + struct tuple_field *leaf = NULL; + if (tuple_format_add_json_path(format, data, + part->path_len, + part->type, + &leaf) != 0) + return -1; + assert(leaf != NULL); + if (leaf->offset_slot == TUPLE_OFFSET_SLOT_NIL) + leaf->offset_slot = --current_slot; + data += part->path_len + 1; + } else if (field->offset_slot == TUPLE_OFFSET_SLOT_NIL && is_sequential == false && part->fieldno > 0) { - field->offset_slot = --current_slot; } } @@ -201,20 +549,26 @@ static struct tuple_format * tuple_format_alloc(struct key_def * const *keys, uint16_t key_count, uint32_t space_field_count, struct tuple_dictionary *dict) { + size_t extra_size = 0; uint32_t index_field_count = 0; + uint32_t json_path_count = 0; /* find max max field no */ for (uint16_t key_no = 0; key_no < key_count; ++key_no) { const struct key_def *key_def = keys[key_no]; const struct key_part *part = key_def->parts; const struct key_part *pend = part + key_def->part_count; for (; part < pend; part++) { + if (part->path != NULL) { + json_path_count++; + extra_size += part->path_len + 1; + } index_field_count = MAX(index_field_count, part->fieldno + 1); } } uint32_t field_count = MAX(space_field_count, index_field_count); uint32_t total = sizeof(struct tuple_format) + - field_count * sizeof(struct tuple_field); + field_count * sizeof(struct tuple_field) + extra_size; struct tuple_format *format = (struct tuple_format *) malloc(total); if (format == NULL) { @@ -244,6 +598,11 @@ tuple_format_alloc(struct key_def * const *keys, uint16_t key_count, format->index_field_count = index_field_count; format->exact_field_count = 0; format->min_field_count = 0; + if (json_path_hash_create(&format->path_hash, json_path_count) != 0) { + tuple_dictionary_unref(format->dict); + free(format); + return NULL; + } return format; } @@ -251,6 +610,9 @@ tuple_format_alloc(struct key_def * const *keys, uint16_t key_count, static inline void tuple_format_destroy(struct tuple_format *format) { + for (uint32_t i = 0; i < format->field_count; i++) + json_field_tree_delete(&format->fields[i]); + json_path_hash_delete(format->path_hash); tuple_dictionary_unref(format->dict); } @@ -335,21 +697,75 @@ tuple_format_dup(struct tuple_format *src) { uint32_t total = sizeof(struct tuple_format) + src->field_count * sizeof(struct tuple_field); + if (src->path_hash != NULL) { + mh_int_t i; + mh_foreach(src->path_hash, i) + total += mh_strnptr_node(src->path_hash, i)->len + 1; + } struct tuple_format *format = (struct tuple_format *) malloc(total); if (format == NULL) { diag_set(OutOfMemory, total, "malloc", "tuple format"); return NULL; } memcpy(format, src, total); + + /* Fill with NULLs for normal destruction on error. */ + format->path_hash = NULL; + for (uint32_t i = 0; i < format->field_count; i++) { + format->fields[i].array = NULL; + format->fields[i].array_size = 0; + } + if (src->path_hash != NULL) { + mh_int_t i; + if (json_path_hash_create(&format->path_hash, + mh_size(src->path_hash)) != 0) + goto error; + mh_foreach(src->path_hash, i) { + struct mh_strnptr_node_t *node = + mh_strnptr_node(src->path_hash, i); + /* Path data has been already copied. */ + char *path = (char *)format + (node->str - (char *)src); + /* Store source leaf field offset_slot. 
*/ + struct tuple_field *leaf_field = node->val; + int32_t offset_slot = leaf_field->offset_slot; + if (tuple_format_add_json_path(format, path, node->len, + leaf_field->type, + &leaf_field) != 0) + goto error; + /* Store offset_slot in a new leaf record. */ + assert(leaf_field != NULL); + leaf_field->offset_slot = offset_slot; + } + } tuple_dictionary_ref(format->dict); format->id = FORMAT_ID_NIL; format->refs = 0; - if (tuple_format_register(format) != 0) { - tuple_format_destroy(format); - free(format); - return NULL; - } + if (tuple_format_register(format) != 0) + goto error; return format; +error: + tuple_format_destroy(format); + free(format); + return NULL; +} + +/** + * Watch json_field_tree_routine description + * @param ctx is field_map + */ +static int +tuple_init_json_field_map_routine(const struct tuple_field *field, uint32_t idx, + const char *tuple, const char *offset, + void *ctx) +{ + uint32_t *field_map = ctx; + assert(field->offset_slot != TUPLE_OFFSET_SLOT_NIL); + if (key_mp_type_validate(field->type, mp_typeof(*offset), + ER_KEY_PART_TYPE, idx, + field->is_nullable) != 0) + return -1; + field_map[field->offset_slot] = (uint32_t)(offset - tuple); + return 0; } /** @sa declaration for details. */ @@ -378,18 +794,14 @@ tuple_init_field_map(const struct tuple_format *format, uint32_t *field_map, return -1; } - /* first field is simply accessible, so we do not store offset to it */ - enum mp_type mp_type = mp_typeof(*pos); + /* + * First field is simply accessible, store offset to it + * only for JSON path. + */ + uint32_t i = 0; + enum mp_type mp_type; const struct tuple_field *field = &format->fields[0]; - if (key_mp_type_validate(field->type, mp_type, ER_FIELD_TYPE, - TUPLE_INDEX_BASE, field->is_nullable)) - return -1; - mp_next(&pos); - /* other fields...*/ - ++field; - uint32_t i = 1; - uint32_t defined_field_count = MIN(field_count, format->field_count); - if (field_count < format->index_field_count) { + if (field_count < format->index_field_count || field->map != NULL) { /* * Nullify field map to be able to detect by 0, * which key fields are absent in tuple_field(). @@ -397,6 +809,16 @@ tuple_init_field_map(const struct tuple_format *format, uint32_t *field_map, memset((char *)field_map - format->field_map_size, 0, format->field_map_size); } + if (field->map == NULL) { + mp_type = mp_typeof(*pos); + if (key_mp_type_validate(field->type, mp_type, ER_FIELD_TYPE, + TUPLE_INDEX_BASE, field->is_nullable)) + return -1; + mp_next(&pos); + ++field; + ++i; + } + uint32_t defined_field_count = MIN(field_count, format->field_count); for (; i < defined_field_count; ++i, ++field) { mp_type = mp_typeof(*pos); if (key_mp_type_validate(field->type, mp_type, ER_FIELD_TYPE, @@ -407,6 +829,14 @@ tuple_init_field_map(const struct tuple_format *format, uint32_t *field_map, field_map[field->offset_slot] = (uint32_t) (pos - tuple); } + if (field->map != NULL) { + assert(field->array != NULL); + json_field_tree_routine func = + tuple_init_json_field_map_routine; + if (json_field_tree_exec_routine(field, i, tuple, pos, + func, field_map) != 0) + return -1; + } mp_next(&pos); } return 0; @@ -467,15 +897,60 @@ box_tuple_format_unref(box_tuple_format_t *format) tuple_format_unref(format); } -/** - * Propagate @a field to MessagePack(field)[index]. - * @param[in][out] field Field to propagate. - * @param index 1-based index to propagate to. - * - * @retval 0 Success, the index was found. - * @retval -1 Not found. 
- */ -static inline int +const char * +tuple_field_by_part(const struct tuple_format *format, const char *data, + const uint32_t *field_map, struct key_part *part) +{ + const char *raw = NULL; + uint32_t field_no = part->fieldno; + struct mh_strnptr_node_t *node; + if (unlikely(part->path == NULL)) { + raw = tuple_field_raw(format, data, field_map, field_no); + } else { + int32_t offset_slot = TUPLE_OFFSET_SLOT_NIL; + if (part->format_epoch == format->epoch && + -part->slot_cache * sizeof(uint32_t) <= + format->field_map_size) { + offset_slot = part->slot_cache; + } else if (format->path_hash != NULL && + (node = json_path_hash_get(format->path_hash, + part->path, + part->path_len, + part->path_hash)) != + NULL) { + assert(node != NULL); + struct tuple_field *field = node->val; + assert(field != NULL); + offset_slot = field->offset_slot; + } + if (unlikely(offset_slot == TUPLE_OFFSET_SLOT_NIL || + field_map[offset_slot] == 0)) { + /* + * Legacy tuple having no field map + * for JSON index. + */ + uint32_t path_hash = + field_name_hash(part->path, part->path_len); + if (tuple_field_raw_by_path(format, data, field_map, + part->path, part->path_len, + path_hash, &raw) != 0) + raw = NULL; + } else { + assert(offset_slot < 0); + assert(-offset_slot * sizeof(uint32_t) <= + format->field_map_size); + /* Cache offset_slot if required. */ + if (part->format_epoch < format->epoch) { + part->slot_cache = offset_slot; + part->format_epoch = format->epoch; + } + raw = data + field_map[offset_slot]; + } + } + return raw; +} + +int tuple_field_go_to_index(const char **field, uint64_t index) { enum mp_type type = mp_typeof(**field); @@ -513,16 +988,7 @@ tuple_field_go_to_index(const char **field, uint64_t index) return -1; } -/** - * Propagate @a field to MessagePack(field)[key]. - * @param[in][out] field Field to propagate. - * @param key Key to propagate to. - * @param len Length of @a key. - * - * @retval 0 Success, the index was found. - * @retval -1 Not found. - */ -static inline int +int tuple_field_go_to_key(const char **field, const char *key, int len) { enum mp_type type = mp_typeof(**field); @@ -547,21 +1013,32 @@ tuple_field_go_to_key(const char **field, const char *key, int len) return -1; } -const char * -tuple_field_by_part(const struct tuple_format *format, const char *data, - const uint32_t *field_map, struct key_part *part) -{ - return tuple_field_raw(format, data, field_map, part->fieldno); -} - int -tuple_field_raw_by_path(struct tuple_format *format, const char *tuple, +tuple_field_raw_by_path(const struct tuple_format *format, const char *tuple, const uint32_t *field_map, const char *path, uint32_t path_len, uint32_t path_hash, const char **field) { assert(path_len > 0); uint32_t fieldno; + if (format->path_hash != NULL) { + /* + * The path hash for format->path_hash hashtable + * may may be different from path_hash specified + * as function argument. + */ + struct mh_strnptr_node_t *ht_record = + json_path_hash_get(format->path_hash, path, path_len, + mh_strn_hash(path, path_len)); + if (ht_record != NULL) { + struct tuple_field *tuple_field = ht_record->val; + int32_t offset_slot = tuple_field->offset_slot; + assert(offset_slot != TUPLE_OFFSET_SLOT_NIL); + assert(field_map[offset_slot] != 0); + *field = tuple + field_map[offset_slot]; + return 0; + } + } /* * It is possible, that a field has a name as * well-formatted JSON. 
For example 'a.b.c.d' or '[1]' can diff --git a/src/box/tuple_format.h b/src/box/tuple_format.h index a989917..1348f0d 100644 --- a/src/box/tuple_format.h +++ b/src/box/tuple_format.h @@ -108,6 +108,18 @@ struct tuple_field { bool is_key_part; /** True, if a field can store NULL. */ bool is_nullable; + /** Tree child records. */ + union { + /** Array of fields. */ + struct { + struct tuple_field **array; + uint32_t array_size; + }; + /** Hashtable: path -> tuple_field. */ + struct mh_strnptr_t *map; + /** Leaf argument for tree-walker routine. */ + void *arg; + }; }; /** @@ -166,6 +178,8 @@ struct tuple_format { * Shared names storage used by all formats of a space. */ struct tuple_dictionary *dict; + /** JSON path hash table. */ + struct mh_strnptr_t *path_hash; /* Formats of the fields */ struct tuple_field fields[0]; }; @@ -395,7 +409,7 @@ tuple_field_raw(const struct tuple_format *format, const char *tuple, * @retval NULL No field with @a name. */ static inline const char * -tuple_field_raw_by_name(struct tuple_format *format, const char *tuple, +tuple_field_raw_by_name(const struct tuple_format *format, const char *tuple, const uint32_t *field_map, const char *name, uint32_t name_len, uint32_t name_hash) { @@ -420,11 +434,77 @@ tuple_field_raw_by_name(struct tuple_format *format, const char *tuple, * @retval -1 Error in JSON path. */ int -tuple_field_raw_by_path(struct tuple_format *format, const char *tuple, +tuple_field_raw_by_path(const struct tuple_format *format, const char *tuple, const uint32_t *field_map, const char *path, uint32_t path_len, uint32_t path_hash, const char **field); +/** + * Get @hashtable record by key @path, @path_len. + * @param hashtable to lookup, + * @param path string. + * @param path_len length of @path. + * @retval NULL on nothing found. + * @retval hashtable record pointer. + */ +struct mh_strnptr_node_t * +json_path_hash_get(struct mh_strnptr_t *hashtable, const char *path, + uint32_t path_len, uint32_t path_hash); + +/** + * Routine to execute with json_field_tree_exec_routine on JSON + * path field tree records. + * @param field to use on initialization. + * @param idx root field index to emmit correct error. + * @param tuple source raw data. + * @param offset calculated offset of field that path refers to. + * @param ctx callback argument + * @retval 0 on success. + * @retval -1 on error. + */ +typedef int (*json_field_tree_routine)(const struct tuple_field *field, + uint32_t idx, const char *tuple, + const char *offset, void *ctx); + +/** + * Execute a @routine on @field leaf records of JSON path + * represented as a tree with specified @tuple. + * @param field to use on initialization. + * @param idx root field index to emmit correct error. + * @param tuple source raw data. + * @param offset calculated offset of field that path refers to. + * @param ctx callback argument + * @retval 0 on success. + * @retval -1 on error. + */ +int +json_field_tree_exec_routine(const struct tuple_field *field, uint32_t idx, + const char *tuple, const char *offset, + json_field_tree_routine routine, void *routine_ctx); + +/** + * Propagate @a field to MessagePack(field)[index]. + * @param[in][out] field Field to propagate. + * @param index 1-based index to propagate to. + * + * @retval 0 Success, the index was found. + * @retval -1 Not found. + */ +int +tuple_field_go_to_index(const char **field, uint64_t index); + +/** + * Propagate @a field to MessagePack(field)[key]. + * @param[in][out] field Field to propagate. + * @param key Key to propagate to. 
+ * @param len Length of @a key. + * + * @retval 0 Success, the index was found. + * @retval -1 Not found. + */ +int +tuple_field_go_to_key(const char **field, const char *key, int len); + #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ diff --git a/src/box/tuple_hash.cc b/src/box/tuple_hash.cc index ae0bb8e..5f3357a 100644 --- a/src/box/tuple_hash.cc +++ b/src/box/tuple_hash.cc @@ -228,7 +228,7 @@ key_hash_slowpath(const char *key, const struct key_def *key_def); void tuple_hash_func_set(struct key_def *key_def) { - if (key_def->is_nullable) + if (key_def->is_nullable || key_def->has_json_paths) goto slowpath; /* * Check that key_def defines sequential a key without holes @@ -262,10 +262,17 @@ tuple_hash_func_set(struct key_def *key_def) { } slowpath: - if (key_def->has_optional_parts) - key_def->tuple_hash = tuple_hash_slowpath; - else - key_def->tuple_hash = tuple_hash_slowpath; + if (key_def->has_optional_parts) { + if (key_def->has_json_paths) + key_def->tuple_hash = tuple_hash_slowpath; + else + key_def->tuple_hash = tuple_hash_slowpath; + } else { + if (key_def->has_json_paths) + key_def->tuple_hash = tuple_hash_slowpath; + else + key_def->tuple_hash = tuple_hash_slowpath; + } key_def->key_hash = key_hash_slowpath; } diff --git a/src/box/vy_lsm.c b/src/box/vy_lsm.c index cb3c436..1bb5e22 100644 --- a/src/box/vy_lsm.c +++ b/src/box/vy_lsm.c @@ -36,6 +36,7 @@ #include #include #include +#include #include "diag.h" #include "errcode.h" @@ -158,6 +159,48 @@ vy_lsm_new(struct vy_lsm_env *lsm_env, struct vy_cache_env *cache_env, NULL); if (lsm->disk_format == NULL) goto fail_format; + /* + * Tuple formats should be compatible to make + * epoch-based caching work. + */ + int32_t min_offset_slot = 0; + struct tuple_field *dst_fields = lsm->disk_format->fields; + struct mh_strnptr_t *dst_ht = lsm->disk_format->path_hash; + struct mh_strnptr_t *src_ht = format->path_hash; + struct key_part *part = cmp_def->parts; + struct key_part *part_end = part + cmp_def->part_count; + for (; part < part_end; part++) { + struct tuple_field *dst_field = + &dst_fields[part->fieldno]; + struct tuple_field *src_field; + if (dst_field->offset_slot != TUPLE_OFFSET_SLOT_NIL) { + src_field = &format->fields[part->fieldno]; + } else if (dst_fields[part->fieldno].map != NULL) { + struct mh_strnptr_node_t *node; + node = json_path_hash_get(dst_ht, part->path, + part->path_len, + part->path_hash); + assert(node != NULL); + dst_field = node->val; + assert(dst_field != NULL); + + node = json_path_hash_get(src_ht, part->path, + part->path_len, + part->path_hash); + assert(node != NULL); + src_field = node->val; + assert(dst_field != NULL); + } else { + continue; + } + if (src_field->offset_slot == TUPLE_OFFSET_SLOT_NIL) + continue; + dst_field->offset_slot = src_field->offset_slot; + min_offset_slot = + MIN(src_field->offset_slot, min_offset_slot); + } + lsm->disk_format->field_map_size = + -min_offset_slot * sizeof(uint32_t); } tuple_format_ref(lsm->disk_format); diff --git a/src/box/vy_point_lookup.c b/src/box/vy_point_lookup.c index 5e43340..5e5ed16 100644 --- a/src/box/vy_point_lookup.c +++ b/src/box/vy_point_lookup.c @@ -196,8 +196,6 @@ vy_point_lookup(struct vy_lsm *lsm, struct vy_tx *tx, const struct vy_read_view **rv, struct tuple *key, struct tuple **ret) { - assert(tuple_field_count(key) >= lsm->cmp_def->part_count); - *ret = NULL; double start_time = ev_monotonic_now(loop()); int rc = 0; diff --git a/src/box/vy_stmt.c b/src/box/vy_stmt.c index a4b7975..80f08bb 100644 --- 
a/src/box/vy_stmt.c +++ b/src/box/vy_stmt.c @@ -44,6 +44,7 @@ #include "tuple_format.h" #include "xrow.h" #include "fiber.h" +#include "assoc.h" static struct tuple * vy_tuple_new(struct tuple_format *format, const char *data, const char *end) @@ -321,6 +322,67 @@ vy_stmt_replace_from_upsert(const struct tuple *upsert) return replace; } +static void +vy_stmt_msgpack_build(struct tuple_field *field, char *tuple, + uint32_t *field_map, char **offset, bool write_data) +{ + if (field->type == FIELD_TYPE_ARRAY) { + if (write_data) + *offset = mp_encode_array(*offset, field->array_size); + else + *offset += mp_sizeof_array(field->array_size); + for (uint32_t i = 0; i < field->array_size; i++) { + if (field->array[i] == NULL) { + if (write_data) + *offset = mp_encode_nil(*offset); + else + *offset += mp_sizeof_nil(); + continue; + } + vy_stmt_msgpack_build(field->array[i], tuple, field_map, + offset, write_data); + } + return; + } else if (field->type == FIELD_TYPE_MAP) { + if (write_data) + *offset = mp_encode_map(*offset, mh_size(field->map)); + else + *offset += mp_sizeof_map(mh_size(field->map)); + mh_int_t i; + mh_foreach(field->map, i) { + struct mh_strnptr_node_t *node = + mh_strnptr_node(field->map, i); + assert(node); + if (write_data) { + *offset = mp_encode_str(*offset, node->str, + node->len); + } else { + *offset += mp_sizeof_str(node->len); + } + vy_stmt_msgpack_build(node->val, tuple, field_map, + offset, write_data); + } + return;; + } + + struct iovec *iov = field->arg; + if (iov == NULL) { + if (write_data) + *offset = mp_encode_nil(*offset); + else + *offset += mp_sizeof_nil(); + } else { + if (write_data) { + uint32_t data_offset = *offset - tuple; + memcpy(*offset, iov->iov_base, iov->iov_len); + field->arg = NULL; + if (field->offset_slot != TUPLE_OFFSET_SLOT_NIL) + field_map[field->offset_slot] = data_offset; + } + *offset += iov->iov_len; + } +} + static struct tuple * vy_stmt_new_surrogate_from_key(const char *key, enum iproto_type type, const struct key_def *cmp_def, @@ -331,27 +393,45 @@ vy_stmt_new_surrogate_from_key(const char *key, enum iproto_type type, struct region *region = &fiber()->gc; uint32_t field_count = format->index_field_count; - struct iovec *iov = region_alloc(region, sizeof(*iov) * field_count); + uint32_t part_count = mp_decode_array(&key); + assert(part_count == cmp_def->part_count); + struct iovec *iov = region_alloc(region, sizeof(*iov) * part_count); if (iov == NULL) { - diag_set(OutOfMemory, sizeof(*iov) * field_count, - "region", "iov for surrogate key"); + diag_set(OutOfMemory, sizeof(*iov) * part_count, "region", + "iov for surrogate key"); return NULL; } - memset(iov, 0, sizeof(*iov) * field_count); - uint32_t part_count = mp_decode_array(&key); - assert(part_count == cmp_def->part_count); - assert(part_count <= field_count); - uint32_t nulls_count = field_count - cmp_def->part_count; - uint32_t bsize = mp_sizeof_array(field_count) + - mp_sizeof_nil() * nulls_count; - for (uint32_t i = 0; i < part_count; ++i) { - const struct key_part *part = &cmp_def->parts[i]; + uint32_t bsize = mp_sizeof_array(field_count); + uint32_t nulls_count = field_count; + memset(iov, 0, sizeof(*iov) * part_count); + const struct key_part *part = cmp_def->parts; + for (uint32_t i = 0; i < part_count; ++i, ++part) { assert(part->fieldno < field_count); const char *svp = key; - iov[part->fieldno].iov_base = (char *) key; + iov[i].iov_base = (char *) key; mp_next(&key); - iov[part->fieldno].iov_len = key - svp; - bsize += key - svp; + iov[i].iov_len = key - svp; + 
struct tuple_field *field; + if (part->path == NULL) { + field = &format->fields[part->fieldno]; + --nulls_count; + } else { + struct mh_strnptr_node_t *node = + json_path_hash_get(format->path_hash, + part->path, part->path_len, + part->path_hash); + assert(node != NULL); + field = node->val; + assert(field != NULL); + } + field->arg = &iov[i]; + } + bsize += nulls_count * mp_sizeof_nil(); + for (uint32_t i = 0; i < field_count; ++i) { + char *data = NULL; + vy_stmt_msgpack_build(&format->fields[i], NULL, NULL, &data, + false); + bsize += data - (char *)NULL; } struct tuple *stmt = vy_stmt_alloc(format, bsize); @@ -362,17 +442,11 @@ vy_stmt_new_surrogate_from_key(const char *key, enum iproto_type type, uint32_t *field_map = (uint32_t *) raw; char *wpos = mp_encode_array(raw, field_count); for (uint32_t i = 0; i < field_count; ++i) { - const struct tuple_field *field = &format->fields[i]; - if (field->offset_slot != TUPLE_OFFSET_SLOT_NIL) - field_map[field->offset_slot] = wpos - raw; - if (iov[i].iov_base == NULL) { - wpos = mp_encode_nil(wpos); - } else { - memcpy(wpos, iov[i].iov_base, iov[i].iov_len); - wpos += iov[i].iov_len; - } + vy_stmt_msgpack_build(&format->fields[i], raw, field_map, &wpos, + true); } - assert(wpos == raw + bsize); + + assert(wpos <= raw + bsize); vy_stmt_set_type(stmt, type); return stmt; } diff --git a/test/box/misc.result b/test/box/misc.result index 4895a78..556f004 100644 --- a/test/box/misc.result +++ b/test/box/misc.result @@ -348,7 +348,7 @@ t; - 'box.error.CANT_CREATE_COLLATION : 150' - 'box.error.USER_EXISTS : 46' - 'box.error.WAL_IO : 40' - - 'box.error.PROC_RET : 21' + - 'box.error.RTREE_RECT : 101' - 'box.error.PRIV_GRANTED : 89' - 'box.error.CREATE_SPACE : 9' - 'box.error.GRANT : 88' @@ -359,7 +359,7 @@ t; - 'box.error.VINYL_MAX_TUPLE_SIZE : 139' - 'box.error.LOAD_FUNCTION : 99' - 'box.error.INVALID_XLOG : 74' - - 'box.error.READ_VIEW_ABORTED : 130' + - 'box.error.PRIV_NOT_GRANTED : 91' - 'box.error.TRANSACTION_CONFLICT : 97' - 'box.error.GUEST_USER_PASSWORD : 96' - 'box.error.PROC_C : 102' @@ -370,7 +370,7 @@ t; - 'box.error.CFG : 59' - 'box.error.NO_SUCH_FIELD : 37' - 'box.error.CONNECTION_TO_SELF : 117' - - 'box.error.FUNCTION_MAX : 54' + - 'box.error.PROC_LUA : 32' - 'box.error.ILLEGAL_PARAMS : 1' - 'box.error.PARTIAL_KEY : 136' - 'box.error.SAVEPOINT_NO_TRANSACTION : 114' @@ -397,36 +397,37 @@ t; - 'box.error.FUNCTION_EXISTS : 52' - 'box.error.UPDATE_ARG_TYPE : 26' - 'box.error.CROSS_ENGINE_TRANSACTION : 81' - - 'box.error.FORMAT_MISMATCH_INDEX_PART : 27' - 'box.error.injection : table:
+ - 'box.error.FORMAT_MISMATCH_INDEX_PART : 27' + - 'box.error.IDENTIFIER : 70' - 'box.error.FUNCTION_TX_ACTIVE : 30' - - 'box.error.ITERATOR_TYPE : 72' - 'box.error.TRANSACTION_YIELD : 154' + - 'box.error.NULLABLE_MISMATCH : 153' - 'box.error.NO_SUCH_ENGINE : 57' - 'box.error.COMMIT_IN_SUB_STMT : 122' - - 'box.error.NULLABLE_MISMATCH : 153' - - 'box.error.UNSUPPORTED : 5' - - 'box.error.LAST_DROP : 15' + - 'box.error.RELOAD_CFG : 58' - 'box.error.SPACE_FIELD_IS_DUPLICATE : 149' + - 'box.error.LAST_DROP : 15' + - 'box.error.SEQUENCE_OVERFLOW : 147' - 'box.error.DECOMPRESSION : 124' - 'box.error.CREATE_SEQUENCE : 142' - 'box.error.CREATE_USER : 43' - - 'box.error.SEQUENCE_OVERFLOW : 147' + - 'box.error.DATA_MISMATCH_INDEX_PART : 55' - 'box.error.INSTANCE_UUID_MISMATCH : 66' - - 'box.error.RELOAD_CFG : 58' + - 'box.error.TUPLE_FORMAT_LIMIT : 16' - 'box.error.SYSTEM : 115' - 'box.error.KEY_PART_IS_TOO_LONG : 118' - - 'box.error.MORE_THAN_ONE_TUPLE : 41' + - 'box.error.INJECTION : 8' - 'box.error.TRUNCATE_SYSTEM_SPACE : 137' - 'box.error.NO_SUCH_SAVEPOINT : 61' - 'box.error.VY_QUOTA_TIMEOUT : 135' - - 'box.error.PRIV_NOT_GRANTED : 91' + - 'box.error.READ_VIEW_ABORTED : 130' - 'box.error.WRONG_INDEX_OPTIONS : 108' - 'box.error.INVALID_VYLOG_FILE : 133' - 'box.error.INDEX_FIELD_COUNT_LIMIT : 127' - - 'box.error.BEFORE_REPLACE_RET : 53' + - 'box.error.PROTOCOL : 104' - 'box.error.USER_MAX : 56' - - 'box.error.INVALID_MSGPACK : 20' + - 'box.error.BEFORE_REPLACE_RET : 53' - 'box.error.TUPLE_NOT_ARRAY : 22' - 'box.error.KEY_PART_COUNT : 31' - 'box.error.ALTER_SPACE : 12' @@ -435,7 +436,7 @@ t; - 'box.error.DROP_SEQUENCE : 144' - 'box.error.INVALID_XLOG_ORDER : 76' - 'box.error.UNKNOWN_REQUEST_TYPE : 48' - - 'box.error.PROC_LUA : 32' + - 'box.error.PROC_RET : 21' - 'box.error.SUB_STMT_MAX : 121' - 'box.error.ROLE_NOT_GRANTED : 92' - 'box.error.SPACE_EXISTS : 10' @@ -446,36 +447,36 @@ t; - 'box.error.REPLICASET_UUID_MISMATCH : 63' - 'box.error.UPDATE_FIELD : 29' - 'box.error.INDEX_EXISTS : 85' - - 'box.error.SPLICE : 25' + - 'box.error.DROP_SPACE : 11' - 'box.error.COMPRESSION : 119' - 'box.error.INVALID_ORDER : 68' - - 'box.error.UNKNOWN : 0' + - 'box.error.SPLICE : 25' - 'box.error.NO_SUCH_GROUP : 155' - - 'box.error.TUPLE_FORMAT_LIMIT : 16' + - 'box.error.INVALID_MSGPACK : 20' - 'box.error.DROP_PRIMARY_KEY : 17' - 'box.error.NULLABLE_PRIMARY : 152' - 'box.error.NO_SUCH_SEQUENCE : 145' - - 'box.error.INJECTION : 8' + - 'box.error.FUNCTION_MAX : 54' - 'box.error.INVALID_UUID : 64' - - 'box.error.IDENTIFIER : 70' + - 'box.error.UNSUPPORTED : 5' - 'box.error.TIMEOUT : 78' + - 'box.error.ITERATOR_TYPE : 72' - 'box.error.REPLICA_MAX : 73' - 'box.error.NO_SUCH_ROLE : 82' - - 'box.error.DROP_SPACE : 11' - 'box.error.MISSING_REQUEST_FIELD : 69' - 'box.error.MISSING_SNAPSHOT : 93' - 'box.error.WRONG_SPACE_OPTIONS : 111' - 'box.error.READONLY : 7' - - 'box.error.RTREE_RECT : 101' + - 'box.error.UNKNOWN : 0' - 'box.error.UPSERT_UNIQUE_SECONDARY_KEY : 105' - 'box.error.NO_CONNECTION : 77' - 'box.error.UNSUPPORTED_PRIV : 98' - 'box.error.WRONG_SCHEMA_VERSION : 109' - 'box.error.ROLLBACK_IN_SUB_STMT : 123' - - 'box.error.PROTOCOL : 104' - - 'box.error.INVALID_XLOG_TYPE : 125' - - 'box.error.INDEX_PART_TYPE_MISMATCH : 24' + - 'box.error.MORE_THAN_ONE_TUPLE : 41' - 'box.error.UNSUPPORTED_INDEX_FEATURE : 112' + - 'box.error.INDEX_PART_TYPE_MISMATCH : 24' + - 'box.error.INVALID_XLOG_TYPE : 125' ... 
test_run:cmd("setopt delimiter ''"); --- diff --git a/test/engine/tuple.result b/test/engine/tuple.result index 7fb0916..e9efb16 100644 --- a/test/engine/tuple.result +++ b/test/engine/tuple.result @@ -891,6 +891,277 @@ t["{"] s:drop() --- ... +-- +-- gh-1012: Indexes for JSON-defined paths. +-- +s = box.schema.space.create('withdata', {engine = engine}) +--- +... +s:create_index('test1', {parts = {{2, 'number'}, {3, 'str', path = '[3].FIO["fname"]'}, {3, 'str', path = '[3]["FIO"].fname'}}}) +--- +- error: 'Can''t create or modify index ''test1'' in space ''withdata'': same key + part is indexed twice' +... +s:create_index('test1', {parts = {{2, 'number'}, {3, 'str', path = 666}, {3, 'str', path = '[3]["FIO"]["fname"]'}}}) +--- +- error: 'Wrong index options (field 2): ''path'' must be string' +... +s:create_index('test1', {parts = {{2, 'number'}, {3, 'str', path = 'field.FIO.fname'}}}) +--- +- error: 'Wrong index options (field 2): invalid JSON path: first part should be defined + as array index' +... +s:create_index('test1', {parts = {{2, 'number'}, {3, 'map', path = '[3].FIO'}}}) +--- +- error: 'Can''t create or modify index ''test1'' in space ''withdata'': field type + ''map'' is not supported' +... +s:create_index('test1', {parts = {{2, 'number'}, {3, 'array', path = '[3][1]'}}}) +--- +- error: 'Can''t create or modify index ''test1'' in space ''withdata'': field type + ''array'' is not supported' +... +s:create_index('test1', {parts = {{2, 'number'}, {3, 'str', path = '[3].FIO'}, {3, 'str', path = '[3]["FIO"].fname'}}}) +--- +- error: Field 3 has type 'map' in one index, but type 'string' in another +... +s:create_index('test1', {parts = {{2, 'number'}, {3, 'str', path = '[3][1].sname'}, {3, 'str', path = '[3]["FIO"].fname'}}}) +--- +- error: Field 3 has type 'map' in one index, but type 'array' in another +... +s:create_index('test1', {parts = {{2, 'number'}, {3, 'str', path = '[2].FIO.fname'}}}) +--- +- error: 'Wrong index options (field 2): invalid JSON path: first part refers to invalid + field' +... +s:create_index('test1', {parts = {{2, 'number'}, {3, 'str', path = '[3].FIO....fname'}}}) +--- +- error: 'Wrong index options (field 3): invalid JSON path ''[3].FIO....fname'': path + has invalid structure (error at position 9)' +... +idx = s:create_index('test1', {parts = {{2, 'number'}, {3, 'str', path = '[3]["FIO"]["fname"]'}, {3, 'str', path = '[3]["FIO"]["sname"]'}}}) +--- +... +assert(idx ~= nil) +--- +- true +... +s:insert{7, 7, {town = 'London', FIO = 666}, 4, 5} +--- +- error: Tuple doesn't math document structure defined as index +... +s:insert{7, 7, {town = 'London', FIO = {fname = 666, sname = 'Bond'}}, 4, 5} +--- +- error: 'Supplied key type of part 2 does not match index part type: expected string' +... +s:insert{7, 7, {town = 'London', FIO = {fname = "James"}}, 4, 5} +--- +- error: Tuple doesn't math document structure defined as index +... +s:insert{7, 7, {town = 'London', FIO = {fname = 'James', sname = 'Bond'}}, 4, 5} +--- +- [7, 7, {'town': 'London', 'FIO': {'fname': 'James', 'sname': 'Bond'}}, 4, 5] +... +s:insert{7, 7, {town = 'London', FIO = {fname = 'James', sname = 'Bond'}}, 4, 5} +--- +- error: Duplicate key exists in unique index 'test1' in space 'withdata' +... +s:insert{7, 7, {town = 'London', FIO = {fname = 'James', sname = 'Bond', data = "extra"}}, 4, 5} +--- +- error: Duplicate key exists in unique index 'test1' in space 'withdata' +... 
+s:insert{7, 7, {town = 'Moscow', FIO = {fname = 'Max', sname = 'Isaev', data = "extra"}}, 4, 5} +--- +- [7, 7, {'town': 'Moscow', 'FIO': {'fname': 'Max', 'data': 'extra', 'sname': 'Isaev'}}, + 4, 5] +... +idx:select() +--- +- - [7, 7, {'town': 'London', 'FIO': {'fname': 'James', 'sname': 'Bond'}}, 4, 5] + - [7, 7, {'town': 'Moscow', 'FIO': {'fname': 'Max', 'data': 'extra', 'sname': 'Isaev'}}, + 4, 5] +... +idx:min() +--- +- [7, 7, {'town': 'London', 'FIO': {'fname': 'James', 'sname': 'Bond'}}, 4, 5] +... +idx:max() +--- +- [7, 7, {'town': 'Moscow', 'FIO': {'fname': 'Max', 'data': 'extra', 'sname': 'Isaev'}}, + 4, 5] +... +s:drop() +--- +... +s = box.schema.create_space('withdata', {engine = engine}) +--- +... +parts = {} +--- +... +parts[1] = {1, 'unsigned', path='[1][2]'} +--- +... +pk = s:create_index('pk', {parts = parts}) +--- +... +s:insert{{1, 2}, 3} +--- +- [[1, 2], 3] +... +s:upsert({{box.null, 2}}, {{'+', 2, 5}}) +--- +... +s:get(2) +--- +- [[1, 2], 8] +... +s:drop() +--- +... +-- Create index on space with data +s = box.schema.space.create('withdata', {engine = engine}) +--- +... +pk = s:create_index('primary', { type = 'tree' }) +--- +... +s:insert{1, 7, {town = 'London', FIO = 1234}, 4, 5} +--- +- [1, 7, {'town': 'London', 'FIO': 1234}, 4, 5] +... +s:insert{2, 7, {town = 'London', FIO = {fname = 'James', sname = 'Bond'}}, 4, 5} +--- +- [2, 7, {'town': 'London', 'FIO': {'fname': 'James', 'sname': 'Bond'}}, 4, 5] +... +s:insert{3, 7, {town = 'London', FIO = {fname = 'James', sname = 'Bond'}}, 4, 5} +--- +- [3, 7, {'town': 'London', 'FIO': {'fname': 'James', 'sname': 'Bond'}}, 4, 5] +... +s:create_index('test1', {parts = {{2, 'number'}, {3, 'str', path = '[3]["FIO"]["fname"]'}, {3, 'str', path = '[3]["FIO"]["sname"]'}}}) +--- +- error: Tuple doesn't math document structure defined as index +... +_ = s:delete(1) +--- +... +s:create_index('test1', {parts = {{2, 'number'}, {3, 'str', path = '[3]["FIO"]["fname"]'}, {3, 'str', path = '[3]["FIO"]["sname"]'}}}) +--- +- error: Duplicate key exists in unique index 'test1' in space 'withdata' +... +_ = s:delete(2) +--- +... +idx = s:create_index('test1', {parts = {{2, 'number'}, {3, 'str', path = '[3]["FIO"]["fname"]'}, {3, 'str', path = '[3]["FIO"]["sname"]'}}}) +--- +... +assert(idx ~= nil) +--- +- true +... +idx:select() +--- +- - [3, 7, {'town': 'London', 'FIO': {'fname': 'James', 'sname': 'Bond'}}, 4, 5] +... +idx:min() +--- +- [3, 7, {'town': 'London', 'FIO': {'fname': 'James', 'sname': 'Bond'}}, 4, 5] +... +idx:max() +--- +- [3, 7, {'town': 'London', 'FIO': {'fname': 'James', 'sname': 'Bond'}}, 4, 5] +... +idx:drop() +--- +... +s:drop() +--- +... +-- Test complex JSON indexes +s = box.schema.space.create('withdata', {engine = engine}) +--- +... +parts = {} +--- +... +parts[1] = {1, 'str', path='[1][3][2].a'} +--- +... +parts[2] = {1, 'unsigned', path = '[1][3][1]'} +--- +... +parts[3] = {2, 'str', path = '[2][2].d[1]'} +--- +... +pk = s:create_index('primary', { type = 'tree', parts = parts}) +--- +... +s:insert{{1, 2, {3, {a = 'str', b = 5}}}, {'c', {d = {'e', 'f'}, e = 'g'}}, 6} +--- +- [[1, 2, [3, {'a': 'str', 'b': 5}]], ['c', {'d': ['e', 'f'], 'e': 'g'}], 6] +... +s:insert{{1, 2, {3, {a = 'str', b = 1}}}, {'c', {d = {'e', 'f'}, e = 'g'}}, 6} +--- +- error: Duplicate key exists in unique index 'primary' in space 'withdata' +... +parts = {} +--- +... +parts[1] = {1, 'unsigned', path='[1][3][2].b' } +--- +... +parts[2] = {3, 'unsigned'} +--- +... +crosspart_idx = s:create_index('crosspart', { parts = parts}) +--- +... 
+s:insert{{1, 2, {3, {a = 'str2', b = 2}}}, {'c', {d = {'e', 'f'}, e = 'g'}}, 6} +--- +- [[1, 2, [3, {'a': 'str2', 'b': 2}]], ['c', {'d': ['e', 'f'], 'e': 'g'}], 6] +... +parts = {} +--- +... +parts[1] = {1, 'unsigned', path='[1][3][2].b'} +--- +... +num_idx = s:create_index('numeric', {parts = parts}) +--- +... +s:insert{{1, 2, {3, {a = 'str3', b = 9}}}, {'c', {d = {'e', 'f'}, e = 'g'}}, 6} +--- +- [[1, 2, [3, {'a': 'str3', 'b': 9}]], ['c', {'d': ['e', 'f'], 'e': 'g'}], 6] +... +num_idx:get(2) +--- +- [[1, 2, [3, {'a': 'str2', 'b': 2}]], ['c', {'d': ['e', 'f'], 'e': 'g'}], 6] +... +num_idx:select() +--- +- - [[1, 2, [3, {'a': 'str2', 'b': 2}]], ['c', {'d': ['e', 'f'], 'e': 'g'}], 6] + - [[1, 2, [3, {'a': 'str', 'b': 5}]], ['c', {'d': ['e', 'f'], 'e': 'g'}], 6] + - [[1, 2, [3, {'a': 'str3', 'b': 9}]], ['c', {'d': ['e', 'f'], 'e': 'g'}], 6] +... +num_idx:max() +--- +- [[1, 2, [3, {'a': 'str3', 'b': 9}]], ['c', {'d': ['e', 'f'], 'e': 'g'}], 6] +... +num_idx:min() +--- +- [[1, 2, [3, {'a': 'str2', 'b': 2}]], ['c', {'d': ['e', 'f'], 'e': 'g'}], 6] +... +assert(crosspart_idx:max() == num_idx:max()) +--- +- true +... +assert(crosspart_idx:min() == num_idx:min()) +--- +- true +... +s:drop() +--- +... engine = nil --- ... diff --git a/test/engine/tuple.test.lua b/test/engine/tuple.test.lua index 30d6f1a..d20a547 100644 --- a/test/engine/tuple.test.lua +++ b/test/engine/tuple.test.lua @@ -289,5 +289,85 @@ t["{"] s:drop() +-- +-- gh-1012: Indexes for JSON-defined paths. +-- +s = box.schema.space.create('withdata', {engine = engine}) +s:create_index('test1', {parts = {{2, 'number'}, {3, 'str', path = '[3].FIO["fname"]'}, {3, 'str', path = '[3]["FIO"].fname'}}}) +s:create_index('test1', {parts = {{2, 'number'}, {3, 'str', path = 666}, {3, 'str', path = '[3]["FIO"]["fname"]'}}}) +s:create_index('test1', {parts = {{2, 'number'}, {3, 'str', path = 'field.FIO.fname'}}}) +s:create_index('test1', {parts = {{2, 'number'}, {3, 'map', path = '[3].FIO'}}}) +s:create_index('test1', {parts = {{2, 'number'}, {3, 'array', path = '[3][1]'}}}) +s:create_index('test1', {parts = {{2, 'number'}, {3, 'str', path = '[3].FIO'}, {3, 'str', path = '[3]["FIO"].fname'}}}) +s:create_index('test1', {parts = {{2, 'number'}, {3, 'str', path = '[3][1].sname'}, {3, 'str', path = '[3]["FIO"].fname'}}}) +s:create_index('test1', {parts = {{2, 'number'}, {3, 'str', path = '[2].FIO.fname'}}}) +s:create_index('test1', {parts = {{2, 'number'}, {3, 'str', path = '[3].FIO....fname'}}}) +idx = s:create_index('test1', {parts = {{2, 'number'}, {3, 'str', path = '[3]["FIO"]["fname"]'}, {3, 'str', path = '[3]["FIO"]["sname"]'}}}) +assert(idx ~= nil) +s:insert{7, 7, {town = 'London', FIO = 666}, 4, 5} +s:insert{7, 7, {town = 'London', FIO = {fname = 666, sname = 'Bond'}}, 4, 5} +s:insert{7, 7, {town = 'London', FIO = {fname = "James"}}, 4, 5} +s:insert{7, 7, {town = 'London', FIO = {fname = 'James', sname = 'Bond'}}, 4, 5} +s:insert{7, 7, {town = 'London', FIO = {fname = 'James', sname = 'Bond'}}, 4, 5} +s:insert{7, 7, {town = 'London', FIO = {fname = 'James', sname = 'Bond', data = "extra"}}, 4, 5} +s:insert{7, 7, {town = 'Moscow', FIO = {fname = 'Max', sname = 'Isaev', data = "extra"}}, 4, 5} +idx:select() +idx:min() +idx:max() +s:drop() + +s = box.schema.create_space('withdata', {engine = engine}) +parts = {} +parts[1] = {1, 'unsigned', path='[1][2]'} +pk = s:create_index('pk', {parts = parts}) +s:insert{{1, 2}, 3} +s:upsert({{box.null, 2}}, {{'+', 2, 5}}) +s:get(2) +s:drop() + +-- Create index on space with data +s = 
box.schema.space.create('withdata', {engine = engine}) +pk = s:create_index('primary', { type = 'tree' }) +s:insert{1, 7, {town = 'London', FIO = 1234}, 4, 5} +s:insert{2, 7, {town = 'London', FIO = {fname = 'James', sname = 'Bond'}}, 4, 5} +s:insert{3, 7, {town = 'London', FIO = {fname = 'James', sname = 'Bond'}}, 4, 5} +s:create_index('test1', {parts = {{2, 'number'}, {3, 'str', path = '[3]["FIO"]["fname"]'}, {3, 'str', path = '[3]["FIO"]["sname"]'}}}) +_ = s:delete(1) +s:create_index('test1', {parts = {{2, 'number'}, {3, 'str', path = '[3]["FIO"]["fname"]'}, {3, 'str', path = '[3]["FIO"]["sname"]'}}}) +_ = s:delete(2) +idx = s:create_index('test1', {parts = {{2, 'number'}, {3, 'str', path = '[3]["FIO"]["fname"]'}, {3, 'str', path = '[3]["FIO"]["sname"]'}}}) +assert(idx ~= nil) +idx:select() +idx:min() +idx:max() +idx:drop() +s:drop() + +-- Test complex JSON indexes +s = box.schema.space.create('withdata', {engine = engine}) +parts = {} +parts[1] = {1, 'str', path='[1][3][2].a'} +parts[2] = {1, 'unsigned', path = '[1][3][1]'} +parts[3] = {2, 'str', path = '[2][2].d[1]'} +pk = s:create_index('primary', { type = 'tree', parts = parts}) +s:insert{{1, 2, {3, {a = 'str', b = 5}}}, {'c', {d = {'e', 'f'}, e = 'g'}}, 6} +s:insert{{1, 2, {3, {a = 'str', b = 1}}}, {'c', {d = {'e', 'f'}, e = 'g'}}, 6} +parts = {} +parts[1] = {1, 'unsigned', path='[1][3][2].b' } +parts[2] = {3, 'unsigned'} +crosspart_idx = s:create_index('crosspart', { parts = parts}) +s:insert{{1, 2, {3, {a = 'str2', b = 2}}}, {'c', {d = {'e', 'f'}, e = 'g'}}, 6} +parts = {} +parts[1] = {1, 'unsigned', path='[1][3][2].b'} +num_idx = s:create_index('numeric', {parts = parts}) +s:insert{{1, 2, {3, {a = 'str3', b = 9}}}, {'c', {d = {'e', 'f'}, e = 'g'}}, 6} +num_idx:get(2) +num_idx:select() +num_idx:max() +num_idx:min() +assert(crosspart_idx:max() == num_idx:max()) +assert(crosspart_idx:min() == num_idx:min()) +s:drop() + engine = nil test_run = nil -- 2.7.4
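
A minimal usage sketch of the JSON-path index syntax that the tests above exercise, for reviewers who want to try the feature by hand. The space and index names below are made up for illustration and are not part of the patch; note that in this version of the patch the path string starts with the indexed field number (e.g. '[3]...'), as the error-message tests above check.

s = box.schema.space.create('demo')
_ = s:create_index('pk')
-- Secondary index on the nested document field tuple[3].FIO.fname.
fio = s:create_index('fio', {parts = {{3, 'str', path = '[3]["FIO"]["fname"]'}}})
s:insert{1, 7, {town = 'London', FIO = {fname = 'James', sname = 'Bond'}}}
fio:select('James')  -- the tuple is found via the nested "fname" value
s:drop()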