From: Kirill Shcherbatov
Subject: [PATCH v8 4/5] box: introduce offset_slot cache in key_part
Date: Wed, 16 Jan 2019 16:44:42 +0300
To: tarantool-patches@freelists.org, vdavydov.dev@gmail.com
Cc: kostja@tarantool.org, Kirill Shcherbatov

tuple_field_by_part looks up the tuple_field corresponding to the
given key part in tuple_format in order to quickly retrieve the offset
of indexed data from the tuple field map. For regular indexes this
operation is blazing fast; for JSON indexes, however, it is not, as we
have to parse the path to the data and then do multiple lookups in a
JSON tree. Since tuple_field_by_part is used by comparators, we should
strive to make this routine as fast as possible for all kinds of
indexes.

This patch introduces an optimization that is supposed to make
tuple_field_by_part for JSON indexes as fast as it is for regular
indexes in most cases. We do that by caching the offset slot right in
key_part. There's a catch here, however: we create a new format
whenever an index is dropped or created, and we don't reindex old
tuples. As a result, there may be several generations of tuples in the
same space, all using different formats, while there's only one
key_def used for comparison.

To overcome this problem, we introduce the notion of tuple_format
epoch. This is a counter incremented each time a new format is
created. We store it in tuple_format and key_part, and we only use the
offset slot cached in a key_part if its epoch coincides with the epoch
of the tuple format. If the epochs differ, we look up the tuple_field
as before and then update the cached value along with the epoch of the
tuple format. (A self-contained sketch of this scheme follows the
patch.)

Part of #1012
---
 src/box/key_def.c      | 15 ++++++++++-----
 src/box/key_def.h      | 14 ++++++++++++++
 src/box/tuple.h        |  2 +-
 src/box/tuple_format.c |  4 +++-
 src/box/tuple_format.h | 36 +++++++++++++++++++++++++++++++++---
 5 files changed, 61 insertions(+), 10 deletions(-)

diff --git a/src/box/key_def.c b/src/box/key_def.c
index 0ed497dfc..b5e93f117 100644
--- a/src/box/key_def.c
+++ b/src/box/key_def.c
@@ -139,7 +139,8 @@ key_def_set_part(struct key_def *def, uint32_t part_no, uint32_t fieldno,
 		 enum field_type type, enum on_conflict_action nullable_action,
 		 struct coll *coll, uint32_t coll_id,
 		 enum sort_order sort_order, const char *path,
-		 uint32_t path_len, char **path_pool)
+		 uint32_t path_len, char **path_pool, int32_t offset_slot_cache,
+		 uint64_t format_epoch)
 {
 	assert(part_no < def->part_count);
 	assert(type < field_type_MAX);
@@ -151,6 +152,8 @@ key_def_set_part(struct key_def *def, uint32_t part_no, uint32_t fieldno,
 	def->parts[part_no].coll = coll;
 	def->parts[part_no].coll_id = coll_id;
 	def->parts[part_no].sort_order = sort_order;
+	def->parts[part_no].offset_slot_cache = offset_slot_cache;
+	def->parts[part_no].format_epoch = format_epoch;
 	if (path != NULL) {
 		assert(path_pool != NULL);
 		def->parts[part_no].path = *path_pool;
@@ -199,7 +202,7 @@ key_def_new(const struct key_part_def *parts, uint32_t part_count)
 		key_def_set_part(def, i, part->fieldno, part->type,
 				 part->nullable_action, coll, part->coll_id,
 				 part->sort_order, part->path, path_len,
-				 &path_pool);
+				 &path_pool, TUPLE_OFFSET_SLOT_NIL, 0);
 	}
 	key_def_set_cmp(def);
 	return def;
@@ -252,7 +255,7 @@ box_key_def_new(uint32_t *fields, uint32_t *types, uint32_t part_count)
 				 (enum field_type)types[item],
 				 ON_CONFLICT_ACTION_DEFAULT,
 				 NULL, COLL_NONE, SORT_ORDER_ASC, NULL, 0,
-				 NULL);
+				 NULL, TUPLE_OFFSET_SLOT_NIL, 0);
 	}
 	key_def_set_cmp(key_def);
 	return key_def;
@@ -662,7 +665,8 @@ key_def_merge(const struct key_def *first, const struct key_def *second)
 		key_def_set_part(new_def, pos++, part->fieldno, part->type,
 				 part->nullable_action, part->coll,
 				 part->coll_id, part->sort_order, part->path,
-				 part->path_len, &path_pool);
+				 part->path_len, &path_pool,
+				 part->offset_slot_cache, part->format_epoch);
 	}
 
 	/* Set-append second key def's part to the new key def. */
@@ -674,7 +678,8 @@ key_def_merge(const struct key_def *first, const struct key_def *second)
 		key_def_set_part(new_def, pos++, part->fieldno, part->type,
 				 part->nullable_action, part->coll,
 				 part->coll_id, part->sort_order, part->path,
-				 part->path_len, &path_pool);
+				 part->path_len, &path_pool,
+				 part->offset_slot_cache, part->format_epoch);
 	}
 	key_def_set_cmp(new_def);
 	return new_def;
diff --git a/src/box/key_def.h b/src/box/key_def.h
index fe4acffb5..7a71ed060 100644
--- a/src/box/key_def.h
+++ b/src/box/key_def.h
@@ -97,6 +97,20 @@ struct key_part {
 	char *path;
 	/** The length of JSON path. */
 	uint32_t path_len;
+	/**
+	 * Epoch of the tuple format the offset slot cached in
+	 * this part is valid for, see tuple_format::epoch.
+	 */
+	uint64_t format_epoch;
+	/**
+	 * Cached value of the offset slot corresponding to
+	 * the indexed field (tuple_field::offset_slot).
+	 * Valid only if format_epoch equals the epoch of
+	 * the tuple format. This value is updated in
+	 * tuple_field_by_part_raw to always store the
+	 * offset corresponding to the last used tuple format.
+	 */
+	int32_t offset_slot_cache;
 };
 
 struct key_def;
diff --git a/src/box/tuple.h b/src/box/tuple.h
index 4368dac4e..f8a4e6f22 100644
--- a/src/box/tuple.h
+++ b/src/box/tuple.h
@@ -527,7 +527,7 @@ tuple_field_by_path(const struct tuple *tuple, uint32_t fieldno,
 {
 	return tuple_field_raw_by_path(tuple_format(tuple), tuple_data(tuple),
 				       tuple_field_map(tuple), fieldno,
-				       path, path_len);
+				       path, path_len, NULL);
 }
 
 /**
diff --git a/src/box/tuple_format.c b/src/box/tuple_format.c
index 9af9c307a..3cec1cc44 100644
--- a/src/box/tuple_format.c
+++ b/src/box/tuple_format.c
@@ -39,6 +39,7 @@ struct tuple_format **tuple_formats;
 static intptr_t recycled_format_ids = FORMAT_ID_NIL;
 
 static uint32_t formats_size = 0, formats_capacity = 0;
+static uint64_t formats_epoch = 0;
 
 static struct tuple_field *
 tuple_field_new(void)
@@ -540,6 +541,7 @@ tuple_format_new(struct tuple_format_vtab *vtab, struct key_def * const *keys,
 	format->vtab = *vtab;
 	format->engine = NULL;
 	format->is_temporary = false;
+	format->epoch = ++formats_epoch;
 	if (tuple_format_register(format) < 0) {
 		tuple_format_destroy(format);
 		free(format);
@@ -968,5 +970,5 @@ tuple_field_raw_by_full_path(struct tuple_format *format, const char *tuple,
 	}
 	return tuple_field_raw_by_path(format, tuple, field_map, fieldno,
 				       path + lexer.offset,
-				       path_len - lexer.offset);
+				       path_len - lexer.offset, NULL);
 }
diff --git a/src/box/tuple_format.h b/src/box/tuple_format.h
index 311bacbe8..fce0b9c93 100644
--- a/src/box/tuple_format.h
+++ b/src/box/tuple_format.h
@@ -137,6 +137,12 @@ tuple_field_is_nullable(struct tuple_field *tuple_field)
 * Tuple format describes how tuple is stored and information about its fields
 */
 struct tuple_format {
+	/**
+	 * Counter incremented each time a new format is
+	 * created; used to validate the offset slot cached
+	 * in key_part, see key_part::offset_slot_cache.
+	 */
+	uint64_t epoch;
 	/** Virtual function table */
 	struct tuple_format_vtab vtab;
 	/** Pointer to engine-specific data. */
@@ -436,12 +442,17 @@ tuple_field_go_to_path(const char **data, const char *path, uint32_t path_len);
 /**
 * Get a field at the specific position in this MessagePack
 * array by fieldno and path.
+ * If @offset_slot is not NULL, it is set to the offset slot
+ * of the field, or TUPLE_OFFSET_SLOT_NIL if it has none.
 */
 static inline const char *
 tuple_field_raw_by_path(struct tuple_format *format, const char *tuple,
 			const uint32_t *field_map, uint32_t fieldno,
-			const char *path, uint32_t path_len)
+			const char *path, uint32_t path_len,
+			int32_t *offset_slot)
 {
+	if (offset_slot != NULL)
+		*offset_slot = TUPLE_OFFSET_SLOT_NIL;
 	if (likely(path != NULL && fieldno < format->index_field_count)) {
 		/* Indexed field */
 		struct tuple_field *field =
@@ -450,6 +461,8 @@ tuple_field_raw_by_path(struct tuple_format *format, const char *tuple,
 		if (field == NULL)
 			goto parse;
 		assert(field != NULL);
+		if (offset_slot != NULL)
+			*offset_slot = field->offset_slot;
 		if (field->offset_slot != TUPLE_OFFSET_SLOT_NIL) {
 			assert(-field->offset_slot * sizeof(uint32_t) <=
 			       format->field_map_size);
@@ -496,8 +509,25 @@ static inline const char *
 tuple_field_by_part_raw(struct tuple_format *format, const char *data,
 			const uint32_t *field_map, struct key_part *part)
 {
-	return tuple_field_raw_by_path(format, data, field_map, part->fieldno,
-				       part->path, part->path_len);
+	if (likely(part->format_epoch == format->epoch)) {
+		int32_t offset_slot = part->offset_slot_cache;
+		assert(-offset_slot * sizeof(uint32_t) <=
+		       format->field_map_size);
+		return field_map[offset_slot] != 0 ?
+		       data + field_map[offset_slot] : NULL;
+	} else {
+		assert(format->epoch != 0);
+		int32_t offset_slot;
+		const char *field =
+			tuple_field_raw_by_path(format, data, field_map,
						part->fieldno, part->path,
						part->path_len, &offset_slot);
+		if (offset_slot != TUPLE_OFFSET_SLOT_NIL) {
+			part->format_epoch = format->epoch;
+			part->offset_slot_cache = offset_slot;
+		}
+		return field;
+	}
 }
 
 #if defined(__cplusplus)
-- 
2.19.2
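
For illustration, here is a minimal self-contained sketch of the
epoch-validated cache scheme described in the commit message. It is
not part of the patch: struct fmt, struct part, SLOT_NIL, slow_lookup
and part_slot are simplified stand-ins for tuple_format, key_part,
TUPLE_OFFSET_SLOT_NIL and the real lookup routines, and the field-map
dereference of the real fast path is omitted.

#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

#define SLOT_NIL INT32_MIN

struct fmt {
	uint64_t epoch;		/* bumped each time a format is created */
};

struct part {
	uint64_t format_epoch;	/* epoch the cached slot is valid for */
	int32_t slot_cache;	/* cached offset slot, SLOT_NIL if unset */
};

/* Stand-in for the slow path: parse the JSON path, walk the field
 * tree, etc. Returns a negative slot, as the real field map does. */
static int32_t
slow_lookup(const struct fmt *f)
{
	(void)f;
	return -1;
}

static int32_t
part_slot(struct fmt *f, struct part *p)
{
	if (p->format_epoch == f->epoch)
		return p->slot_cache;	/* fast path: cache still valid */
	int32_t slot = slow_lookup(f);	/* slow path: full lookup */
	if (slot != SLOT_NIL) {
		/* Refresh the cache together with its epoch. */
		p->format_epoch = f->epoch;
		p->slot_cache = slot;
	}
	return slot;
}

int
main(void)
{
	struct fmt f = { .epoch = 1 };
	struct part p = { .format_epoch = 0, .slot_cache = SLOT_NIL };
	printf("%" PRId32 "\n", part_slot(&f, &p)); /* slow, fills cache */
	printf("%" PRId32 "\n", part_slot(&f, &p)); /* fast, cache hit */
	f.epoch = 2;                                /* format rebuilt */
	printf("%" PRId32 "\n", part_slot(&f, &p)); /* cache invalidated */
	return 0;
}

Note that a stale cache is never consulted: the part starts with epoch
0 while live formats start at 1, so the first lookup always takes the
slow path, and any format rebuild bumps the epoch and forces a refresh.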