From: Kirill Shcherbatov <kshcherbatov@tarantool.org>
To: tarantool-patches@freelists.org
Cc: v.shpilevoy@tarantool.org,
Kirill Shcherbatov <kshcherbatov@tarantool.org>
Subject: [tarantool-patches] [PATCH v3 2/4] box: introduce slot_cache in key_part
Date: Mon, 27 Aug 2018 10:37:28 +0300 [thread overview]
Message-ID: <9be616bdd2190a0ab38aaee981098811f542eeb1.1535354849.git.kshcherbatov@tarantool.org> (raw)
In-Reply-To: <cover.1535354849.git.kshcherbatov@tarantool.org>
In-Reply-To: <cover.1535354849.git.kshcherbatov@tarantool.org>
The same key_part could be used in multiple formats, so a
different field->offset_slot would be allocated in each of them.
In most scenarios we work with a series of tuples of the same
format, and (in general) a format lookup for a field would be an
expensive operation for JSON paths defined in a key_part.
A new slot_cache field in the key_part structure and an epoch-based
mechanism to validate its actuality should be an effective
approach to improve performance.
The new routine tuple_field_by_part uses a tuple and a key_part
to access a field, which allows reworking and speeding up all
scenarios of accessing tuple data by index.
This also allows working with JSON-path key_parts later.
Part of #1012.
---
src/box/alter.cc | 38 ++++++++++++++++++
src/box/key_def.c | 2 +
src/box/key_def.h | 11 ++++++
src/box/memtx_bitset.c | 8 +++-
src/box/memtx_rtree.c | 6 ++-
src/box/tuple_compare.cc | 94 +++++++++++++++++++++++++++++++++-----------
| 44 ++++++++++++++-------
src/box/tuple_format.c | 12 ++++++
src/box/tuple_format.h | 18 +++++++++
src/box/tuple_hash.cc | 53 ++++++++++++++++++++-----
src/box/vy_stmt.h | 7 +++-
11 files changed, 243 insertions(+), 50 deletions(-)
diff --git a/src/box/alter.cc b/src/box/alter.cc
index a6299a1..a46b886 100644
--- a/src/box/alter.cc
+++ b/src/box/alter.cc
@@ -1032,6 +1032,42 @@ ModifySpace::~ModifySpace()
space_def_delete(new_def);
}
+class ModifySpaceFormat: public AlterSpaceOp
+{
+public:
+ ModifySpaceFormat(struct alter_space *alter) : AlterSpaceOp(alter) {}
+ virtual void alter(struct alter_space *alter);
+};
+
+void
+ModifySpaceFormat:: alter(struct alter_space * alter)
+{
+ struct tuple_format *format = alter->new_space != NULL ?
+ alter->new_space->format : NULL;
+ if (format == NULL)
+ return;
+ struct rlist *key_list = &alter->key_list;
+ bool is_format_epoch_changed = false;
+ struct index_def *index_def;
+ rlist_foreach_entry(index_def, key_list, link) {
+ struct key_part *part = index_def->key_def->parts;
+ struct key_part *parts_end =
+ part + index_def->key_def->part_count;
+ for (; part < parts_end; part++) {
+ struct tuple_field *field =
+ &format->fields[part->fieldno];
+ if (field->offset_slot != part->offset_slot)
+ is_format_epoch_changed = true;
+ }
+ }
+ format->epoch = alter->old_space != NULL &&
+ alter->old_space->format != NULL ?
+ alter->old_space->format->epoch : 0;
+ if (is_format_epoch_changed)
+ format->epoch++;
+}
+
+
/** DropIndex - remove an index from space. */
class DropIndex: public AlterSpaceOp
@@ -1316,6 +1352,7 @@ RebuildIndex::prepare(struct alter_space *alter)
/* Get the new index and build it. */
new_index = space_index(alter->new_space, new_index_def->iid);
assert(new_index != NULL);
+ assert(alter->new_space != NULL && alter->old_space != NULL);
space_build_index_xc(alter->old_space, new_index,
alter->new_space->format);
}
@@ -1922,6 +1959,7 @@ on_replace_dd_index(struct trigger * /* trigger */, void *event)
index_def_guard.is_active = false;
}
}
+ (void) new ModifySpaceFormat(alter);
/*
* Create MoveIndex ops for the remaining indexes in the
* old space.
diff --git a/src/box/key_def.c b/src/box/key_def.c
index ee09dc9..440d41e 100644
--- a/src/box/key_def.c
+++ b/src/box/key_def.c
@@ -258,6 +258,8 @@ key_def_set_part(struct key_def *def, uint32_t part_no, uint32_t fieldno,
def->parts[part_no].type = type;
def->parts[part_no].coll = coll;
def->parts[part_no].coll_id = coll_id;
+ def->parts[part_no].offset_slot = TUPLE_OFFSET_SLOT_NIL;
+ def->parts[part_no].offset_slot_epoch = 0;
column_mask_set_fieldno(&def->column_mask, fieldno);
/**
* When all parts are set, initialize the tuple
diff --git a/src/box/key_def.h b/src/box/key_def.h
index aecbe03..a32c34c 100644
--- a/src/box/key_def.h
+++ b/src/box/key_def.h
@@ -74,6 +74,17 @@ struct key_part {
struct coll *coll;
/** True if a part can store NULLs. */
bool is_nullable;
+ /**
+ * Epoch of the offset slot cache. Initialized with the
+ * incremental epoch of the format on caching its field's
+ * offset_slot via tuple_field_by_part_raw to speed up
+ * access on subsequent calls with the same format.
+ * Cache is expected to use "the eldest format is most
+ * relevant" strategy.
+ */
+ uint64_t offset_slot_epoch;
+ /** Cache with format's field offset slot. */
+ int32_t offset_slot;
};
struct key_def;
diff --git a/src/box/memtx_bitset.c b/src/box/memtx_bitset.c
index a665f1a..9529618 100644
--- a/src/box/memtx_bitset.c
+++ b/src/box/memtx_bitset.c
@@ -283,8 +283,12 @@ memtx_bitset_index_replace(struct index *base, struct tuple *old_tuple,
}
if (new_tuple != NULL) {
- const char *field;
- field = tuple_field(new_tuple, base->def->key_def->parts[0].fieldno);
+ const char *field =
+ tuple_field_by_part_raw(tuple_format(new_tuple),
+ tuple_data(new_tuple),
+ tuple_field_map(new_tuple),
+ (struct key_part *)
+ base->def->key_def->parts);
uint32_t key_len;
const void *key = make_key(field, &key_len);
#ifndef OLD_GOOD_BITSET
diff --git a/src/box/memtx_rtree.c b/src/box/memtx_rtree.c
index 0b12cda..00aaf79 100644
--- a/src/box/memtx_rtree.c
+++ b/src/box/memtx_rtree.c
@@ -112,7 +112,11 @@ extract_rectangle(struct rtree_rect *rect, const struct tuple *tuple,
struct index_def *index_def)
{
assert(index_def->key_def->part_count == 1);
- const char *elems = tuple_field(tuple, index_def->key_def->parts[0].fieldno);
+ const char *elems =
+ tuple_field_by_part_raw(tuple_format(tuple), tuple_data(tuple),
+ tuple_field_map(tuple),
+ (struct key_part *)
+ index_def->key_def->parts);
unsigned dimension = index_def->opts.dimension;
uint32_t count = mp_decode_array(&elems);
return mp_decode_rect(rect, dimension, elems, count, "Field");
diff --git a/src/box/tuple_compare.cc b/src/box/tuple_compare.cc
index e53afba..5d7df4d 100644
--- a/src/box/tuple_compare.cc
+++ b/src/box/tuple_compare.cc
@@ -431,10 +431,20 @@ tuple_common_key_parts(const struct tuple *tuple_a,
const struct key_def *key_def)
{
uint32_t i;
+ struct tuple_format *tuple_a_format = tuple_format(tuple_a);
+ struct tuple_format *tuple_b_format = tuple_format(tuple_b);
+ const char *tuple_a_raw = tuple_data(tuple_a);
+ const char *tuple_b_raw = tuple_data(tuple_b);
+ const uint32_t *tuple_a_field_map = tuple_field_map(tuple_a);
+ const uint32_t *tuple_b_field_map = tuple_field_map(tuple_b);
for (i = 0; i < key_def->part_count; i++) {
- const struct key_part *part = &key_def->parts[i];
- const char *field_a = tuple_field(tuple_a, part->fieldno);
- const char *field_b = tuple_field(tuple_b, part->fieldno);
+ struct key_part *part = (struct key_part *)&key_def->parts[i];
+ const char *field_a =
+ tuple_field_by_part_raw(tuple_a_format, tuple_a_raw,
+ tuple_a_field_map, part);
+ const char *field_b =
+ tuple_field_by_part_raw(tuple_b_format, tuple_b_raw,
+ tuple_b_field_map, part);
enum mp_type a_type = field_a != NULL ?
mp_typeof(*field_a) : MP_NIL;
enum mp_type b_type = field_b != NULL ?
@@ -449,7 +459,7 @@ tuple_common_key_parts(const struct tuple *tuple_a,
return i;
}
-template<bool is_nullable, bool has_optional_parts>
+template<bool is_nullable, bool has_optional_parts, bool has_json_path>
static inline int
tuple_compare_slowpath(const struct tuple *tuple_a, const struct tuple *tuple_b,
const struct key_def *key_def)
@@ -498,10 +508,23 @@ tuple_compare_slowpath(const struct tuple *tuple_a, const struct tuple *tuple_b,
end = part + key_def->part_count;
for (; part < end; part++) {
- field_a = tuple_field_raw(format_a, tuple_a_raw, field_map_a,
- part->fieldno);
- field_b = tuple_field_raw(format_b, tuple_b_raw, field_map_b,
- part->fieldno);
+ if (!has_json_path) {
+ field_a = tuple_field_raw(format_a, tuple_a_raw,
+ field_map_a,
+ part->fieldno);
+ field_b = tuple_field_raw(format_b, tuple_b_raw,
+ field_map_b,
+ part->fieldno);
+ } else {
+ field_a = tuple_field_by_part_raw(format_a, tuple_a_raw,
+ field_map_a,
+ (struct key_part *)
+ part);
+ field_b = tuple_field_by_part_raw(format_b, tuple_b_raw,
+ field_map_b,
+ (struct key_part *)
+ part);
+ }
assert(has_optional_parts ||
(field_a != NULL && field_b != NULL));
if (! is_nullable) {
@@ -548,10 +571,23 @@ tuple_compare_slowpath(const struct tuple *tuple_a, const struct tuple *tuple_b,
*/
end = key_def->parts + key_def->part_count;
for (; part < end; ++part) {
- field_a = tuple_field_raw(format_a, tuple_a_raw, field_map_a,
- part->fieldno);
- field_b = tuple_field_raw(format_b, tuple_b_raw, field_map_b,
- part->fieldno);
+ if (!has_json_path) {
+ field_a = tuple_field_raw(format_a, tuple_a_raw,
+ field_map_a,
+ part->fieldno);
+ field_b = tuple_field_raw(format_b, tuple_b_raw,
+ field_map_b,
+ part->fieldno);
+ } else {
+ field_a = tuple_field_by_part_raw(format_a, tuple_a_raw,
+ field_map_a,
+ (struct key_part *)
+ part);
+ field_b = tuple_field_by_part_raw(format_b, tuple_b_raw,
+ field_map_b,
+ (struct key_part *)
+ part);
+ }
/*
* Extended parts are primary, and they can not
* be absent or be NULLs.
@@ -565,7 +601,7 @@ tuple_compare_slowpath(const struct tuple *tuple_a, const struct tuple *tuple_b,
return 0;
}
-template<bool is_nullable, bool has_optional_parts>
+template<bool is_nullable, bool has_optional_parts, bool has_json_paths>
static inline int
tuple_compare_with_key_slowpath(const struct tuple *tuple, const char *key,
uint32_t part_count,
@@ -583,8 +619,14 @@ tuple_compare_with_key_slowpath(const struct tuple *tuple, const char *key,
enum mp_type a_type, b_type;
if (likely(part_count == 1)) {
const char *field;
- field = tuple_field_raw(format, tuple_raw, field_map,
- part->fieldno);
+ if (!has_json_paths) {
+ field = tuple_field_raw(format, tuple_raw, field_map,
+ part->fieldno);
+ } else {
+ field = tuple_field_by_part_raw(format, tuple_raw,
+ field_map,
+ (struct key_part *)part);
+ }
if (! is_nullable) {
return tuple_compare_field(field, key, part->type,
part->coll);
@@ -609,8 +651,14 @@ tuple_compare_with_key_slowpath(const struct tuple *tuple, const char *key,
int rc;
for (; part < end; ++part, mp_next(&key)) {
const char *field;
- field = tuple_field_raw(format, tuple_raw, field_map,
- part->fieldno);
+ if (!has_json_paths) {
+ field = tuple_field_raw(format, tuple_raw, field_map,
+ part->fieldno);
+ } else {
+ field = tuple_field_by_part_raw(format, tuple_raw,
+ field_map,
+ (struct key_part *)part);
+ }
if (! is_nullable) {
rc = tuple_compare_field(field, key, part->type,
part->coll);
@@ -1016,9 +1064,9 @@ tuple_compare_create(const struct key_def *def)
else
return tuple_compare_sequential<true, false>;
} else if (def->has_optional_parts) {
- return tuple_compare_slowpath<true, true>;
+ return tuple_compare_slowpath<true, true, false>;
} else {
- return tuple_compare_slowpath<true, false>;
+ return tuple_compare_slowpath<true, false, false>;
}
}
assert(! def->has_optional_parts);
@@ -1041,7 +1089,7 @@ tuple_compare_create(const struct key_def *def)
if (key_def_is_sequential(def))
return tuple_compare_sequential<false, false>;
else
- return tuple_compare_slowpath<false, false>;
+ return tuple_compare_slowpath<false, false, false>;
}
/* }}} tuple_compare */
@@ -1236,9 +1284,9 @@ tuple_compare_with_key_create(const struct key_def *def)
false>;
}
} else if (def->has_optional_parts) {
- return tuple_compare_with_key_slowpath<true, true>;
+ return tuple_compare_with_key_slowpath<true, true, false>;
} else {
- return tuple_compare_with_key_slowpath<true, false>;
+ return tuple_compare_with_key_slowpath<true, false, false>;
}
}
assert(! def->has_optional_parts);
@@ -1264,7 +1312,7 @@ tuple_compare_with_key_create(const struct key_def *def)
if (key_def_is_sequential(def))
return tuple_compare_with_key_sequential<false, false>;
else
- return tuple_compare_with_key_slowpath<false, false>;
+ return tuple_compare_with_key_slowpath<false, false, false>;
}
/* }}} tuple_compare_with_key */
--git a/src/box/tuple_extract_key.cc b/src/box/tuple_extract_key.cc
index 880abb6..2ea405a 100644
--- a/src/box/tuple_extract_key.cc
+++ b/src/box/tuple_extract_key.cc
@@ -91,7 +91,8 @@ tuple_extract_key_sequential(const struct tuple *tuple,
* General-purpose implementation of tuple_extract_key()
* @copydoc tuple_extract_key()
*/
-template <bool contains_sequential_parts, bool has_optional_parts>
+template <bool contains_sequential_parts, bool has_optional_parts,
+ bool has_json_paths>
static char *
tuple_extract_key_slowpath(const struct tuple *tuple,
const struct key_def *key_def, uint32_t *key_size)
@@ -110,9 +111,15 @@ tuple_extract_key_slowpath(const struct tuple *tuple,
/* Calculate the key size. */
for (uint32_t i = 0; i < part_count; ++i) {
- const char *field =
- tuple_field_raw(format, data, field_map,
- key_def->parts[i].fieldno);
+ const char *field;
+ if (!has_json_paths) {
+ field = tuple_field_raw(format, data, field_map,
+ key_def->parts[i].fieldno);
+ } else {
+ field = tuple_field_by_part_raw(format, data, field_map,
+ (struct key_part *)
+ &key_def->parts[i]);
+ }
if (has_optional_parts && field == NULL) {
bsize += mp_sizeof_nil();
continue;
@@ -152,9 +159,15 @@ tuple_extract_key_slowpath(const struct tuple *tuple,
}
char *key_buf = mp_encode_array(key, part_count);
for (uint32_t i = 0; i < part_count; ++i) {
- const char *field =
- tuple_field_raw(format, data, field_map,
- key_def->parts[i].fieldno);
+ const char *field;
+ if (!has_json_paths) {
+ field = tuple_field_raw(format, data, field_map,
+ key_def->parts[i].fieldno);
+ } else {
+ field = tuple_field_by_part_raw(format, data, field_map,
+ (struct key_part *)
+ &key_def->parts[i]);
+ }
if (has_optional_parts && field == NULL) {
key_buf = mp_encode_nil(key_buf);
continue;
@@ -201,7 +214,7 @@ tuple_extract_key_slowpath(const struct tuple *tuple,
* General-purpose version of tuple_extract_key_raw()
* @copydoc tuple_extract_key_raw()
*/
-template <bool has_optional_parts>
+template <bool has_optional_parts, bool has_json_paths>
static char *
tuple_extract_key_slowpath_raw(const char *data, const char *data_end,
const struct key_def *key_def,
@@ -318,18 +331,21 @@ tuple_extract_key_set(struct key_def *key_def)
assert(key_def->is_nullable);
if (key_def_contains_sequential_parts(key_def)) {
key_def->tuple_extract_key =
- tuple_extract_key_slowpath<true, true>;
+ tuple_extract_key_slowpath<true, true,
+ false>;
} else {
key_def->tuple_extract_key =
- tuple_extract_key_slowpath<false, true>;
+ tuple_extract_key_slowpath<false, true,
+ false>;
}
} else {
if (key_def_contains_sequential_parts(key_def)) {
key_def->tuple_extract_key =
- tuple_extract_key_slowpath<true, false>;
+ tuple_extract_key_slowpath<true, false,
+ false>;
} else {
key_def->tuple_extract_key =
- tuple_extract_key_slowpath<false,
+ tuple_extract_key_slowpath<false, false,
false>;
}
}
@@ -337,9 +353,9 @@ tuple_extract_key_set(struct key_def *key_def)
if (key_def->has_optional_parts) {
assert(key_def->is_nullable);
key_def->tuple_extract_key_raw =
- tuple_extract_key_slowpath_raw<true>;
+ tuple_extract_key_slowpath_raw<true, false>;
} else {
key_def->tuple_extract_key_raw =
- tuple_extract_key_slowpath_raw<false>;
+ tuple_extract_key_slowpath_raw<false, false>;
}
}
diff --git a/src/box/tuple_format.c b/src/box/tuple_format.c
index b385c0d..2d4a85f 100644
--- a/src/box/tuple_format.c
+++ b/src/box/tuple_format.c
@@ -232,6 +232,11 @@ tuple_format_alloc(struct key_def * const *keys, uint16_t key_count,
format->dict = dict;
tuple_dictionary_ref(dict);
}
+ /*
+ * Set invalid epoch that should be changed later on
+ * attaching to space.
+ */
+ format->epoch = 1;
format->refs = 0;
format->id = FORMAT_ID_NIL;
format->field_count = field_count;
@@ -541,6 +546,13 @@ tuple_field_go_to_key(const char **field, const char *key, int len)
return -1;
}
+const char *
+tuple_field_by_part_raw(const struct tuple_format *format, const char *data,
+ const uint32_t *field_map, struct key_part *part)
+{
+ return tuple_field_raw(format, data, field_map, part->fieldno);
+}
+
int
tuple_field_raw_by_path(struct tuple_format *format, const char *tuple,
const uint32_t *field_map, const char *path,
diff --git a/src/box/tuple_format.h b/src/box/tuple_format.h
index c7dc48f..ecbf64c 100644
--- a/src/box/tuple_format.h
+++ b/src/box/tuple_format.h
@@ -115,6 +115,12 @@ struct tuple_field {
* Tuple format describes how tuple is stored and information about its fields
*/
struct tuple_format {
+ /**
+ * Counter that grows incrementally on space rebuild if
+ * the format has a different distribution of offset slots
+ * compared with the previous one.
+ */
+ uint64_t epoch;
/** Virtual function table */
struct tuple_format_vtab vtab;
/** Pointer to engine-specific data. */
@@ -324,6 +330,18 @@ tuple_init_field_map(const struct tuple_format *format, uint32_t *field_map,
const char *tuple);
/**
 * Get a field referred to by multipart index @part in a tuple.
 * @param format Tuple format.
 * @param data A pointer to a MessagePack array.
 * @param field_map A pointer to the LAST element of the field map.
 * @param part Multipart index part to use.
 * @retval Field data if the field exists, or NULL.
+ */
+const char *
+tuple_field_by_part_raw(const struct tuple_format *format, const char *data,
+ const uint32_t *field_map, struct key_part *part);
+
+/**
* Get a field at the specific position in this MessagePack array.
* Returns a pointer to MessagePack data.
* @param format tuple format
diff --git a/src/box/tuple_hash.cc b/src/box/tuple_hash.cc
index dee9be3..561ca55 100644
--- a/src/box/tuple_hash.cc
+++ b/src/box/tuple_hash.cc
@@ -157,7 +157,12 @@ struct TupleHash
uint32_t h = HASH_SEED;
uint32_t carry = 0;
uint32_t total_size = 0;
- const char *field = tuple_field(tuple, key_def->parts->fieldno);
+ const char *field =
+ tuple_field_by_part_raw(tuple_format(tuple),
+ tuple_data(tuple),
+ tuple_field_map(tuple),
+ (struct key_part *)
+ key_def->parts);
TupleFieldHash<TYPE, MORE_TYPES...>::
hash(&field, &h, &carry, &total_size);
return PMurHash32_Result(h, carry, total_size);
@@ -169,7 +174,12 @@ struct TupleHash<FIELD_TYPE_UNSIGNED> {
static uint32_t hash(const struct tuple *tuple,
const struct key_def *key_def)
{
- const char *field = tuple_field(tuple, key_def->parts->fieldno);
+ const char *field =
+ tuple_field_by_part_raw(tuple_format(tuple),
+ tuple_data(tuple),
+ tuple_field_map(tuple),
+ (struct key_part *)
+ key_def->parts);
uint64_t val = mp_decode_uint(&field);
if (likely(val <= UINT32_MAX))
return val;
@@ -211,7 +221,7 @@ static const hasher_signature hash_arr[] = {
#undef HASHER
-template <bool has_optional_parts>
+template <bool has_optional_parts, bool has_json_paths>
uint32_t
tuple_hash_slowpath(const struct tuple *tuple, const struct key_def *key_def);
@@ -255,9 +265,9 @@ tuple_hash_func_set(struct key_def *key_def) {
slowpath:
if (key_def->has_optional_parts)
- key_def->tuple_hash = tuple_hash_slowpath<true>;
+ key_def->tuple_hash = tuple_hash_slowpath<true, false>;
else
- key_def->tuple_hash = tuple_hash_slowpath<false>;
+ key_def->tuple_hash = tuple_hash_slowpath<false, false>;
key_def->key_hash = key_hash_slowpath;
}
@@ -312,13 +322,16 @@ tuple_hash_key_part(uint32_t *ph1, uint32_t *pcarry,
const struct tuple *tuple,
const struct key_part *part)
{
- const char *field = tuple_field(tuple, part->fieldno);
+ const char *field =
+ tuple_field_by_part_raw(tuple_format(tuple), tuple_data(tuple),
+ tuple_field_map(tuple),
+ (struct key_part *)part);
if (field == NULL)
return tuple_hash_null(ph1, pcarry);
return tuple_hash_field(ph1, pcarry, &field, part->coll);
}
-template <bool has_optional_parts>
+template <bool has_optional_parts, bool has_json_paths>
uint32_t
tuple_hash_slowpath(const struct tuple *tuple, const struct key_def *key_def)
{
@@ -327,7 +340,17 @@ tuple_hash_slowpath(const struct tuple *tuple, const struct key_def *key_def)
uint32_t carry = 0;
uint32_t total_size = 0;
uint32_t prev_fieldno = key_def->parts[0].fieldno;
- const char *field = tuple_field(tuple, key_def->parts[0].fieldno);
+ struct tuple_format *format = tuple_format(tuple);
+ const char *tuple_raw = tuple_data(tuple);
+ const uint32_t *field_map = tuple_field_map(tuple);
+ const char *field;
+ if (!has_json_paths) {
+ field = tuple_field(tuple, prev_fieldno);
+ } else {
+ field = tuple_field_by_part_raw(format, tuple_raw, field_map,
+ (struct key_part *)
+ &key_def->parts);
+ }
const char *end = (char *)tuple + tuple_size(tuple);
if (has_optional_parts && field == NULL) {
total_size += tuple_hash_null(&h, &carry);
@@ -341,7 +364,19 @@ tuple_hash_slowpath(const struct tuple *tuple, const struct key_def *key_def)
* need of tuple_field
*/
if (prev_fieldno + 1 != key_def->parts[part_id].fieldno) {
- field = tuple_field(tuple, key_def->parts[part_id].fieldno);
+ if (!has_json_paths) {
+ field = tuple_field(tuple,
+ key_def->parts[part_id].
+ fieldno);
+ } else {
+ struct key_part *part =
+ (struct key_part *)
+ &key_def->parts[part_id];
+ field = tuple_field_by_part_raw(format,
+ tuple_raw,
+ field_map,
+ part);
+ }
}
if (has_optional_parts && (field == NULL || field >= end)) {
total_size += tuple_hash_null(&h, &carry);
diff --git a/src/box/vy_stmt.h b/src/box/vy_stmt.h
index 273d5e8..c4885c0 100644
--- a/src/box/vy_stmt.h
+++ b/src/box/vy_stmt.h
@@ -719,7 +719,12 @@ static inline bool
vy_tuple_key_contains_null(const struct tuple *tuple, const struct key_def *def)
{
for (uint32_t i = 0; i < def->part_count; ++i) {
- const char *field = tuple_field(tuple, def->parts[i].fieldno);
+ const char *field =
+ tuple_field_by_part_raw(tuple_format(tuple),
+ tuple_data(tuple),
+ tuple_field_map(tuple),
+ (struct key_part *)
+ &def->parts[i]);
if (field == NULL || mp_typeof(*field) == MP_NIL)
return true;
}
--
2.7.4
next prev parent reply other threads:[~2018-08-27 7:37 UTC|newest]
Thread overview: 14+ messages / expand[flat|nested] mbox.gz Atom feed top
2018-08-27 7:37 [tarantool-patches] [PATCH v3 0/4] box: indexes by JSON path Kirill Shcherbatov
2018-08-27 7:37 ` [tarantool-patches] [PATCH v3 1/4] rfc: describe a Tarantool JSON indexes Kirill Shcherbatov
2018-08-27 7:37 ` Kirill Shcherbatov [this message]
2018-09-03 10:35 ` [tarantool-patches] Re: [PATCH v3 2/4] box: introduce slot_cache in key_part Vladislav Shpilevoy
2018-09-06 12:47 ` Kirill Shcherbatov
2018-09-17 17:08 ` Vladimir Davydov
2018-08-27 7:37 ` [tarantool-patches] [PATCH v3 3/4] box: introduce JSON indexes Kirill Shcherbatov
2018-09-03 10:32 ` [tarantool-patches] " Vladislav Shpilevoy
2018-09-03 10:35 ` Vladislav Shpilevoy
2018-09-06 12:46 ` Kirill Shcherbatov
2018-08-27 7:37 ` [tarantool-patches] [PATCH v3 4/4] box: specify indexes in user-friendly form Kirill Shcherbatov
2018-09-03 10:32 ` [tarantool-patches] " Vladislav Shpilevoy
2018-09-06 12:46 ` Kirill Shcherbatov
2018-09-17 15:50 ` [tarantool-patches] [PATCH v3 0/4] box: indexes by JSON path Vladimir Davydov
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=9be616bdd2190a0ab38aaee981098811f542eeb1.1535354849.git.kshcherbatov@tarantool.org \
--to=kshcherbatov@tarantool.org \
--cc=tarantool-patches@freelists.org \
--cc=v.shpilevoy@tarantool.org \
--subject='Re: [tarantool-patches] [PATCH v3 2/4] box: introduce slot_cache in key_part' \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox