[tarantool-patches] [PATCH v2 2/5] box: introduce slot_cache in key_part

Kirill Shcherbatov kshcherbatov at tarantool.org
Wed Aug 15 15:15:00 MSK 2018


The same key_part can be used in multiple formats, so a different
field->offset_slot may be allocated for each of them.
In most scenarios we work with a series of tuples that share the
same format, while (in general) a format lookup for a field is an
expensive operation for JSON paths defined in a key_part.
The new slot_cache field in the key_part structure, together with
an epoch-based mechanism to validate its actuality, should be an
effective way to improve performance.
The new routine tuple_field_by_part uses a tuple and a key_part
to access a field, which allows reworking and speeding up all
scenarios of accessing tuple data by index.
It also makes it possible to support JSON-path key_parts later.
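
To illustrate the intended mechanism, below is a minimal sketch of
an epoch-validated lookup. It is hypothetical: in this patch
tuple_field_by_part() still delegates to tuple_field_raw(), the
helper name is illustrative, and bounds checks are omitted. It
assumes field_map is indexed by negative offset slots, as in
tuple_field_raw():

static inline const char *
tuple_field_by_part_cached(const struct tuple_format *format,
			   const char *data, const uint32_t *field_map,
			   struct key_part *part)
{
	if (part->format_epoch == format->epoch &&
	    part->slot_cache != TUPLE_OFFSET_SLOT_NIL) {
		/* Cache hit: reuse the memoized offset slot. */
		return data + field_map[part->slot_cache];
	}
	/* Cache miss: look the slot up in the format, memoize it. */
	part->format_epoch = format->epoch;
	part->slot_cache = format->fields[part->fieldno].offset_slot;
	return tuple_field_raw(format, data, field_map, part->fieldno);
}

The epoch comparison is what keeps the cache safe across ALTER:
space_format_update_epoch() bumps format->epoch whenever a cached
slot would move, which invalidates stale slot_cache values in all
key_defs at once.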

Part of #1012.
---
 src/box/alter.cc             |  4 ++
 src/box/key_def.c            |  2 +
 src/box/key_def.h            |  4 ++
 src/box/memtx_bitset.c       |  8 +++-
 src/box/memtx_rtree.c        |  6 ++-
 src/box/space.c              | 25 +++++++++++++
 src/box/space.h              | 10 +++++
 src/box/tuple_compare.cc     | 88 ++++++++++++++++++++++++++++++++------------
 src/box/tuple_extract_key.cc | 39 ++++++++++++++------
 src/box/tuple_format.c       | 12 ++++++
 src/box/tuple_format.h       | 19 ++++++++++
 src/box/tuple_hash.cc        | 49 +++++++++++++++++++-----
 src/box/vy_stmt.h            |  6 ++-
 13 files changed, 224 insertions(+), 48 deletions(-)

diff --git a/src/box/alter.cc b/src/box/alter.cc
index 3007a13..e1a0d9c 100644
--- a/src/box/alter.cc
+++ b/src/box/alter.cc
@@ -887,6 +887,10 @@ alter_space_do(struct txn *txn, struct alter_space *alter)
 	alter->new_space->sequence = alter->old_space->sequence;
 	memcpy(alter->new_space->access, alter->old_space->access,
 	       sizeof(alter->old_space->access));
+	space_format_update_epoch(alter->new_space,
+				  alter->old_space->format != NULL ?
+				  alter->old_space->format->epoch : 0,
+				  &alter->key_list);
 
 	/*
 	 * Build new indexes, check if tuples conform to
diff --git a/src/box/key_def.c b/src/box/key_def.c
index ee09dc9..8a4262b 100644
--- a/src/box/key_def.c
+++ b/src/box/key_def.c
@@ -258,6 +258,8 @@ key_def_set_part(struct key_def *def, uint32_t part_no, uint32_t fieldno,
 	def->parts[part_no].type = type;
 	def->parts[part_no].coll = coll;
 	def->parts[part_no].coll_id = coll_id;
+	def->parts[part_no].slot_cache = TUPLE_OFFSET_SLOT_NIL;
+	def->parts[part_no].format_epoch = 0;
 	column_mask_set_fieldno(&def->column_mask, fieldno);
 	/**
 	 * When all parts are set, initialize the tuple
diff --git a/src/box/key_def.h b/src/box/key_def.h
index aecbe03..42c054c 100644
--- a/src/box/key_def.h
+++ b/src/box/key_def.h
@@ -74,6 +74,10 @@ struct key_part {
 	struct coll *coll;
 	/** True if a part can store NULLs. */
 	bool is_nullable;
+	/** Format epoch for slot_cache. */
+	uint64_t format_epoch;
+	/** Cache of the corresponding tuple_format offset_slot. */
+	int32_t slot_cache;
 };
 
 struct key_def;
diff --git a/src/box/memtx_bitset.c b/src/box/memtx_bitset.c
index a665f1a..cb41be1 100644
--- a/src/box/memtx_bitset.c
+++ b/src/box/memtx_bitset.c
@@ -283,8 +283,12 @@ memtx_bitset_index_replace(struct index *base, struct tuple *old_tuple,
 	}
 
 	if (new_tuple != NULL) {
-		const char *field;
-		field = tuple_field(new_tuple, base->def->key_def->parts[0].fieldno);
+		const char *field =
+			tuple_field_by_part(tuple_format(new_tuple),
+					    tuple_data(new_tuple),
+					    tuple_field_map(new_tuple),
+					    (struct key_part *)
+						base->def->key_def->parts);
 		uint32_t key_len;
 		const void *key = make_key(field, &key_len);
 #ifndef OLD_GOOD_BITSET
diff --git a/src/box/memtx_rtree.c b/src/box/memtx_rtree.c
index 0b12cda..9a49acd 100644
--- a/src/box/memtx_rtree.c
+++ b/src/box/memtx_rtree.c
@@ -112,7 +112,11 @@ extract_rectangle(struct rtree_rect *rect, const struct tuple *tuple,
 		  struct index_def *index_def)
 {
 	assert(index_def->key_def->part_count == 1);
-	const char *elems = tuple_field(tuple, index_def->key_def->parts[0].fieldno);
+	const char *elems =
+		tuple_field_by_part(tuple_format(tuple), tuple_data(tuple),
+				    tuple_field_map(tuple),
+				    (struct key_part *)
+					index_def->key_def->parts);
 	unsigned dimension = index_def->opts.dimension;
 	uint32_t count = mp_decode_array(&elems);
 	return mp_decode_rect(rect, dimension, elems, count, "Field");
diff --git a/src/box/space.c b/src/box/space.c
index 871cc67..c34428a 100644
--- a/src/box/space.c
+++ b/src/box/space.c
@@ -226,6 +226,31 @@ space_index_key_def(struct space *space, uint32_t id)
 }
 
 void
+space_format_update_epoch(struct space *space, uint64_t last_epoch,
+			  struct rlist *key_list)
+{
+	struct tuple_format *format = space->format;
+	if (format == NULL)
+		return;
+	bool is_format_epoch_changed = false;
+	struct index_def *index_def;
+	rlist_foreach_entry(index_def, key_list, link) {
+		struct key_part *part = index_def->key_def->parts;
+		struct key_part *parts_end =
+			part + index_def->key_def->part_count;
+		for (; part < parts_end; part++) {
+			struct tuple_field *field =
+				&format->fields[part->fieldno];
+			if (field->offset_slot != part->slot_cache)
+				is_format_epoch_changed = true;
+		}
+	}
+	format->epoch = last_epoch;
+	if (is_format_epoch_changed)
+		format->epoch++;
+}
+
+void
 generic_space_swap_index(struct space *old_space, struct space *new_space,
 			 uint32_t old_index_id, uint32_t new_index_id)
 {
diff --git a/src/box/space.h b/src/box/space.h
index 8888ec8..8d13bc8 100644
--- a/src/box/space.h
+++ b/src/box/space.h
@@ -228,6 +228,16 @@ struct key_def *
 space_index_key_def(struct space *space, uint32_t id);
 
 /**
+ * Set up the space format epoch value.
+ * @param space space to update.
+ * @param last_epoch epoch of the old space format.
+ * @param key_list list of index_defs.
+ */
+void
+space_format_update_epoch(struct space *space, uint64_t last_epoch,
+			  struct rlist *key_list);
+
+/**
  * Look up the index by id.
  */
 static inline struct index *
diff --git a/src/box/tuple_compare.cc b/src/box/tuple_compare.cc
index e53afba..f07b695 100644
--- a/src/box/tuple_compare.cc
+++ b/src/box/tuple_compare.cc
@@ -432,9 +432,17 @@ tuple_common_key_parts(const struct tuple *tuple_a,
 {
 	uint32_t i;
 	for (i = 0; i < key_def->part_count; i++) {
-		const struct key_part *part = &key_def->parts[i];
-		const char *field_a = tuple_field(tuple_a, part->fieldno);
-		const char *field_b = tuple_field(tuple_b, part->fieldno);
+		struct key_part *part = (struct key_part *)&key_def->parts[i];
+		const char *field_a =
+			tuple_field_by_part(tuple_format(tuple_a),
+					    tuple_data(tuple_a),
+					    tuple_field_map(tuple_a),
+					    part);
+		const char *field_b =
+			tuple_field_by_part(tuple_format(tuple_b),
+					    tuple_data(tuple_b),
+					    tuple_field_map(tuple_b),
+					    part);
 		enum mp_type a_type = field_a != NULL ?
 				      mp_typeof(*field_a) : MP_NIL;
 		enum mp_type b_type = field_b != NULL ?
@@ -449,7 +457,7 @@ tuple_common_key_parts(const struct tuple *tuple_a,
 	return i;
 }
 
-template<bool is_nullable, bool has_optional_parts>
+template<bool is_nullable, bool has_optional_parts, bool is_flat>
 static inline int
 tuple_compare_slowpath(const struct tuple *tuple_a, const struct tuple *tuple_b,
 		       const struct key_def *key_def)
@@ -498,10 +506,21 @@ tuple_compare_slowpath(const struct tuple *tuple_a, const struct tuple *tuple_b,
 		end = part + key_def->part_count;
 
 	for (; part < end; part++) {
-		field_a = tuple_field_raw(format_a, tuple_a_raw, field_map_a,
-					  part->fieldno);
-		field_b = tuple_field_raw(format_b, tuple_b_raw, field_map_b,
-					  part->fieldno);
+		if (is_flat) {
+			field_a = tuple_field_raw(format_a, tuple_a_raw,
+						  field_map_a,
+						  part->fieldno);
+			field_b = tuple_field_raw(format_b, tuple_b_raw,
+						  field_map_b,
+						  part->fieldno);
+		} else {
+			field_a = tuple_field_by_part(format_a, tuple_a_raw,
+						      field_map_a,
+						      (struct key_part *)part);
+			field_b = tuple_field_by_part(format_b, tuple_b_raw,
+						      field_map_b,
+						      (struct key_part *)part);
+		}
 		assert(has_optional_parts ||
 		       (field_a != NULL && field_b != NULL));
 		if (! is_nullable) {
@@ -548,10 +567,21 @@ tuple_compare_slowpath(const struct tuple *tuple_a, const struct tuple *tuple_b,
 	 */
 	end = key_def->parts + key_def->part_count;
 	for (; part < end; ++part) {
-		field_a = tuple_field_raw(format_a, tuple_a_raw, field_map_a,
-					  part->fieldno);
-		field_b = tuple_field_raw(format_b, tuple_b_raw, field_map_b,
-					  part->fieldno);
+		if (is_flat) {
+			field_a = tuple_field_raw(format_a, tuple_a_raw,
+						  field_map_a,
+						  part->fieldno);
+			field_b = tuple_field_raw(format_b, tuple_b_raw,
+						  field_map_b,
+						  part->fieldno);
+		} else {
+			field_a = tuple_field_by_part(format_a, tuple_a_raw,
+						      field_map_a,
+						      (struct key_part *)part);
+			field_b = tuple_field_by_part(format_b, tuple_b_raw,
+						      field_map_b,
+						      (struct key_part *)part);
+		}
 		/*
 		 * Extended parts are primary, and they can not
 		 * be absent or be NULLs.
@@ -565,7 +595,7 @@ tuple_compare_slowpath(const struct tuple *tuple_a, const struct tuple *tuple_b,
 	return 0;
 }
 
-template<bool is_nullable, bool has_optional_parts>
+template<bool is_nullable, bool has_optional_parts, bool is_flat>
 static inline int
 tuple_compare_with_key_slowpath(const struct tuple *tuple, const char *key,
 				uint32_t part_count,
@@ -583,8 +613,14 @@ tuple_compare_with_key_slowpath(const struct tuple *tuple, const char *key,
 	enum mp_type a_type, b_type;
 	if (likely(part_count == 1)) {
 		const char *field;
-		field = tuple_field_raw(format, tuple_raw, field_map,
-					part->fieldno);
+		if (is_flat) {
+			field = tuple_field_raw(format, tuple_raw, field_map,
+						part->fieldno);
+		} else {
+			field = tuple_field_by_part(format, tuple_raw,
+						    field_map,
+						    (struct key_part *)part);
+		}
 		if (! is_nullable) {
 			return tuple_compare_field(field, key, part->type,
 						   part->coll);
@@ -609,8 +645,14 @@ tuple_compare_with_key_slowpath(const struct tuple *tuple, const char *key,
 	int rc;
 	for (; part < end; ++part, mp_next(&key)) {
 		const char *field;
-		field = tuple_field_raw(format, tuple_raw, field_map,
-					part->fieldno);
+		if (is_flat) {
+			field = tuple_field_raw(format, tuple_raw, field_map,
+						part->fieldno);
+		} else {
+			field = tuple_field_by_part(format, tuple_raw,
+						    field_map,
+						    (struct key_part *)part);
+		}
 		if (! is_nullable) {
 			rc = tuple_compare_field(field, key, part->type,
 						 part->coll);
@@ -1016,9 +1058,9 @@ tuple_compare_create(const struct key_def *def)
 			else
 				return tuple_compare_sequential<true, false>;
 		} else if (def->has_optional_parts) {
-			return tuple_compare_slowpath<true, true>;
+			return tuple_compare_slowpath<true, true, true>;
 		} else {
-			return tuple_compare_slowpath<true, false>;
+			return tuple_compare_slowpath<true, false, true>;
 		}
 	}
 	assert(! def->has_optional_parts);
@@ -1041,7 +1083,7 @@ tuple_compare_create(const struct key_def *def)
 	if (key_def_is_sequential(def))
 		return tuple_compare_sequential<false, false>;
 	else
-		return tuple_compare_slowpath<false, false>;
+		return tuple_compare_slowpath<false, false, true>;
 }
 
 /* }}} tuple_compare */
@@ -1236,9 +1278,9 @@ tuple_compare_with_key_create(const struct key_def *def)
 									 false>;
 			}
 		} else if (def->has_optional_parts) {
-			return tuple_compare_with_key_slowpath<true, true>;
+			return tuple_compare_with_key_slowpath<true, true, true>;
 		} else {
-			return tuple_compare_with_key_slowpath<true, false>;
+			return tuple_compare_with_key_slowpath<true, false, true>;
 		}
 	}
 	assert(! def->has_optional_parts);
@@ -1264,7 +1306,7 @@ tuple_compare_with_key_create(const struct key_def *def)
 	if (key_def_is_sequential(def))
 		return tuple_compare_with_key_sequential<false, false>;
 	else
-		return tuple_compare_with_key_slowpath<false, false>;
+		return tuple_compare_with_key_slowpath<false, false, true>;
 }
 
 /* }}} tuple_compare_with_key */
diff --git a/src/box/tuple_extract_key.cc b/src/box/tuple_extract_key.cc
index 880abb6..d95ee8d 100644
--- a/src/box/tuple_extract_key.cc
+++ b/src/box/tuple_extract_key.cc
@@ -91,7 +91,7 @@ tuple_extract_key_sequential(const struct tuple *tuple,
  * General-purpose implementation of tuple_extract_key()
  * @copydoc tuple_extract_key()
  */
-template <bool contains_sequential_parts, bool has_optional_parts>
+template <bool contains_sequential_parts, bool has_optional_parts, bool is_flat>
 static char *
 tuple_extract_key_slowpath(const struct tuple *tuple,
 			   const struct key_def *key_def, uint32_t *key_size)
@@ -110,9 +110,15 @@ tuple_extract_key_slowpath(const struct tuple *tuple,
 
 	/* Calculate the key size. */
 	for (uint32_t i = 0; i < part_count; ++i) {
-		const char *field =
-			tuple_field_raw(format, data, field_map,
-					key_def->parts[i].fieldno);
+		const char *field;
+		if (is_flat) {
+			field = tuple_field_raw(format, data, field_map,
+						key_def->parts[i].fieldno);
+		} else {
+			field = tuple_field_by_part(format, data, field_map,
+						    (struct key_part *)
+							&key_def->parts[i]);
+		}
 		if (has_optional_parts && field == NULL) {
 			bsize += mp_sizeof_nil();
 			continue;
@@ -152,9 +158,15 @@ tuple_extract_key_slowpath(const struct tuple *tuple,
 	}
 	char *key_buf = mp_encode_array(key, part_count);
 	for (uint32_t i = 0; i < part_count; ++i) {
-		const char *field =
-			tuple_field_raw(format, data, field_map,
-					key_def->parts[i].fieldno);
+		const char *field;
+		if (is_flat) {
+			field = tuple_field_raw(format, data, field_map,
+						key_def->parts[i].fieldno);
+		} else {
+			field = tuple_field_by_part(format, data, field_map,
+						    (struct key_part *)
+							&key_def->parts[i]);
+		}
 		if (has_optional_parts && field == NULL) {
 			key_buf = mp_encode_nil(key_buf);
 			continue;
@@ -318,19 +330,22 @@ tuple_extract_key_set(struct key_def *key_def)
 			assert(key_def->is_nullable);
 			if (key_def_contains_sequential_parts(key_def)) {
 				key_def->tuple_extract_key =
-					tuple_extract_key_slowpath<true, true>;
+					tuple_extract_key_slowpath<true, true,
+								   true>;
 			} else {
 				key_def->tuple_extract_key =
-					tuple_extract_key_slowpath<false, true>;
+					tuple_extract_key_slowpath<false, true,
+								   true>;
 			}
 		} else {
 			if (key_def_contains_sequential_parts(key_def)) {
 				key_def->tuple_extract_key =
-					tuple_extract_key_slowpath<true, false>;
+					tuple_extract_key_slowpath<true, false,
+								   true>;
 			} else {
 				key_def->tuple_extract_key =
-					tuple_extract_key_slowpath<false,
-								   false>;
+					tuple_extract_key_slowpath<false, false,
+								   true>;
 			}
 		}
 	}
diff --git a/src/box/tuple_format.c b/src/box/tuple_format.c
index 2e19d2e..a9fddc0 100644
--- a/src/box/tuple_format.c
+++ b/src/box/tuple_format.c
@@ -233,6 +233,11 @@ tuple_format_alloc(struct key_def * const *keys, uint16_t key_count,
 		format->dict = dict;
 		tuple_dictionary_ref(dict);
 	}
+	/*
+	 * Set an initial epoch value that is expected to be
+	 * updated later, when the format is attached to a space.
+	 */
+	format->epoch = 1;
 	format->refs = 0;
 	format->id = FORMAT_ID_NIL;
 	format->field_count = field_count;
@@ -542,6 +547,13 @@ tuple_field_go_to_key(const char **field, const char *key, int len)
 	return -1;
 }
 
+const char *
+tuple_field_by_part(const struct tuple_format *format, const char *data,
+		    const uint32_t *field_map, struct key_part *part)
+{
+	return tuple_field_raw(format, data, field_map, part->fieldno);
+}
+
 int
 tuple_field_raw_by_path(struct tuple_format *format, const char *tuple,
                         const uint32_t *field_map, const char *path,
diff --git a/src/box/tuple_format.h b/src/box/tuple_format.h
index c7dc48f..a989917 100644
--- a/src/box/tuple_format.h
+++ b/src/box/tuple_format.h
@@ -115,6 +115,11 @@ struct tuple_field {
  * Tuple format describes how tuple is stored and information about its fields
  */
 struct tuple_format {
+	/**
+	 * Tuple format epoch used to validate key_part slot_cache
+	 * actuality. Bumped on space rebuild when required.
+	 */
+	uint64_t epoch;
 	/** Virtual function table */
 	struct tuple_format_vtab vtab;
 	/** Pointer to engine-specific data. */
@@ -324,6 +329,20 @@ tuple_init_field_map(const struct tuple_format *format, uint32_t *field_map,
 		     const char *tuple);
 
 /**
+ * Get a tuple field referred to by a multipart index
+ * key part.
+ *
+ * @param format tuple format
+ * @param data a pointer to MessagePack array
+ * @param field_map a pointer to the LAST element of field map
+ * @param part multipart index key part to use.
+ * @retval field data if the field exists or NULL
+ */
+const char *
+tuple_field_by_part(const struct tuple_format *format, const char *data,
+		    const uint32_t *field_map, struct key_part *part);
+
+/**
  * Get a field at the specific position in this MessagePack array.
  * Returns a pointer to MessagePack data.
  * @param format tuple format
diff --git a/src/box/tuple_hash.cc b/src/box/tuple_hash.cc
index dee9be3..272e814 100644
--- a/src/box/tuple_hash.cc
+++ b/src/box/tuple_hash.cc
@@ -157,7 +157,11 @@ struct TupleHash
 		uint32_t h = HASH_SEED;
 		uint32_t carry = 0;
 		uint32_t total_size = 0;
-		const char *field = tuple_field(tuple, key_def->parts->fieldno);
+		const char *field =
+			tuple_field_by_part(tuple_format(tuple),
+					    tuple_data(tuple),
+					    tuple_field_map(tuple),
+					    (struct key_part *)key_def->parts);
 		TupleFieldHash<TYPE, MORE_TYPES...>::
 			hash(&field, &h, &carry, &total_size);
 		return PMurHash32_Result(h, carry, total_size);
@@ -169,7 +173,11 @@ struct TupleHash<FIELD_TYPE_UNSIGNED> {
 	static uint32_t	hash(const struct tuple *tuple,
 			     const struct key_def *key_def)
 	{
-		const char *field = tuple_field(tuple, key_def->parts->fieldno);
+		const char *field =
+			tuple_field_by_part(tuple_format(tuple),
+					    tuple_data(tuple),
+					    tuple_field_map(tuple),
+					    (struct key_part *)key_def->parts);
 		uint64_t val = mp_decode_uint(&field);
 		if (likely(val <= UINT32_MAX))
 			return val;
@@ -211,7 +219,7 @@ static const hasher_signature hash_arr[] = {
 
 #undef HASHER
 
-template <bool has_optional_parts>
+template <bool has_optional_parts, bool is_flat>
 uint32_t
 tuple_hash_slowpath(const struct tuple *tuple, const struct key_def *key_def);
 
@@ -255,9 +263,9 @@ tuple_hash_func_set(struct key_def *key_def) {
 
 slowpath:
 	if (key_def->has_optional_parts)
-		key_def->tuple_hash = tuple_hash_slowpath<true>;
+		key_def->tuple_hash = tuple_hash_slowpath<true, true>;
 	else
-		key_def->tuple_hash = tuple_hash_slowpath<false>;
+		key_def->tuple_hash = tuple_hash_slowpath<false, true>;
 	key_def->key_hash = key_hash_slowpath;
 }
 
@@ -312,13 +320,16 @@ tuple_hash_key_part(uint32_t *ph1, uint32_t *pcarry,
 		    const struct tuple *tuple,
 		    const struct key_part *part)
 {
-	const char *field = tuple_field(tuple, part->fieldno);
+	const char *field =
+		tuple_field_by_part(tuple_format(tuple), tuple_data(tuple),
+				    tuple_field_map(tuple),
+				    (struct key_part *)part);
 	if (field == NULL)
 		return tuple_hash_null(ph1, pcarry);
 	return tuple_hash_field(ph1, pcarry, &field, part->coll);
 }
 
-template <bool has_optional_parts>
+template <bool has_optional_parts, bool is_flat>
 uint32_t
 tuple_hash_slowpath(const struct tuple *tuple, const struct key_def *key_def)
 {
@@ -327,7 +338,15 @@ tuple_hash_slowpath(const struct tuple *tuple, const struct key_def *key_def)
 	uint32_t carry = 0;
 	uint32_t total_size = 0;
 	uint32_t prev_fieldno = key_def->parts[0].fieldno;
-	const char *field = tuple_field(tuple, key_def->parts[0].fieldno);
+	const char *field;
+	if (is_flat) {
+		field = tuple_field(tuple, prev_fieldno);
+	} else {
+		field = tuple_field_by_part(tuple_format(tuple),
+					    tuple_data(tuple),
+					    tuple_field_map(tuple),
+					    (struct key_part *)key_def->parts);
+	}
 	const char *end = (char *)tuple + tuple_size(tuple);
 	if (has_optional_parts && field == NULL) {
 		total_size += tuple_hash_null(&h, &carry);
@@ -341,7 +360,19 @@ tuple_hash_slowpath(const struct tuple *tuple, const struct key_def *key_def)
 		 * need of tuple_field
 		 */
 		if (prev_fieldno + 1 != key_def->parts[part_id].fieldno) {
-			field = tuple_field(tuple, key_def->parts[part_id].fieldno);
+			if (is_flat) {
+				field = tuple_field(tuple,
+						    key_def->parts[part_id].
+						    fieldno);
+			} else {
+				struct key_part *part =
+					(struct key_part *)
+					&key_def->parts[part_id];
+				field = tuple_field_by_part(tuple_format(tuple),
+							    tuple_data(tuple),
+							    tuple_field_map(tuple),
+							    part);
+			}
 		}
 		if (has_optional_parts && (field == NULL || field >= end)) {
 			total_size += tuple_hash_null(&h, &carry);
diff --git a/src/box/vy_stmt.h b/src/box/vy_stmt.h
index e53f98c..233c800 100644
--- a/src/box/vy_stmt.h
+++ b/src/box/vy_stmt.h
@@ -665,7 +665,11 @@ static inline bool
 vy_tuple_key_contains_null(const struct tuple *tuple, const struct key_def *def)
 {
 	for (uint32_t i = 0; i < def->part_count; ++i) {
-		const char *field = tuple_field(tuple, def->parts[i].fieldno);
+		const char *field =
+			tuple_field_by_part(tuple_format(tuple),
+					    tuple_data(tuple),
+					    tuple_field_map(tuple),
+					    (struct key_part *)&def->parts[i]);
 		if (field == NULL || mp_typeof(*field) == MP_NIL)
 			return true;
 	}
-- 
2.7.4
