Tarantool development patches archive
* [Tarantool-patches] [PATCH 0/4] Introduce tiny tuples with perf test
@ 2021-01-18 23:50 Ilya Kosarev via Tarantool-patches
  2021-01-18 23:50 ` [Tarantool-patches] [PATCH 1/4] tuple: introduce unit perf test based on gbench Ilya Kosarev via Tarantool-patches
                   ` (3 more replies)
  0 siblings, 4 replies; 5+ messages in thread
From: Ilya Kosarev via Tarantool-patches @ 2021-01-18 23:50 UTC (permalink / raw)
  To: v.shpilevoy, alyapunov; +Cc: tarantool-patches

Tuple bsize used to be stored in a 4-byte uint32_t field and tuple
data_offset in a 2-byte uint16_t field. Now the tuple ends with a flexible
array member, so they can still be stored the old, wide way when needed.
When possible, though, bsize and data_offset take 1 byte each; such tuples
are called tiny tuples and need only 6 bytes of metadata instead of 10.
Tiny tuples also use 1-byte field map offsets instead of 4-byte ones.
Altogether this saves a lot of memory for small enough tuples.
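As a quick standalone illustration of the size arithmetic, the sketch below
compiles the three property structs from patch 3 of this series and prints
their sizes. GCC/Clang bit-field layout is assumed, and treating refs +
format_id as a fixed 4-byte prefix is my simplification:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Struct definitions copied from patch 3 (src/box/tuple.h). */
struct tiny_tuple_props {
        bool is_tiny : 1;
        bool is_dirty : 1;
        uint8_t data_offset : 6;
        uint8_t bsize;
};

struct tuple_props {
        bool is_tiny : 1;
        uint16_t data_offset : 15;
};

struct tuple_extra {        /* declared PACKED in the patch */
        bool is_dirty : 1;
        uint32_t bsize : 31;
} __attribute__((packed));

int main(void)
{
        /*
         * Tiny tuple header: 4 (refs + format_id) + 2 = 6 bytes.
         * Regular tuple header: 4 + 2 + 4 (tuple_extra) = 10 bytes.
         */
        printf("tiny props:  %zu bytes\n", sizeof(struct tiny_tuple_props));
        printf("props:       %zu bytes\n", sizeof(struct tuple_props));
        printf("tuple_extra: %zu bytes\n", sizeof(struct tuple_extra));
        return 0;
}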

Ilya Kosarev (4):
  tuple: introduce unit perf test based on gbench
  tuple: use getters and setters
  core: introduce tiny tuples
  box: introduce 1 byte field map offsets

 .gitignore                      |   2 +
 CMakeLists.txt                  |   1 +
 perf/CMakeLists.txt             |  34 +++++
 perf/tuple.cc                   | 185 ++++++++++++++++++++++
 src/box/alter.cc                |   2 +-
 src/box/field_map.c             |  14 +-
 src/box/field_map.h             |  19 ++-
 src/box/lua/merger.c            |   2 +-
 src/box/lua/tuple.c             |   3 +-
 src/box/memtx_engine.c          |  31 ++--
 src/box/memtx_tx.c              |  22 +--
 src/box/memtx_tx.h              |   2 +-
 src/box/sql.c                   |   7 +-
 src/box/tuple.c                 |  51 ++++---
 src/box/tuple.h                 | 261 ++++++++++++++++++++++++++++----
 src/box/tuple_compare.cc        |  86 +++++++----
 src/box/tuple_extract_key.cc    |  29 ++--
 src/box/tuple_format.c          |  15 +-
 src/box/tuple_format.h          |   3 +-
 src/box/tuple_hash.cc           |  15 +-
 src/box/vy_stmt.c               |  57 ++++---
 src/box/vy_stmt.h               |  40 +++--
 test/box/errinj.result          |  18 +--
 test/box/upsert_errinj.result   |   2 +-
 test/vinyl/cache.result         |   6 +-
 test/vinyl/quota.result         |  10 +-
 test/vinyl/quota_timeout.result |   8 +-
 test/vinyl/stat.result          | 104 ++++++-------
 28 files changed, 772 insertions(+), 257 deletions(-)
 create mode 100644 perf/CMakeLists.txt
 create mode 100644 perf/tuple.cc

-- 
2.17.1



* [Tarantool-patches] [PATCH 1/4] tuple: introduce unit perf test based on gbench
  2021-01-18 23:50 [Tarantool-patches] [PATCH 0/4] Introduce tiny tuples with perf test Ilya Kosarev via Tarantool-patches
@ 2021-01-18 23:50 ` Ilya Kosarev via Tarantool-patches
  2021-01-18 23:50 ` [Tarantool-patches] [PATCH 2/4] tuple: use getters and setters Ilya Kosarev via Tarantool-patches
                   ` (2 subsequent siblings)
  3 siblings, 0 replies; 5+ messages in thread
From: Ilya Kosarev via Tarantool-patches @ 2021-01-18 23:50 UTC (permalink / raw)
  To: v.shpilevoy, alyapunov; +Cc: tarantool-patches

A perf test is introduced to verify that tuple operation performance stays
fine after the notable struct tuple rework. It covers tuple creation,
deletion, field access and data access. By default, the gbench-based unit
perf tests are built if the Google Benchmark submodule is found; otherwise
they are silently skipped. If needed, building the unit perf tests can be
forced or disabled with -DWITH_UNIT_PERF_TESTS=ON or
-DWITH_UNIT_PERF_TESTS=OFF.

Part of #5385
---
 .gitignore          |   2 +
 CMakeLists.txt      |   1 +
 perf/CMakeLists.txt |  34 ++++++++
 perf/tuple.cc       | 185 ++++++++++++++++++++++++++++++++++++++++++++
 src/box/alter.cc    |   2 +-
 5 files changed, 223 insertions(+), 1 deletion(-)
 create mode 100644 perf/CMakeLists.txt
 create mode 100644 perf/tuple.cc

diff --git a/.gitignore b/.gitignore
index 70784d2b3..4a6ec0e2d 100644
--- a/.gitignore
+++ b/.gitignore
@@ -42,6 +42,7 @@ doc/*/Makefile
 extra/Makefile
 extra/*/Makefile
 extra/luarocks/hardcoded.lua
+perf/Makefile
 test/Makefile
 test/*/Makefile
 Doxyfile.API
@@ -88,6 +89,7 @@ src/box/lua/*.lua.c
 src/tarantool
 src/module.h
 tarantool-*.tar.gz
+perf/*.perftest
 test/lib/
 test/unit/*.test
 test/unit/fiob
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 4fbd19558..17b0aee00 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -550,6 +550,7 @@ include (cmake/rpm.cmake)
 add_subdirectory(src)
 add_subdirectory(extra)
 add_subdirectory(test)
+add_subdirectory(perf)
 add_subdirectory(doc)
 
 option(WITH_NOTIFY_SOCKET "Enable notifications on NOTIFY_SOCKET" ON)
diff --git a/perf/CMakeLists.txt b/perf/CMakeLists.txt
new file mode 100644
index 000000000..5b7038e05
--- /dev/null
+++ b/perf/CMakeLists.txt
@@ -0,0 +1,34 @@
+set(CMAKE_CXX_STANDARD 11)
+
+set(PERF_TESTS_OPTION ON)
+
+if (NOT DEFINED WITH_UNIT_PERF_TESTS)
+    set(PERF_TESTS_OPTION OFF)
+endif()
+
+if (PERF_TESTS_OPTION AND NOT WITH_UNIT_PERF_TESTS)
+    return()
+endif()
+
+find_package(benchmark QUIET)
+if (NOT ${benchmark_FOUND})
+    if (PERF_TESTS_OPTION AND WITH_UNIT_PERF_TESTS)
+        message(FATAL_ERROR "Google Benchmark submodule not found")
+    else()
+        message(AUTHOR_WARNING "Google Benchmark submodule not found")
+    endif()
+    return()
+endif()
+
+file(GLOB all_sources *.c *.cc)
+
+if(NOT TARGET_OS_OPENBSD)
+    set(LIB_DL "dl")
+endif()
+
+include_directories(${MSGPUCK_INCLUDE_DIRS})
+include_directories(${PROJECT_SOURCE_DIR}/src/box)
+include_directories(${CMAKE_SOURCE_DIR}/third_party)
+
+add_executable(tuple.perftest tuple.cc)
+target_link_libraries(tuple.perftest core box tuple benchmark::benchmark)
diff --git a/perf/tuple.cc b/perf/tuple.cc
new file mode 100644
index 000000000..4d50a49b6
--- /dev/null
+++ b/perf/tuple.cc
@@ -0,0 +1,185 @@
+#include "memory.h"
+#include "fiber.h"
+#include "tuple.h"
+
+#include <benchmark/benchmark.h>
+
+#include <vector>
+
+const size_t amount = 1000, index_count = 4;
+char **start;
+char **end;
+std::vector<struct tuple *> tuples;
+std::vector<struct tuple *> big_tuples;
+std::vector<struct tuple *> tiny_tuples;
+
+static void
+create_tuple(benchmark::State& state)
+{
+	size_t i = 0;
+	for (auto _ : state) {
+		struct tuple *tuple = tuple_new(box_tuple_format_default(),
+						start[i % amount],
+						end[i % amount]);
+		tuple_ref(tuple);
+		tuple_unref(tuple);
+		i++;
+	}
+}
+BENCHMARK(create_tuple);
+
+static inline int
+access_fields(struct tuple *tuple)
+{
+	int sum = 0;
+	sum += tuple->refs;
+	sum += tuple->format_id;
+	sum += tuple->bsize;
+	sum += tuple->data_offset;
+	sum += tuple->is_dirty;
+	return sum;
+}
+
+static void
+access_tuple_fields(benchmark::State& state)
+{
+	size_t i = 0;
+	int64_t sum = 0;
+	for (auto _ : state) {
+		struct tuple *tuple = tuples[i++ % tuples.size()];
+		sum += access_fields(tuple);
+	}
+	assert(sum > 0);
+}
+BENCHMARK(access_tuple_fields);
+
+static void
+access_tiny_tuple_fields(benchmark::State& state)
+{
+	size_t i = 0;
+	int64_t sum = 0;
+	for (auto _ : state) {
+		struct tuple *tuple = tiny_tuples[i++ % tiny_tuples.size()];
+		sum += access_fields(tuple);
+	}
+	assert(sum > 0);
+}
+BENCHMARK(access_tiny_tuple_fields);
+
+static void
+access_big_tuple_fields(benchmark::State& state)
+{
+	size_t i = 0;
+	int64_t sum = 0;
+	for (auto _ : state) {
+		struct tuple *tuple = big_tuples[i++ % big_tuples.size()];
+		sum += access_fields(tuple);
+	}
+	assert(sum > 0);
+}
+BENCHMARK(access_big_tuple_fields);
+
+static inline int
+access_data(struct tuple *tuple)
+{
+	uint32_t out;
+	int sum = 0;
+	for (size_t j = 0; j <= index_count; j++)
+		sum += tuple_field_u32(tuple, j, &out);
+	return sum;
+}
+
+static void
+access_tuple_data(benchmark::State& state)
+{
+	size_t i = 0;
+	int64_t sum = 0;
+	for (auto _ : state) {
+		struct tuple *tuple = tuples[i++ % tuples.size()];
+		access_data(tuple);
+	}
+	assert(sum == 0);
+}
+BENCHMARK(access_tuple_data);
+
+static void
+access_tiny_tuple_data(benchmark::State& state)
+{
+	size_t i = 0;
+	int64_t sum = 0;
+	for (auto _ : state) {
+		struct tuple *tuple = tiny_tuples[i++ % tiny_tuples.size()];
+		access_data(tuple);
+	}
+	assert(sum == 0);
+}
+BENCHMARK(access_tiny_tuple_data);
+
+static void
+access_big_tuple_data(benchmark::State& state)
+{
+	size_t i = 0;
+	int64_t sum = 0;
+	for (auto _ : state) {
+		struct tuple *tuple = big_tuples[i++ % big_tuples.size()];
+		access_data(tuple);
+	}
+	assert(sum == 0);
+}
+BENCHMARK(access_big_tuple_data);
+
+int main(int argc, char **argv)
+{
+	memory_init();
+	fiber_init(fiber_c_invoke);
+	tuple_init(NULL);
+
+	char *alloc = (char *)malloc(amount * 5 + amount * (amount - 1) * 2);
+	start = (char **)calloc(amount, sizeof(char *));
+	end = (char **)calloc(amount, sizeof(char *));
+	uint32_t data_size = index_count;
+	start[0] = alloc;
+	for (size_t i = 0; i < amount; i++) {
+		char *cur = start[i];
+		cur = mp_encode_array(cur, ++data_size);
+		for (size_t j = 0; j < data_size; j++)
+			cur = mp_encode_uint(cur, j);
+		end[i] = cur;
+		if (i + 1 < amount)
+			start[i + 1] = cur;
+	}
+
+	uint32_t fieldno0[] = {0};
+	uint32_t fieldno1[] = {1};
+	uint32_t fieldno2[] = {2, 3};
+	uint32_t type1[] = {FIELD_TYPE_UNSIGNED};
+	uint32_t type2[] = {FIELD_TYPE_UNSIGNED, FIELD_TYPE_UNSIGNED};
+	box_key_def_t *key_defs[] = {
+		box_key_def_new(fieldno0, type1, 1),
+		box_key_def_new(fieldno1, type1, 1),
+		box_key_def_new(fieldno2, type2, 2)};
+	box_tuple_format_t *format = box_tuple_format_new(key_defs, 3);
+	for (size_t i = 0; i < amount; i++) {
+		struct tuple *tuple = tuple_new(format, start[i], end[i]);
+		tuple_ref(tuple);
+		if (tuple->bsize <= UINT8_MAX)
+			tiny_tuples.push_back(tuple);
+		else
+			big_tuples.push_back(tuple);
+		tuples.push_back(tuple);
+	}
+
+	::benchmark::Initialize(&argc, argv);
+	if (::benchmark::ReportUnrecognizedArguments(argc, argv)) return 1;
+	::benchmark::RunSpecifiedBenchmarks();
+
+	free(alloc);
+	free(start);
+	free(end);
+	for (auto tuple: tuples)
+		tuple_unref(tuple);
+
+	tuple_free();
+	fiber_free();
+	memory_free();
+}
\ No newline at end of file
diff --git a/src/box/alter.cc b/src/box/alter.cc
index 075b79d33..8c65e7552 100644
--- a/src/box/alter.cc
+++ b/src/box/alter.cc
@@ -4538,7 +4538,7 @@ on_drop_sequence_data_rollback(struct trigger *trigger, void * /* event */)
 	uint32_t id;
 	if (tuple_field_u32(tuple, BOX_SEQUENCE_DATA_FIELD_ID, &id) != 0)
 		return -1;
-	int64_t val;
+	int64_t val = 0;
 	if (tuple_field_i64(tuple, BOX_SEQUENCE_DATA_FIELD_VALUE, &val) != 0)
 		return -1;
 	struct sequence *seq = sequence_by_id(id);
-- 
2.17.1



* [Tarantool-patches] [PATCH 2/4] tuple: use getters and setters
  2021-01-18 23:50 [Tarantool-patches] [PATCH 0/4] Introduce tiny tuples with perf test Ilya Kosarev via Tarantool-patches
  2021-01-18 23:50 ` [Tarantool-patches] [PATCH 1/4] tuple: introduce unit perf test based on gbench Ilya Kosarev via Tarantool-patches
@ 2021-01-18 23:50 ` Ilya Kosarev via Tarantool-patches
  2021-01-18 23:50 ` [Tarantool-patches] [PATCH 3/4] core: introduce tiny tuples Ilya Kosarev via Tarantool-patches
  2021-01-18 23:50 ` [Tarantool-patches] [PATCH 4/4] box: introduce 1 byte field map offsets Ilya Kosarev via Tarantool-patches
  3 siblings, 0 replies; 5+ messages in thread
From: Ilya Kosarev via Tarantool-patches @ 2021-01-18 23:50 UTC (permalink / raw)
  To: v.shpilevoy, alyapunov; +Cc: tarantool-patches

This patch introduces getters and setters for the tuple fields bsize,
data_offset and is_dirty. They are now used instead of direct field access.
This prepares the struct tuple rework that makes its size variable.
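For illustration only, the intended call-site style with a simplified
stand-in for struct tuple (the real accessors are added to src/box/tuple.h
in the hunk below):

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Simplified stand-in; the real struct tuple lives in src/box/tuple.h. */
struct tuple_stub {
        uint32_t bsize;
        uint16_t data_offset;
        bool is_dirty;
};

static inline uint32_t
tuple_stub_bsize(struct tuple_stub *tuple)
{
        assert(tuple != NULL);
        return tuple->bsize;
}

static inline uint16_t
tuple_stub_data_offset(struct tuple_stub *tuple)
{
        assert(tuple != NULL);
        return tuple->data_offset;
}

/* Callers stop touching the fields directly, e.g. a tuple_size()-like helper: */
static inline size_t
tuple_stub_size(struct tuple_stub *tuple)
{
        /* was: return tuple->data_offset + tuple->bsize; */
        return tuple_stub_data_offset(tuple) + tuple_stub_bsize(tuple);
}

int main(void)
{
        struct tuple_stub t = { 8, 10, false };
        assert(tuple_stub_size(&t) == 18);
        return 0;
}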

Prerequisites #5385
---
 perf/tuple.cc                |  8 +++---
 src/box/lua/merger.c         |  2 +-
 src/box/memtx_engine.c       | 11 ++++---
 src/box/memtx_tx.c           | 22 +++++++-------
 src/box/memtx_tx.h           |  2 +-
 src/box/sql.c                |  2 +-
 src/box/tuple.c              | 10 +++----
 src/box/tuple.h              | 56 ++++++++++++++++++++++++++++++++----
 src/box/tuple_compare.cc     |  2 +-
 src/box/tuple_extract_key.cc |  4 +--
 src/box/tuple_format.c       |  2 +-
 src/box/vy_stmt.c            | 22 ++++++++------
 src/box/vy_stmt.h            |  2 +-
 13 files changed, 96 insertions(+), 49 deletions(-)

diff --git a/perf/tuple.cc b/perf/tuple.cc
index 4d50a49b6..da45ec23d 100644
--- a/perf/tuple.cc
+++ b/perf/tuple.cc
@@ -34,9 +34,9 @@ access_fields(struct tuple *tuple)
 	int sum = 0;
 	sum += tuple->refs;
 	sum += tuple->format_id;
-	sum += tuple->bsize;
-	sum += tuple->data_offset;
-	sum += tuple->is_dirty;
+	sum += tuple_bsize(tuple);
+	sum += tuple_data_offset(tuple);
+	sum += tuple_is_dirty(tuple);
 	return sum;
 }
 
@@ -162,7 +162,7 @@ int main(int argc, char **argv)
 	for (size_t i = 0; i < amount; i++) {
 		struct tuple *tuple = tuple_new(format, start[i], end[i]);
 		tuple_ref(tuple);
-		if (tuple->bsize <= UINT8_MAX)
+		if (tuple_bsize(tuple) <= UINT8_MAX)
 			tiny_tuples.push_back(tuple);
 		else
 			big_tuples.push_back(tuple);
diff --git a/src/box/lua/merger.c b/src/box/lua/merger.c
index 89bdfffa6..91a77b2ae 100644
--- a/src/box/lua/merger.c
+++ b/src/box/lua/merger.c
@@ -1115,7 +1115,7 @@ encode_result_buffer(struct lua_State *L, struct merge_source *source,
 	while (result_len < limit && (rc =
 	       merge_source_next(source, NULL, &tuple)) == 0 &&
 	       tuple != NULL) {
-		uint32_t bsize = tuple->bsize;
+		uint32_t bsize = tuple_bsize(tuple);
 		ibuf_reserve(output_buffer, bsize);
 		memcpy(output_buffer->wpos, tuple_data(tuple), bsize);
 		output_buffer->wpos += bsize;
diff --git a/src/box/memtx_engine.c b/src/box/memtx_engine.c
index f79f14b4f..c37b35586 100644
--- a/src/box/memtx_engine.c
+++ b/src/box/memtx_engine.c
@@ -1230,7 +1230,7 @@ memtx_tuple_new(struct tuple_format *format, const char *data, const char *end)
 	 */
 	uint32_t data_offset = sizeof(struct tuple) + field_map_size;
 	if (data_offset > INT16_MAX) {
-		/** tuple->data_offset is 15 bits */
+		/** tuple data_offset can't be more than 15 bits */
 		diag_set(ClientError, ER_TUPLE_METADATA_IS_TOO_BIG,
 			 data_offset);
 		goto end;
@@ -1263,13 +1263,12 @@ memtx_tuple_new(struct tuple_format *format, const char *data, const char *end)
 	tuple = &memtx_tuple->base;
 	tuple->refs = 0;
 	memtx_tuple->version = memtx->snapshot_version;
-	assert(tuple_len <= UINT32_MAX); /* bsize is UINT32_MAX */
-	tuple->bsize = tuple_len;
+	tuple_set_bsize(tuple, tuple_len);
 	tuple->format_id = tuple_format_id(format);
 	tuple_format_ref(format);
-	tuple->data_offset = data_offset;
-	tuple->is_dirty = false;
-	char *raw = (char *) tuple + tuple->data_offset;
+	tuple_set_data_offset(tuple, data_offset);
+	tuple_set_dirty_bit(tuple, false);
+	char *raw = (char *) tuple + data_offset;
 	field_map_build(&builder, raw - field_map_size);
 	memcpy(raw, data, tuple_len);
 	say_debug("%s(%zu) = %p", __func__, tuple_len, memtx_tuple);
diff --git a/src/box/memtx_tx.c b/src/box/memtx_tx.c
index 412099b94..1ef96dc96 100644
--- a/src/box/memtx_tx.c
+++ b/src/box/memtx_tx.c
@@ -194,7 +194,7 @@ memtx_tx_story_new(struct space *space, struct tuple *tuple)
 	/* Free some memory. */
 	for (size_t i = 0; i < TX_MANAGER_GC_STEPS_SIZE; i++)
 		memtx_tx_story_gc_step();
-	assert(!tuple->is_dirty);
+	assert(!tuple_is_dirty(tuple));
 	uint32_t index_count = space->index_count;
 	assert(index_count < BOX_INDEX_MAX);
 	struct mempool *pool = &txm.memtx_tx_story_pool[index_count];
@@ -218,7 +218,7 @@ memtx_tx_story_new(struct space *space, struct tuple *tuple)
 			 "mh_history_node");
 		return NULL;
 	}
-	tuple->is_dirty = true;
+	tuple_set_dirty_bit(tuple, true);
 	tuple_ref(tuple);
 
 	story->space = space;
@@ -298,7 +298,7 @@ memtx_tx_story_delete_del_stmt(struct memtx_story *story)
 static struct memtx_story *
 memtx_tx_story_get(struct tuple *tuple)
 {
-	assert(tuple->is_dirty);
+	assert(tuple_is_dirty(tuple));
 
 	mh_int_t pos = mh_history_find(txm.history, tuple, 0);
 	assert(pos != mh_end(txm.history));
@@ -348,7 +348,7 @@ memtx_tx_story_link_tuple(struct memtx_story *story,
 	assert(link->older.tuple == NULL);
 	if (older_tuple == NULL)
 		return;
-	if (older_tuple->is_dirty) {
+	if (tuple_is_dirty(older_tuple)) {
 		memtx_tx_story_link_story(story,
 					  memtx_tx_story_get(older_tuple),
 					  index);
@@ -586,7 +586,7 @@ memtx_tx_story_find_visible_tuple(struct memtx_story *story,
 			/* The tuple is so old that we don't know its story. */
 			*visible_replaced = story->link[index].older.tuple;
 			assert(*visible_replaced == NULL ||
-			       !(*visible_replaced)->is_dirty);
+			       !tuple_is_dirty(*visible_replaced));
 			break;
 		}
 		story = story->link[index].older.story;
@@ -704,7 +704,7 @@ memtx_tx_history_add_stmt(struct txn_stmt *stmt, struct tuple *old_tuple,
 		del_tuple = old_tuple;
 	}
 	if (del_tuple != NULL && del_story == NULL) {
-		if (del_tuple->is_dirty) {
+		if (tuple_is_dirty(del_tuple)) {
 			del_story = memtx_tx_story_get(del_tuple);
 		} else {
 			del_story = memtx_tx_story_new_del_stmt(del_tuple,
@@ -1030,14 +1030,14 @@ memtx_tx_history_commit_stmt(struct txn_stmt *stmt)
 	size_t res = 0;
 	if (stmt->add_story != NULL) {
 		assert(stmt->add_story->add_stmt == stmt);
-		res += stmt->add_story->tuple->bsize;
+		res += tuple_bsize(stmt->add_story->tuple);
 		stmt->add_story->add_stmt = NULL;
 		stmt->add_story = NULL;
 	}
 	if (stmt->del_story != NULL) {
 		assert(stmt->del_story->del_stmt == stmt);
 		assert(stmt->next_in_del_list == NULL);
-		res -= stmt->del_story->tuple->bsize;
+		res -= tuple_bsize(stmt->del_story->tuple);
 		stmt->del_story->del_stmt = NULL;
 		stmt->del_story = NULL;
 	}
@@ -1049,7 +1049,7 @@ memtx_tx_tuple_clarify_slow(struct txn *txn, struct space *space,
 			    struct tuple *tuple, uint32_t index,
 			    uint32_t mk_index, bool is_prepared_ok)
 {
-	assert(tuple->is_dirty);
+	assert(tuple_is_dirty(tuple));
 	struct memtx_story *story = memtx_tx_story_get(tuple);
 	bool own_change = false;
 	struct tuple *result = NULL;
@@ -1104,7 +1104,7 @@ memtx_tx_story_delete(struct memtx_story *story)
 	assert(pos != mh_end(txm.history));
 	mh_history_del(txm.history, pos, 0);
 
-	story->tuple->is_dirty = false;
+	tuple_set_dirty_bit(story->tuple, false);
 	tuple_unref(story->tuple);
 
 #ifndef NDEBUG
@@ -1133,7 +1133,7 @@ memtx_tx_track_read(struct txn *txn, struct space *space, struct tuple *tuple)
 	struct memtx_story *story;
 	struct tx_read_tracker *tracker = NULL;
 
-	if (!tuple->is_dirty) {
+	if (!tuple_is_dirty(tuple)) {
 		story = memtx_tx_story_new(space, tuple);
 		if (story == NULL)
 			return -1;
diff --git a/src/box/memtx_tx.h b/src/box/memtx_tx.h
index 25a203880..ae41d4501 100644
--- a/src/box/memtx_tx.h
+++ b/src/box/memtx_tx.h
@@ -302,7 +302,7 @@ memtx_tx_tuple_clarify(struct txn *txn, struct space *space,
 {
 	if (!memtx_tx_manager_use_mvcc_engine)
 		return tuple;
-	if (!tuple->is_dirty) {
+	if (!tuple_is_dirty(tuple)) {
 		memtx_tx_track_read(txn, space, tuple);
 		return tuple;
 	}
diff --git a/src/box/sql.c b/src/box/sql.c
index 3d968e56a..59e1e88fc 100644
--- a/src/box/sql.c
+++ b/src/box/sql.c
@@ -1312,5 +1312,5 @@ vdbe_field_ref_prepare_tuple(struct vdbe_field_ref *field_ref,
 			     struct tuple *tuple)
 {
 	vdbe_field_ref_create(field_ref, tuple, tuple_data(tuple),
-			      tuple->bsize);
+			      tuple_bsize(tuple));
 }
diff --git a/src/box/tuple.c b/src/box/tuple.c
index c2023e4e8..c73ad4566 100644
--- a/src/box/tuple.c
+++ b/src/box/tuple.c
@@ -85,7 +85,7 @@ runtime_tuple_new(struct tuple_format *format, const char *data, const char *end
 	uint32_t field_map_size = field_map_build_size(&builder);
 	uint32_t data_offset = sizeof(struct tuple) + field_map_size;
 	if (data_offset > INT16_MAX) {
-		/** tuple->data_offset is 15 bits */
+		/** tuple data_offset can't be more than 15 bits */
 		diag_set(ClientError, ER_TUPLE_METADATA_IS_TOO_BIG,
 			 data_offset);
 		goto end;
@@ -101,11 +101,11 @@ runtime_tuple_new(struct tuple_format *format, const char *data, const char *end
 	}
 
 	tuple->refs = 0;
-	tuple->bsize = data_len;
+	tuple_set_bsize(tuple, data_len);
 	tuple->format_id = tuple_format_id(format);
 	tuple_format_ref(format);
-	tuple->data_offset = data_offset;
-	tuple->is_dirty = false;
+	tuple_set_data_offset(tuple, data_offset);
+	tuple_set_dirty_bit(tuple, false);
 	char *raw = (char *) tuple + data_offset;
 	field_map_build(&builder, raw - field_map_size);
 	memcpy(raw, data, data_len);
@@ -611,7 +611,7 @@ size_t
 box_tuple_bsize(box_tuple_t *tuple)
 {
 	assert(tuple != NULL);
-	return tuple->bsize;
+	return tuple_bsize(tuple);
 }
 
 ssize_t
diff --git a/src/box/tuple.h b/src/box/tuple.h
index e4267a4ec..aac054787 100644
--- a/src/box/tuple.h
+++ b/src/box/tuple.h
@@ -344,12 +344,55 @@ struct PACKED tuple
 	 */
 };
 
+static inline void
+tuple_set_dirty_bit(struct tuple *tuple, bool is_dirty)
+{
+	assert(tuple != NULL);
+	tuple->is_dirty = is_dirty;
+}
+
+static inline bool
+tuple_is_dirty(struct tuple *tuple)
+{
+	assert(tuple != NULL);
+	return tuple->is_dirty;
+}
+
+static inline void
+tuple_set_bsize(struct tuple *tuple, uint32_t bsize)
+{
+	assert(tuple != NULL);
+	assert(bsize <= UINT32_MAX); /* bsize is UINT32_MAX */
+	tuple->bsize = bsize;
+}
+
+static inline uint32_t
+tuple_bsize(struct tuple *tuple)
+{
+	assert(tuple != NULL);
+	return tuple->bsize;
+}
+
+static inline void
+tuple_set_data_offset(struct tuple *tuple, uint16_t data_offset)
+{
+	assert(tuple != NULL);
+	tuple->data_offset = data_offset;
+}
+
+static inline uint16_t
+tuple_data_offset(struct tuple *tuple)
+{
+	assert(tuple != NULL);
+	return tuple->data_offset;
+}
+
 /** Size of the tuple including size of struct tuple. */
 static inline size_t
 tuple_size(struct tuple *tuple)
 {
 	/* data_offset includes sizeof(struct tuple). */
-	return tuple->data_offset + tuple->bsize;
+	return tuple_data_offset(tuple) + tuple_bsize(tuple);
 }
 
 /**
@@ -360,7 +403,7 @@ tuple_size(struct tuple *tuple)
 static inline const char *
 tuple_data(struct tuple *tuple)
 {
-	return (const char *) tuple + tuple->data_offset;
+	return (const char *)tuple + tuple_data_offset(tuple);
 }
 
 /**
@@ -381,8 +424,8 @@ tuple_data_or_null(struct tuple *tuple)
 static inline const char *
 tuple_data_range(struct tuple *tuple, uint32_t *p_size)
 {
-	*p_size = tuple->bsize;
-	return (const char *) tuple + tuple->data_offset;
+	*p_size = tuple_bsize(tuple);
+	return (const char *)tuple + tuple_data_offset(tuple);
 }
 
 /**
@@ -535,7 +578,8 @@ tuple_validate(struct tuple_format *format, struct tuple *tuple)
 static inline const uint32_t *
 tuple_field_map(struct tuple *tuple)
 {
-	return (const uint32_t *) ((const char *) tuple + tuple->data_offset);
+	return (const uint32_t *) ((const char *) tuple +
+				   tuple_data_offset(tuple));
 }
 
 /**
@@ -1144,7 +1188,7 @@ tuple_unref(struct tuple *tuple)
 	if (unlikely(tuple->is_bigref))
 		tuple_unref_slow(tuple);
 	else if (--tuple->refs == 0) {
-		assert(!tuple->is_dirty);
+		assert(!tuple_is_dirty(tuple));
 		tuple_delete(tuple);
 	}
 }
diff --git a/src/box/tuple_compare.cc b/src/box/tuple_compare.cc
index 0946d77f8..eb148c2f5 100644
--- a/src/box/tuple_compare.cc
+++ b/src/box/tuple_compare.cc
@@ -874,7 +874,7 @@ tuple_compare_with_key_sequential(struct tuple *tuple, hint_t tuple_hint,
 		 * Key's and tuple's first field_count fields are
 		 * equal, and their bsize too.
 		 */
-		key += tuple->bsize - mp_sizeof_array(field_count);
+		key += tuple_bsize(tuple) - mp_sizeof_array(field_count);
 		for (uint32_t i = field_count; i < part_count;
 		     ++i, mp_next(&key)) {
 			if (mp_typeof(*key) != MP_NIL)
diff --git a/src/box/tuple_extract_key.cc b/src/box/tuple_extract_key.cc
index c1ad3929e..795dc6559 100644
--- a/src/box/tuple_extract_key.cc
+++ b/src/box/tuple_extract_key.cc
@@ -95,7 +95,7 @@ tuple_extract_key_sequential(struct tuple *tuple, struct key_def *key_def,
 	assert(!has_optional_parts || key_def->is_nullable);
 	assert(has_optional_parts == key_def->has_optional_parts);
 	const char *data = tuple_data(tuple);
-	const char *data_end = data + tuple->bsize;
+	const char *data_end = data + tuple_bsize(tuple);
 	return tuple_extract_key_sequential_raw<has_optional_parts>(data,
 								    data_end,
 								    key_def,
@@ -127,7 +127,7 @@ tuple_extract_key_slowpath(struct tuple *tuple, struct key_def *key_def,
 	uint32_t bsize = mp_sizeof_array(part_count);
 	struct tuple_format *format = tuple_format(tuple);
 	const uint32_t *field_map = tuple_field_map(tuple);
-	const char *tuple_end = data + tuple->bsize;
+	const char *tuple_end = data + tuple_bsize(tuple);
 
 	/* Calculate the key size. */
 	for (uint32_t i = 0; i < part_count; ++i) {
diff --git a/src/box/tuple_format.c b/src/box/tuple_format.c
index 5f5e833b4..dca0fbddc 100644
--- a/src/box/tuple_format.c
+++ b/src/box/tuple_format.c
@@ -482,7 +482,7 @@ tuple_format_create(struct tuple_format *format, struct key_def * const *keys,
 	       || json_token_is_multikey(&tuple_format_field(format, 0)->token));
 	size_t field_map_size = -current_slot * sizeof(uint32_t);
 	if (field_map_size > INT16_MAX) {
-		/** tuple->data_offset is 15 bits */
+		/** tuple data_offset can't be more than 15 bits */
 		diag_set(ClientError, ER_INDEX_FIELD_COUNT_LIMIT,
 			 -current_slot);
 		return -1;
diff --git a/src/box/vy_stmt.c b/src/box/vy_stmt.c
index 92e0aa1c5..accafc654 100644
--- a/src/box/vy_stmt.c
+++ b/src/box/vy_stmt.c
@@ -161,7 +161,7 @@ vy_stmt_alloc(struct tuple_format *format, uint32_t data_offset, uint32_t bsize)
 	assert(data_offset >= sizeof(struct vy_stmt) + format->field_map_size);
 
 	if (data_offset > INT16_MAX) {
-		/** tuple->data_offset is 15 bits */
+		/** tuple data_offset can't be more than 15 bits */
 		diag_set(ClientError, ER_TUPLE_METADATA_IS_TOO_BIG,
 			 data_offset);
 		return NULL;
@@ -196,9 +196,9 @@ vy_stmt_alloc(struct tuple_format *format, uint32_t data_offset, uint32_t bsize)
 	tuple->format_id = tuple_format_id(format);
 	if (cord_is_main())
 		tuple_format_ref(format);
-	tuple->bsize = bsize;
-	tuple->data_offset = data_offset;
-	tuple->is_dirty = false;
+	tuple_set_bsize(tuple, bsize);
+	tuple_set_data_offset(tuple, data_offset);
+	tuple_set_dirty_bit(tuple, false);
 	vy_stmt_set_lsn(tuple, 0);
 	vy_stmt_set_type(tuple, 0);
 	vy_stmt_set_flags(tuple, 0);
@@ -214,11 +214,12 @@ vy_stmt_dup(struct tuple *stmt)
 	 * the original tuple.
 	 */
 	struct tuple *res = vy_stmt_alloc(tuple_format(stmt),
-					  stmt->data_offset, stmt->bsize);
+					  tuple_data_offset(stmt),
+					  tuple_bsize(stmt));
 	if (res == NULL)
 		return NULL;
 	assert(tuple_size(res) == tuple_size(stmt));
-	assert(res->data_offset == stmt->data_offset);
+	assert(tuple_data_offset(res) == tuple_data_offset(stmt));
 	memcpy(res, stmt, tuple_size(stmt));
 	res->refs = 1;
 	return res;
@@ -411,17 +412,20 @@ vy_stmt_replace_from_upsert(struct tuple *upsert)
 	/* Get statement size without UPSERT operations */
 	uint32_t bsize;
 	vy_upsert_data_range(upsert, &bsize);
-	assert(bsize <= upsert->bsize);
+	assert(bsize <= tuple_bsize(upsert));
 
 	/* Copy statement data excluding UPSERT operations */
 	struct tuple_format *format = tuple_format(upsert);
-	struct tuple *replace = vy_stmt_alloc(format, upsert->data_offset, bsize);
+	struct tuple *replace = vy_stmt_alloc(format,
+					      tuple_data_offset(upsert),
+					      bsize);
 	if (replace == NULL)
 		return NULL;
 	/* Copy both data and field_map. */
 	char *dst = (char *)replace + sizeof(struct vy_stmt);
 	char *src = (char *)upsert + sizeof(struct vy_stmt);
-	memcpy(dst, src, upsert->data_offset + bsize - sizeof(struct vy_stmt));
+	memcpy(dst, src, tuple_data_offset(upsert) +
+			 bsize - sizeof(struct vy_stmt));
 	vy_stmt_set_type(replace, IPROTO_REPLACE);
 	vy_stmt_set_lsn(replace, vy_stmt_lsn(upsert));
 	return replace;
diff --git a/src/box/vy_stmt.h b/src/box/vy_stmt.h
index 24c7eaad7..69f46d67c 100644
--- a/src/box/vy_stmt.h
+++ b/src/box/vy_stmt.h
@@ -583,7 +583,7 @@ vy_stmt_upsert_ops(struct tuple *tuple, uint32_t *mp_size)
 	assert(vy_stmt_type(tuple) == IPROTO_UPSERT);
 	const char *mp = tuple_data(tuple);
 	mp_next(&mp);
-	*mp_size = tuple_data(tuple) + tuple->bsize - mp;
+	*mp_size = tuple_data(tuple) + tuple_bsize(tuple) - mp;
 	return mp;
 }
 
-- 
2.17.1



* [Tarantool-patches] [PATCH 3/4] core: introduce tiny tuples
  2021-01-18 23:50 [Tarantool-patches] [PATCH 0/4] Introduce tiny tuples with perf test Ilya Kosarev via Tarantool-patches
  2021-01-18 23:50 ` [Tarantool-patches] [PATCH 1/4] tuple: introduce unit perf test based on gbench Ilya Kosarev via Tarantool-patches
  2021-01-18 23:50 ` [Tarantool-patches] [PATCH 2/4] tuple: use getters and setters Ilya Kosarev via Tarantool-patches
@ 2021-01-18 23:50 ` Ilya Kosarev via Tarantool-patches
  2021-01-18 23:50 ` [Tarantool-patches] [PATCH 4/4] box: introduce 1 byte field map offsets Ilya Kosarev via Tarantool-patches
  3 siblings, 0 replies; 5+ messages in thread
From: Ilya Kosarev via Tarantool-patches @ 2021-01-18 23:50 UTC (permalink / raw)
  To: v.shpilevoy, alyapunov; +Cc: tarantool-patches

Tuple bsize used to be stored in a 4-byte uint32_t field and tuple
data_offset in a 2-byte uint16_t field. Now the tuple ends with a flexible
array member (0 or 4 bytes), so they can still be stored the old, wide way
when needed. When possible, though, bsize and data_offset take 1 byte each;
such tuples are called tiny tuples and need only 6 bytes of metadata
instead of 10.
Since struct tuple is reworked for this and now has a variable size,
struct vy_stmt, which inherits from it, is reworked as well. Its members
(except struct tuple itself) are no longer listed directly. They were not
accessed directly before, and hiding them prevents accidental direct use
from now on. lsn, type and flags of struct vy_stmt must now be accessed
only through their getters and setters.
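A standalone sketch of the new vy_stmt accessor arithmetic: lsn, type and
flags sit right after the (now variable-size) tuple header and are reached
by offset arithmetic instead of named members. BASIC_TUPLE_SIZE is a
placeholder for size_of_basic_tuple() from this patch, and the load/store
helpers are simplified local versions of Tarantool's:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

enum { BASIC_TUPLE_SIZE = 10 }; /* placeholder value, sketch only */

static inline void store_u64(void *p, uint64_t v) { memcpy(p, &v, sizeof(v)); }
static inline uint64_t load_u64(const void *p)
{
        uint64_t v;
        memcpy(&v, p, sizeof(v));
        return v;
}

static inline void
sketch_stmt_set_lsn(char *stmt, int64_t lsn)
{
        store_u64(stmt + BASIC_TUPLE_SIZE, (uint64_t)lsn);
}

static inline int64_t
sketch_stmt_lsn(char *stmt)
{
        return (int64_t)load_u64(stmt + BASIC_TUPLE_SIZE);
}

static inline void
sketch_stmt_set_type(char *stmt, uint8_t type)
{
        *(uint8_t *)(stmt + BASIC_TUPLE_SIZE + sizeof(int64_t)) = type;
}

static inline uint8_t
sketch_stmt_type(char *stmt)
{
        return *(uint8_t *)(stmt + BASIC_TUPLE_SIZE + sizeof(int64_t));
}

int main(void)
{
        /* header + lsn + type + flags */
        char *stmt = calloc(1, BASIC_TUPLE_SIZE + sizeof(int64_t) + 2);
        sketch_stmt_set_lsn(stmt, 42);
        sketch_stmt_set_type(stmt, 3);
        printf("lsn=%lld type=%u\n", (long long)sketch_stmt_lsn(stmt),
               (unsigned)sketch_stmt_type(stmt));
        free(stmt);
        return 0;
}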

Part of #5385
---
 src/box/memtx_engine.c          |  13 ++-
 src/box/tuple.c                 |  14 ++-
 src/box/tuple.h                 | 184 ++++++++++++++++++++++++++++----
 src/box/vy_stmt.c               |  19 ++--
 src/box/vy_stmt.h               |  38 +++++--
 test/box/errinj.result          |  18 ++--
 test/box/upsert_errinj.result   |   2 +-
 test/vinyl/cache.result         |   6 +-
 test/vinyl/quota.result         |  10 +-
 test/vinyl/quota_timeout.result |   8 +-
 test/vinyl/stat.result          | 104 +++++++++---------
 11 files changed, 300 insertions(+), 116 deletions(-)

diff --git a/src/box/memtx_engine.c b/src/box/memtx_engine.c
index c37b35586..adb90e1c8 100644
--- a/src/box/memtx_engine.c
+++ b/src/box/memtx_engine.c
@@ -1219,6 +1219,8 @@ memtx_tuple_new(struct tuple_format *format, const char *data, const char *end)
 	struct tuple *tuple = NULL;
 	struct region *region = &fiber()->gc;
 	size_t region_svp = region_used(region);
+	size_t tuple_len = end - data;
+	bool is_tiny = (tuple_len <= UINT8_MAX);
 	struct field_map_builder builder;
 	if (tuple_field_map_create(format, data, true, &builder) != 0)
 		goto end;
@@ -1228,7 +1230,12 @@ memtx_tuple_new(struct tuple_format *format, const char *data, const char *end)
 	 * tuple base, not from memtx_tuple, because the struct
 	 * tuple is not the first field of the memtx_tuple.
 	 */
-	uint32_t data_offset = sizeof(struct tuple) + field_map_size;
+	is_tiny = (is_tiny && (sizeof(struct tuple) +
+			       field_map_size <= MAX_TINY_DATA_OFFSET));
+	uint32_t extra_size = field_map_size +
+			      !is_tiny * sizeof(struct tuple_extra);
+	uint32_t data_offset = sizeof(struct tuple) + extra_size;
+	assert(!is_tiny || data_offset <= MAX_TINY_DATA_OFFSET);
 	if (data_offset > INT16_MAX) {
 		/** tuple data_offset can't be more than 15 bits */
 		diag_set(ClientError, ER_TUPLE_METADATA_IS_TOO_BIG,
@@ -1236,8 +1243,7 @@ memtx_tuple_new(struct tuple_format *format, const char *data, const char *end)
 		goto end;
 	}
 
-	size_t tuple_len = end - data;
-	size_t total = sizeof(struct memtx_tuple) + field_map_size + tuple_len;
+	size_t total = sizeof(struct memtx_tuple) + tuple_len + extra_size;
 
 	ERROR_INJECT(ERRINJ_TUPLE_ALLOC, {
 		diag_set(OutOfMemory, total, "slab allocator", "memtx_tuple");
@@ -1263,6 +1269,7 @@ memtx_tuple_new(struct tuple_format *format, const char *data, const char *end)
 	tuple = &memtx_tuple->base;
 	tuple->refs = 0;
 	memtx_tuple->version = memtx->snapshot_version;
+	tuple_set_tiny_bit(tuple, is_tiny);
 	tuple_set_bsize(tuple, tuple_len);
 	tuple->format_id = tuple_format_id(format);
 	tuple_format_ref(format);
diff --git a/src/box/tuple.c b/src/box/tuple.c
index c73ad4566..db95d5872 100644
--- a/src/box/tuple.c
+++ b/src/box/tuple.c
@@ -79,11 +79,17 @@ runtime_tuple_new(struct tuple_format *format, const char *data, const char *end
 	struct tuple *tuple = NULL;
 	struct region *region = &fiber()->gc;
 	size_t region_svp = region_used(region);
+	size_t data_len = end - data;
+	bool is_tiny = (data_len <= UINT8_MAX);
 	struct field_map_builder builder;
 	if (tuple_field_map_create(format, data, true, &builder) != 0)
 		goto end;
 	uint32_t field_map_size = field_map_build_size(&builder);
-	uint32_t data_offset = sizeof(struct tuple) + field_map_size;
+	is_tiny = (is_tiny && (sizeof(struct tuple) +
+			       field_map_size <= MAX_TINY_DATA_OFFSET));
+	uint32_t data_offset = sizeof(struct tuple) + field_map_size +
+			       !is_tiny * sizeof(uint32_t);
+	assert(!is_tiny || data_offset <= MAX_TINY_DATA_OFFSET);
 	if (data_offset > INT16_MAX) {
 		/** tuple data_offset can't be more than 15 bits */
 		diag_set(ClientError, ER_TUPLE_METADATA_IS_TOO_BIG,
@@ -91,9 +97,8 @@ runtime_tuple_new(struct tuple_format *format, const char *data, const char *end
 		goto end;
 	}
 
-	size_t data_len = end - data;
-	size_t total = sizeof(struct tuple) + field_map_size + data_len;
-	tuple = (struct tuple *) smalloc(&runtime_alloc, total);
+	size_t total = data_offset + data_len;
+	tuple = (struct tuple *)smalloc(&runtime_alloc, total);
 	if (tuple == NULL) {
 		diag_set(OutOfMemory, (unsigned) total,
 			 "malloc", "tuple");
@@ -101,6 +106,7 @@ runtime_tuple_new(struct tuple_format *format, const char *data, const char *end
 	}
 
 	tuple->refs = 0;
+	tuple_set_tiny_bit(tuple, is_tiny);
 	tuple_set_bsize(tuple, data_len);
 	tuple->format_id = tuple_format_id(format);
 	tuple_format_ref(format);
diff --git a/src/box/tuple.h b/src/box/tuple.h
index aac054787..9eac52f01 100644
--- a/src/box/tuple.h
+++ b/src/box/tuple.h
@@ -296,6 +296,64 @@ box_tuple_validate(box_tuple_t *tuple, box_tuple_format_t *format);
 
 /** \endcond public */
 
+#define MAX_TINY_DATA_OFFSET 63
+
+/** Part of struct tuple. */
+struct tiny_tuple_props {
+	/**
+	 * Tuple is tiny in case it's bsize fits in 1 byte,
+	 * it's data_offset fits in 6 bits and field map
+	 * offsets fit into 1 byte each.
+	 */
+	bool is_tiny : 1;
+	/**
+	 * The tuple (if it's found in index for example) could be invisible
+	 * for current transactions. The flag means that the tuple must
+	 * be clarified by the transaction engine.
+	 */
+	bool is_dirty : 1;
+	/**
+	 * Offset to the MessagePack from the beginning of the
+	 * tuple. 6 bits in case tuple is tiny.
+	 */
+	uint8_t data_offset : 6;
+	/**
+	 * Length of the MessagePack data in raw part of the
+	 * tuple. 8 bits in case tuple is tiny.
+	 */
+	uint8_t bsize;
+};
+
+/** Part of struct tuple. */
+struct tuple_props {
+	/**
+	 * Tuple is tiny in case it's bsize fits in 1 byte,
+	 * it's data_offset fits in 6 bits and field map
+	 * offsets fit into 1 byte each.
+	 */
+	bool is_tiny : 1;
+	/**
+	 * Offset to the MessagePack from the beginning of the
+	 * tuple. 15 bits in case tuple is not tiny.
+	 */
+	uint16_t data_offset: 15;
+};
+
+/** Part of struct tuple. */
+struct PACKED tuple_extra {
+	/**
+	 * The tuple (if it's found in index for example) could be invisible
+	 * for current transactions. The flag means that the tuple must
+	 * be clarified by the transaction engine.
+	 */
+	bool is_dirty : 1;
+	/**
+	 * Length of the MessagePack data in raw part of the
+	 * tuple. 31 bits in case tuple is not tiny.
+	 */
+	uint32_t bsize : 31;
+};
+
 /**
  * An atom of Tarantool storage. Represents MsgPack Array.
  * Tuple has the following structure:
@@ -323,20 +381,40 @@ struct PACKED tuple
 	/** Format identifier. */
 	uint16_t format_id;
 	/**
-	 * Length of the MessagePack data in raw part of the
-	 * tuple.
-	 */
-	uint32_t bsize;
-	/**
-	 * Offset to the MessagePack from the begin of the tuple.
+	 * Both structs in the following union contain is_tiny bit
+	 * as the first field. It is guaranteed it will be the same
+	 * for both of them in any case. Tuple is tiny in case it's
+	 * bsize fits in 1 byte, it's data_offset fits in 6 bits and
+	 * field map offsets fit into 1 byte each. In case it is tiny
+	 * we will obtain data_offset, bsize and is_dirty flag from the
+	 * struct tiny_tuple_props. Otherwise we will obtain data_offset
+	 * from struct tuple_props, while bsize and is_dirty flag will be
+	 * stored in struct tuple_extra (4 bytes at the end of the tuple).
 	 */
-	uint16_t data_offset : 15;
+	union {
+		/**
+		 * In case the tuple is tiny this struct is used to obtain
+		 * data_offset (offset to the MessagePack from the beginning
+		 * of the tuple), bsize (the length of the MessagePack data
+		 * in the raw part of the tuple) and is_dirty flag (true if
+		 * the tuple must be clarified by transaction engine).
+		 */
+		struct tiny_tuple_props tiny_props;
+		/**
+		 * In case the tuple is not tiny this struct is used to obtain
+		 * data_offset (offset to the MessagePack from the beginning of
+		 * the tuple).
+		 */
+		struct tuple_props props;
+	};
 	/**
-	 * The tuple (if it's found in index for example) could be invisible
-	 * for current transactions. The flag means that the tuple must
-	 * be clarified by transaction engine.
+	 * In case the tuple is not tiny this struct contains is_dirty
+	 * flag (true if the tuple must be clarified by transaction
+	 * engine) and bsize (the length of the MessagePack data
+	 * in the raw part of the tuple).
+	 * If the tuple is tiny, this fields is 0 bytes.
 	 */
-	bool is_dirty : 1;
+	struct tuple_extra extra[];
 	/**
 	 * Engine specific fields and offsets array concatenated
 	 * with MessagePack fields array.
@@ -344,47 +422,117 @@ struct PACKED tuple
 	 */
 };
 
+/**
+ * According to C standard sections 6.5.2.3-5 it is guaranteed
+ * if a union contains several structures that share a common
+ * initial sequence of members (bool is_tiny in this case), it is
+ * permitted to inspect the common initial part of any of them
+ * anywhere that a declaration of the complete type of the union
+ * is visible. Two structures share a common initial sequence if
+ * corresponding members have compatible types (and, for
+ * bit-fields, the same widths) for a sequence of one or more
+ * initial members. Thus we are guaranteed that is_tiny bit is
+ * same for both struct tiny_tuple_props and struct tuple_props
+ * and we can simply read or write any of them.
+ */
+static inline void
+tuple_set_tiny_bit(struct tuple *tuple, bool is_tiny)
+{
+	assert(tuple != NULL);
+	tuple->props.is_tiny = is_tiny;
+}
+
+static inline bool
+tuple_is_tiny(struct tuple *tuple)
+{
+	assert(tuple != NULL);
+	return tuple->props.is_tiny;
+}
+
+static inline void
+tiny_tuple_set_dirty_bit(struct tuple *tuple, bool is_dirty)
+{
+	tuple->tiny_props.is_dirty = is_dirty;
+}
+
+static inline void
+basic_tuple_set_dirty_bit(struct tuple *tuple, bool is_dirty)
+{
+	tuple->extra->is_dirty = is_dirty;
+}
+
 static inline void
 tuple_set_dirty_bit(struct tuple *tuple, bool is_dirty)
 {
 	assert(tuple != NULL);
-	tuple->is_dirty = is_dirty;
+	tuple->props.is_tiny ? tiny_tuple_set_dirty_bit(tuple, is_dirty) :
+			       basic_tuple_set_dirty_bit(tuple, is_dirty);
 }
 
 static inline bool
 tuple_is_dirty(struct tuple *tuple)
 {
 	assert(tuple != NULL);
-	return tuple->is_dirty;
+	return tuple->props.is_tiny ? tuple->tiny_props.is_dirty :
+				      tuple->extra->is_dirty;
+}
+
+static inline void
+tiny_tuple_set_bsize(struct tuple *tuple, uint32_t bsize)
+{
+	assert(bsize <= UINT8_MAX); /* bsize has to fit in UINT8_MAX */
+	tuple->tiny_props.bsize = bsize;
+}
+
+static inline void
+basic_tuple_set_bsize(struct tuple *tuple, uint32_t bsize)
+{
+	assert(bsize <= INT32_MAX); /* bsize has to fit in INT32_MAX */
+	tuple->extra->bsize = bsize;
 }
 
 static inline void
 tuple_set_bsize(struct tuple *tuple, uint32_t bsize)
 {
 	assert(tuple != NULL);
-	assert(bsize <= UINT32_MAX); /* bsize is UINT32_MAX */
-	tuple->bsize = bsize;
+	tuple->props.is_tiny ? tiny_tuple_set_bsize(tuple, bsize) :
+			       basic_tuple_set_bsize(tuple, bsize);
 }
 
 static inline uint32_t
 tuple_bsize(struct tuple *tuple)
 {
 	assert(tuple != NULL);
-	return tuple->bsize;
+	return tuple->props.is_tiny ? tuple->tiny_props.bsize :
+				      tuple->extra->bsize;
+}
+
+static inline void
+tiny_tuple_set_data_offset(struct tuple *tuple, uint8_t data_offset)
+{
+	tuple->tiny_props.data_offset = data_offset;
+}
+
+static inline void
+basic_tuple_set_data_offset(struct tuple *tuple, uint16_t data_offset)
+{
+	tuple->props.data_offset = data_offset;
 }
 
 static inline void
 tuple_set_data_offset(struct tuple *tuple, uint16_t data_offset)
 {
 	assert(tuple != NULL);
-	tuple->data_offset = data_offset;
+	tuple->props.is_tiny ? tiny_tuple_set_data_offset(tuple, data_offset) :
+			       basic_tuple_set_data_offset(tuple, data_offset);
 }
 
 static inline uint16_t
 tuple_data_offset(struct tuple *tuple)
 {
 	assert(tuple != NULL);
-	return tuple->data_offset;
+	return tuple->props.is_tiny ? tuple->tiny_props.data_offset :
+				      tuple->props.data_offset;
 }
 
 /** Size of the tuple including size of struct tuple. */
diff --git a/src/box/vy_stmt.c b/src/box/vy_stmt.c
index accafc654..97bc1e408 100644
--- a/src/box/vy_stmt.c
+++ b/src/box/vy_stmt.c
@@ -158,7 +158,8 @@ vy_stmt_format_new(struct vy_stmt_env *env, struct key_def *const *keys,
 static struct tuple *
 vy_stmt_alloc(struct tuple_format *format, uint32_t data_offset, uint32_t bsize)
 {
-	assert(data_offset >= sizeof(struct vy_stmt) + format->field_map_size);
+	assert(data_offset >= size_of_struct_vy_stmt() +
+			      format->field_map_size);
 
 	if (data_offset > INT16_MAX) {
 		/** tuple data_offset can't be more than 15 bits */
@@ -196,6 +197,7 @@ vy_stmt_alloc(struct tuple_format *format, uint32_t data_offset, uint32_t bsize)
 	tuple->format_id = tuple_format_id(format);
 	if (cord_is_main())
 		tuple_format_ref(format);
+	tuple_set_tiny_bit(tuple, false);
 	tuple_set_bsize(tuple, bsize);
 	tuple_set_data_offset(tuple, data_offset);
 	tuple_set_dirty_bit(tuple, false);
@@ -280,11 +282,12 @@ vy_key_new(struct tuple_format *format, const char *key, uint32_t part_count)
 	/* Allocate stmt */
 	uint32_t key_size = key_end - key;
 	uint32_t bsize = mp_sizeof_array(part_count) + key_size;
-	struct tuple *stmt = vy_stmt_alloc(format, sizeof(struct vy_stmt), bsize);
+	struct tuple *stmt = vy_stmt_alloc(format, size_of_struct_vy_stmt(),
+						   bsize);
 	if (stmt == NULL)
 		return NULL;
 	/* Copy MsgPack data */
-	char *raw = (char *) stmt + sizeof(struct vy_stmt);
+	char *raw = (char *)stmt + size_of_struct_vy_stmt();
 	char *data = mp_encode_array(raw, part_count);
 	memcpy(data, key, key_size);
 	assert(data + key_size == raw + bsize);
@@ -351,7 +354,7 @@ vy_stmt_new_with_ops(struct tuple_format *format, const char *tuple_begin,
 	 */
 	size_t mpsize = (tuple_end - tuple_begin);
 	size_t bsize = mpsize + ops_size;
-	stmt = vy_stmt_alloc(format, sizeof(struct vy_stmt) +
+	stmt = vy_stmt_alloc(format, size_of_struct_vy_stmt() +
 			     field_map_size, bsize);
 	if (stmt == NULL)
 		goto end;
@@ -422,10 +425,10 @@ vy_stmt_replace_from_upsert(struct tuple *upsert)
 	if (replace == NULL)
 		return NULL;
 	/* Copy both data and field_map. */
-	char *dst = (char *)replace + sizeof(struct vy_stmt);
-	char *src = (char *)upsert + sizeof(struct vy_stmt);
+	char *dst = (char *)replace + size_of_struct_vy_stmt();
+	char *src = (char *)upsert + size_of_struct_vy_stmt();
 	memcpy(dst, src, tuple_data_offset(upsert) +
-			 bsize - sizeof(struct vy_stmt));
+			 bsize - size_of_struct_vy_stmt());
 	vy_stmt_set_type(replace, IPROTO_REPLACE);
 	vy_stmt_set_lsn(replace, vy_stmt_lsn(upsert));
 	return replace;
@@ -502,7 +505,7 @@ vy_stmt_new_surrogate_delete_raw(struct tuple_format *format,
 	assert(pos <= data + src_size);
 	uint32_t bsize = pos - data;
 	uint32_t field_map_size = field_map_build_size(&builder);
-	stmt = vy_stmt_alloc(format, sizeof(struct vy_stmt) + field_map_size,
+	stmt = vy_stmt_alloc(format, size_of_struct_vy_stmt() + field_map_size,
 			     bsize);
 	if (stmt == NULL)
 		goto out;
diff --git a/src/box/vy_stmt.h b/src/box/vy_stmt.h
index 69f46d67c..50b196fc3 100644
--- a/src/box/vy_stmt.h
+++ b/src/box/vy_stmt.h
@@ -171,9 +171,15 @@ enum {
  */
 struct vy_stmt {
 	struct tuple base;
-	int64_t lsn;
-	uint8_t  type; /* IPROTO_INSERT/REPLACE/UPSERT/DELETE */
-	uint8_t flags;
+	/**
+	 * Following fields are stored without direct naming
+	 * due to the fact that struct tuple has variable size.
+	 * It also allows us to store them in compact way.
+	 * Use getters and setters to access them.
+	 * int64_t lsn;
+	 * uint8_t type; IPROTO_INSERT/REPLACE/UPSERT/DELETE
+	 * uint8_t flags;
+	 */
 	/**
 	 * Offsets array concatenated with MessagePack fields
 	 * array.
@@ -181,46 +187,60 @@ struct vy_stmt {
 	 */
 };
 
+static inline size_t
+size_of_basic_tuple(void)
+{
+	return sizeof(struct tuple) + sizeof(struct tuple_extra);
+}
+
+static inline size_t
+size_of_struct_vy_stmt(void)
+{
+	return size_of_basic_tuple() + sizeof(int64_t) + 2 * sizeof(uint8_t);
+}
+
 /** Get LSN of the vinyl statement. */
 static inline int64_t
 vy_stmt_lsn(struct tuple *stmt)
 {
-	return ((struct vy_stmt *) stmt)->lsn;
+	return load_u64((void *)stmt + size_of_basic_tuple());
 }
 
 /** Set LSN of the vinyl statement. */
 static inline void
 vy_stmt_set_lsn(struct tuple *stmt, int64_t lsn)
 {
-	((struct vy_stmt *) stmt)->lsn = lsn;
+	store_u64((void *)stmt + size_of_basic_tuple(), lsn);
 }
 
 /** Get type of the vinyl statement. */
 static inline enum iproto_type
 vy_stmt_type(struct tuple *stmt)
 {
-	return (enum iproto_type)((struct vy_stmt *) stmt)->type;
+	return load_u8((void *)stmt + size_of_basic_tuple() + sizeof(int64_t));
 }
 
 /** Set type of the vinyl statement. */
 static inline void
 vy_stmt_set_type(struct tuple *stmt, enum iproto_type type)
 {
-	((struct vy_stmt *) stmt)->type = type;
+	store_u8((void *)stmt + size_of_basic_tuple() + sizeof(int64_t), type);
 }
 
 /** Get flags of the vinyl statement. */
 static inline uint8_t
 vy_stmt_flags(struct tuple *stmt)
 {
-	return ((struct vy_stmt *)stmt)->flags;
+	return load_u8((void *)stmt + size_of_basic_tuple() + sizeof(int64_t) +
+							      sizeof(uint8_t));
 }
 
 /** Set flags of the vinyl statement. */
 static inline void
 vy_stmt_set_flags(struct tuple *stmt, uint8_t flags)
 {
-	((struct vy_stmt *)stmt)->flags = flags;
+	store_u8((void *)stmt + size_of_basic_tuple() +
+				sizeof(int64_t) + sizeof(uint8_t), flags);
 }
 
 /**
diff --git a/test/box/errinj.result b/test/box/errinj.result
index b8c2476c3..0b1990e03 100644
--- a/test/box/errinj.result
+++ b/test/box/errinj.result
@@ -455,7 +455,7 @@ errinj.set("ERRINJ_TUPLE_ALLOC", true)
 ...
 s:auto_increment{}
 ---
-- error: Failed to allocate 16 bytes in slab allocator for memtx_tuple
+- error: Failed to allocate 12 bytes in slab allocator for memtx_tuple
 ...
 s:select{}
 ---
@@ -463,7 +463,7 @@ s:select{}
 ...
 s:auto_increment{}
 ---
-- error: Failed to allocate 16 bytes in slab allocator for memtx_tuple
+- error: Failed to allocate 12 bytes in slab allocator for memtx_tuple
 ...
 s:select{}
 ---
@@ -471,7 +471,7 @@ s:select{}
 ...
 s:auto_increment{}
 ---
-- error: Failed to allocate 16 bytes in slab allocator for memtx_tuple
+- error: Failed to allocate 12 bytes in slab allocator for memtx_tuple
 ...
 s:select{}
 ---
@@ -485,7 +485,7 @@ box.begin()
     s:insert{1}
 box.commit();
 ---
-- error: Failed to allocate 16 bytes in slab allocator for memtx_tuple
+- error: Failed to allocate 12 bytes in slab allocator for memtx_tuple
 ...
 box.rollback();
 ---
@@ -499,7 +499,7 @@ box.begin()
     s:insert{2}
 box.commit();
 ---
-- error: Failed to allocate 16 bytes in slab allocator for memtx_tuple
+- error: Failed to allocate 12 bytes in slab allocator for memtx_tuple
 ...
 s:select{};
 ---
@@ -513,7 +513,7 @@ box.begin()
     s:insert{2}
 box.commit();
 ---
-- error: Failed to allocate 16 bytes in slab allocator for memtx_tuple
+- error: Failed to allocate 12 bytes in slab allocator for memtx_tuple
 ...
 s:select{};
 ---
@@ -532,7 +532,7 @@ box.begin()
     s:insert{2}
 box.commit();
 ---
-- error: Failed to allocate 16 bytes in slab allocator for memtx_tuple
+- error: Failed to allocate 12 bytes in slab allocator for memtx_tuple
 ...
 errinj.set("ERRINJ_TUPLE_ALLOC", false);
 ---
@@ -794,7 +794,7 @@ errinj.set("ERRINJ_TUPLE_ALLOC", true)
 ...
 s:replace{1, "test"}
 ---
-- error: Failed to allocate 21 bytes in slab allocator for memtx_tuple
+- error: Failed to allocate 17 bytes in slab allocator for memtx_tuple
 ...
 s:bsize()
 ---
@@ -806,7 +806,7 @@ utils.space_bsize(s)
 ...
 s:update({1}, {{'=', 3, '!'}})
 ---
-- error: Failed to allocate 20 bytes in slab allocator for memtx_tuple
+- error: Failed to allocate 16 bytes in slab allocator for memtx_tuple
 ...
 s:bsize()
 ---
diff --git a/test/box/upsert_errinj.result b/test/box/upsert_errinj.result
index ed52e2855..3aca98191 100644
--- a/test/box/upsert_errinj.result
+++ b/test/box/upsert_errinj.result
@@ -19,7 +19,7 @@ errinj.set("ERRINJ_TUPLE_ALLOC", true)
 ...
 s:upsert({111, '111', 222, '222'}, {{'!', 5, '!'}})
 ---
-- error: Failed to allocate 26 bytes in slab allocator for memtx_tuple
+- error: Failed to allocate 22 bytes in slab allocator for memtx_tuple
 ...
 errinj.set("ERRINJ_TUPLE_ALLOC", false)
 ---
diff --git a/test/vinyl/cache.result b/test/vinyl/cache.result
index 49d2bcc7a..4c9e0a96d 100644
--- a/test/vinyl/cache.result
+++ b/test/vinyl/cache.result
@@ -1033,14 +1033,14 @@ for i = 1, 100 do s:get{i} end
 ...
 box.stat.vinyl().memory.tuple_cache
 ---
-- 108500
+- 107300
 ...
 box.cfg{vinyl_cache = 50 * 1000}
 ---
 ...
 box.stat.vinyl().memory.tuple_cache
 ---
-- 49910
+- 49358
 ...
 box.cfg{vinyl_cache = 0}
 ---
@@ -1116,7 +1116,7 @@ s.index.i2:count()
 ...
 box.stat.vinyl().memory.tuple_cache -- should be about 200 KB
 ---
-- 219200
+- 218000
 ...
 s:drop()
 ---
diff --git a/test/vinyl/quota.result b/test/vinyl/quota.result
index 70e81453d..0d0cded1a 100644
--- a/test/vinyl/quota.result
+++ b/test/vinyl/quota.result
@@ -36,7 +36,7 @@ space:insert({1, 1})
 ...
 box.stat.vinyl().memory.level0
 ---
-- 98344
+- 98336
 ...
 space:insert({1, 1})
 ---
@@ -44,7 +44,7 @@ space:insert({1, 1})
 ...
 box.stat.vinyl().memory.level0
 ---
-- 98344
+- 98336
 ...
 space:update({1}, {{'!', 1, 100}}) -- try to modify the primary key
 ---
@@ -52,7 +52,7 @@ space:update({1}, {{'!', 1, 100}}) -- try to modify the primary key
 ...
 box.stat.vinyl().memory.level0
 ---
-- 98344
+- 98336
 ...
 space:insert({2, 2})
 ---
@@ -68,7 +68,7 @@ space:insert({4, 4})
 ...
 box.stat.vinyl().memory.level0
 ---
-- 98463
+- 98417
 ...
 box.snapshot()
 ---
@@ -94,7 +94,7 @@ _ = space:replace{1, 1, string.rep('a', 1024 * 1024 * 5)}
 ...
 box.stat.vinyl().memory.level0
 ---
-- 5292080
+- 5292064
 ...
 space:drop()
 ---
diff --git a/test/vinyl/quota_timeout.result b/test/vinyl/quota_timeout.result
index 31ca23670..0690823cf 100644
--- a/test/vinyl/quota_timeout.result
+++ b/test/vinyl/quota_timeout.result
@@ -49,7 +49,7 @@ s:count()
 ...
 box.stat.vinyl().memory.level0
 ---
-- 748248
+- 748232
 ...
 -- Since the following operation requires more memory than configured
 -- and dump is disabled, it should fail with ER_VY_QUOTA_TIMEOUT.
@@ -63,7 +63,7 @@ s:count()
 ...
 box.stat.vinyl().memory.level0
 ---
-- 748248
+- 748232
 ...
 --
 -- Check that increasing box.cfg.vinyl_memory wakes up fibers
@@ -135,7 +135,7 @@ test_run:cmd("push filter '[0-9.]+ sec' to '<sec> sec'")
 ...
 test_run:grep_log('test', 'waited for .* quota for too long.*')
 ---
-- 'waited for 1048615 bytes of vinyl memory quota for too long: <sec> sec'
+- 'waited for 1048603 bytes of vinyl memory quota for too long: <sec> sec'
 ...
 test_run:cmd("clear filter")
 ---
@@ -167,7 +167,7 @@ pad = string.rep('x', box.cfg.vinyl_memory)
 ...
 _ = s:auto_increment{pad}
 ---
-- error: Failed to allocate 1572903 bytes in lsregion for vinyl transaction
+- error: Failed to allocate 1572891 bytes in lsregion for vinyl transaction
 ...
 s:drop()
 ---
diff --git a/test/vinyl/stat.result b/test/vinyl/stat.result
index a895528b9..18b186633 100644
--- a/test/vinyl/stat.result
+++ b/test/vinyl/stat.result
@@ -301,7 +301,7 @@ stat_diff(istat(), st)
 ---
 - put:
     rows: 25
-    bytes: 26525
+    bytes: 26225
   rows: 25
   run_avg: 1
   run_count: 1
@@ -318,7 +318,7 @@ stat_diff(istat(), st)
     dump:
       input:
         rows: 25
-        bytes: 26525
+        bytes: 26225
       count: 1
       output:
         bytes: 26049
@@ -350,7 +350,7 @@ stat_diff(istat(), st)
 ---
 - put:
     rows: 50
-    bytes: 53050
+    bytes: 52450
   bytes: 26042
   disk:
     last_level:
@@ -364,7 +364,7 @@ stat_diff(istat(), st)
     dump:
       input:
         rows: 50
-        bytes: 53050
+        bytes: 52450
       count: 1
       output:
         bytes: 52091
@@ -402,11 +402,11 @@ stat_diff(istat(), st)
 - cache:
     index_size: 49152
     rows: 1
-    bytes: 1061
+    bytes: 1049
     lookup: 1
     put:
       rows: 1
-      bytes: 1061
+      bytes: 1049
   lookup: 1
   disk:
     iterator:
@@ -418,13 +418,13 @@ stat_diff(istat(), st)
       lookup: 1
       get:
         rows: 1
-        bytes: 1061
+        bytes: 1049
   memory:
     iterator:
       lookup: 1
   get:
     rows: 1
-    bytes: 1061
+    bytes: 1049
 ...
 -- point lookup from cache
 st = istat()
@@ -440,14 +440,14 @@ stat_diff(istat(), st)
     lookup: 1
     put:
       rows: 1
-      bytes: 1061
+      bytes: 1049
     get:
       rows: 1
-      bytes: 1061
+      bytes: 1049
   lookup: 1
   get:
     rows: 1
-    bytes: 1061
+    bytes: 1049
 ...
 -- put in memory + cache invalidate
 st = istat()
@@ -461,18 +461,18 @@ stat_diff(istat(), st)
 - cache:
     invalidate:
       rows: 1
-      bytes: 1061
+      bytes: 1049
     rows: -1
-    bytes: -1061
+    bytes: -1049
   rows: 1
   memory:
     index_size: 49152
-    bytes: 1061
+    bytes: 1049
     rows: 1
   put:
     rows: 1
-    bytes: 1061
-  bytes: 1061
+    bytes: 1049
+  bytes: 1049
 ...
 -- point lookup from memory
 st = istat()
@@ -485,22 +485,22 @@ s:get(1) ~= nil
 stat_diff(istat(), st)
 ---
 - cache:
-    bytes: 1061
+    bytes: 1049
     lookup: 1
     rows: 1
     put:
       rows: 1
-      bytes: 1061
+      bytes: 1049
   memory:
     iterator:
       lookup: 1
       get:
         rows: 1
-        bytes: 1061
+        bytes: 1049
   lookup: 1
   get:
     rows: 1
-    bytes: 1061
+    bytes: 1049
 ...
 -- put in txw + point lookup from txw
 st = istat()
@@ -520,16 +520,16 @@ stat_diff(istat(), st)
 ---
 - txw:
     rows: 1
-    bytes: 1061
+    bytes: 1049
     iterator:
       lookup: 1
       get:
         rows: 1
-        bytes: 1061
+        bytes: 1049
   lookup: 1
   get:
     rows: 1
-    bytes: 1061
+    bytes: 1049
 ...
 box.rollback()
 ---
@@ -586,15 +586,15 @@ for i = 1, 100 do s:get(i) end
 ...
 stat_diff(istat(), st, 'cache')
 ---
-- rows: 14
-  bytes: 14854
+- rows: 15
+  bytes: 15735
   evict:
-    rows: 86
-    bytes: 91246
+    rows: 85
+    bytes: 89165
   lookup: 100
   put:
     rows: 100
-    bytes: 106100
+    bytes: 104900
 ...
 -- range split
 for i = 1, 100 do put(i) end
@@ -649,15 +649,15 @@ st = istat()
 stat_diff(istat(), st)
 ---
 - cache:
-    rows: 13
-    bytes: 13793
+    rows: 14
+    bytes: 14686
     evict:
-      rows: 37
-      bytes: 39257
+      rows: 36
+      bytes: 37764
     lookup: 1
     put:
       rows: 51
-      bytes: 54111
+      bytes: 53499
   disk:
     iterator:
       read:
@@ -668,23 +668,23 @@ stat_diff(istat(), st)
       lookup: 2
       get:
         rows: 100
-        bytes: 106100
+        bytes: 104900
   txw:
     iterator:
       lookup: 1
       get:
         rows: 50
-        bytes: 53050
+        bytes: 52450
   memory:
     iterator:
       lookup: 1
       get:
         rows: 100
-        bytes: 106100
+        bytes: 104900
   lookup: 1
   get:
     rows: 100
-    bytes: 106100
+    bytes: 104900
 ...
 box.rollback()
 ---
@@ -717,17 +717,17 @@ stat_diff(istat(), st)
     lookup: 1
     put:
       rows: 5
-      bytes: 5305
+      bytes: 5245
     get:
       rows: 5
-      bytes: 5305
+      bytes: 5245
   txw:
     iterator:
       lookup: 1
   lookup: 1
   get:
     rows: 5
-    bytes: 5305
+    bytes: 5245
 ...
 box.rollback()
 ---
@@ -761,7 +761,7 @@ put(1)
 ...
 stat_diff(gstat(), st, 'memory.level0')
 ---
-- 1064
+- 1049
 ...
 -- use cache
 st = gstat()
@@ -772,7 +772,7 @@ _ = s:get(1)
 ...
 stat_diff(gstat(), st, 'memory.tuple_cache')
 ---
-- 1109
+- 1097
 ...
 s:delete(1)
 ---
@@ -1011,7 +1011,7 @@ istat()
       rows: 0
       bytes: 0
     index_size: 49152
-    rows: 13
+    rows: 14
     evict:
       rows: 0
       bytes: 0
@@ -1019,7 +1019,7 @@ istat()
       rows: 0
       bytes: 0
     lookup: 0
-    bytes: 13793
+    bytes: 14686
     get:
       rows: 0
       bytes: 0
@@ -1088,7 +1088,7 @@ istat()
   upsert:
     squashed: 0
     applied: 0
-  bytes: 317731
+  bytes: 315259
   put:
     rows: 0
     bytes: 0
@@ -1105,7 +1105,7 @@ istat()
         rows: 0
         bytes: 0
   memory:
-    bytes: 213431
+    bytes: 210959
     index_size: 49152
     rows: 206
     iterator:
@@ -1128,9 +1128,9 @@ gstat()
     gap_locks: 0
     read_views: 0
   memory:
-    tuple_cache: 14417
+    tuple_cache: 15358
     tx: 0
-    level0: 263210
+    level0: 260118
     page_index: 1250
     bloom_filter: 140
   disk:
@@ -1173,7 +1173,7 @@ box.snapshot()
 ...
 stat_diff(gstat(), st, 'scheduler')
 ---
-- dump_input: 104200
+- dump_input: 103000
   dump_output: 103592
   tasks_completed: 2
   dump_count: 1
@@ -1190,7 +1190,7 @@ box.snapshot()
 ...
 stat_diff(gstat(), st, 'scheduler')
 ---
-- dump_input: 10420
+- dump_input: 10300
   dump_output: 10411
   tasks_completed: 2
   dump_count: 1
@@ -1272,7 +1272,7 @@ st2 = i2:stat()
 ...
 s:bsize()
 ---
-- 53300
+- 52700
 ...
 i1:len(), i2:len()
 ---
@@ -1397,7 +1397,7 @@ st2 = i2:stat()
 ...
 s:bsize()
 ---
-- 107199
+- 105999
 ...
 i1:len(), i2:len()
 ---
-- 
2.17.1


^ permalink raw reply	[flat|nested] 5+ messages in thread

* [Tarantool-patches] [PATCH 4/4] box: introduce 1 byte field map offsets
  2021-01-18 23:50 [Tarantool-patches] [PATCH 0/4] Introduce tiny tuples with perf test Ilya Kosarev via Tarantool-patches
                   ` (2 preceding siblings ...)
  2021-01-18 23:50 ` [Tarantool-patches] [PATCH 3/4] core: introduce tiny tuples Ilya Kosarev via Tarantool-patches
@ 2021-01-18 23:50 ` Ilya Kosarev via Tarantool-patches
  3 siblings, 0 replies; 5+ messages in thread
From: Ilya Kosarev via Tarantool-patches @ 2021-01-18 23:50 UTC (permalink / raw)
  To: v.shpilevoy, alyapunov; +Cc: tarantool-patches

Tiny tuples now use 1-byte field map offsets instead of 4-byte ones,
which saves even more memory for small enough tuples.

Closes #5385
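
A minimal standalone sketch of the new addressing scheme, for readers
skimming the diff below (the demo_* names are illustrative only and
are not part of this patch): the field map is addressed backwards from
the start of the tuple data, and each slot holds either a 1-byte or a
4-byte unaligned offset depending on whether the tuple is tiny.

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* The field map may be unaligned, so use memcpy-based loads. */
static inline uint8_t
demo_load_u8(const void *p)
{
	uint8_t v;
	memcpy(&v, p, sizeof(v));
	return v;
}

static inline uint32_t
demo_load_u32(const void *p)
{
	uint32_t v;
	memcpy(&v, p, sizeof(v));
	return v;
}

/*
 * field_map points one byte past the end of the map; offset_slot is
 * negative, i.e. slot -1 is the last entry. Tiny tuples store each
 * slot in 1 byte, regular tuples in 4 bytes.
 */
static uint32_t
demo_field_map_get_offset(const uint8_t *field_map, int32_t offset_slot,
			  bool is_tiny)
{
	assert(offset_slot < 0);
	if (is_tiny)
		return demo_load_u8(field_map + offset_slot);
	return demo_load_u32(field_map + (ptrdiff_t)offset_slot *
			     (ptrdiff_t)sizeof(uint32_t));
}

int
main(void)
{
	/* Two slots in the tiny (1-byte) layout: offsets 7 and 42. */
	uint8_t tiny_map[2] = {7, 42};
	const uint8_t *tiny_end = tiny_map + sizeof(tiny_map);
	assert(demo_field_map_get_offset(tiny_end, -1, true) == 42);
	assert(demo_field_map_get_offset(tiny_end, -2, true) == 7);

	/* The same two slots in the regular (4-byte) layout. */
	uint8_t big_map[2 * sizeof(uint32_t)];
	uint32_t slot2 = 7, slot1 = 42;
	memcpy(big_map, &slot2, sizeof(slot2));
	memcpy(big_map + sizeof(uint32_t), &slot1, sizeof(slot1));
	const uint8_t *big_end = big_map + sizeof(big_map);
	assert(demo_field_map_get_offset(big_end, -1, false) == 42);
	assert(demo_field_map_get_offset(big_end, -2, false) == 7);
	return 0;
}

In the patch itself the 1-byte path is only kept while every recorded
offset fits into UINT8_MAX and no multikey extents are needed, which is
why field_map_builder_set_slot() drops is_tiny otherwise.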
---
 src/box/field_map.c          | 14 +++---
 src/box/field_map.h          | 19 +++++---
 src/box/lua/tuple.c          |  3 +-
 src/box/memtx_engine.c       |  7 +--
 src/box/sql.c                |  5 ++-
 src/box/tuple.c              | 27 +++++++-----
 src/box/tuple.h              | 39 +++++++++--------
 src/box/tuple_compare.cc     | 84 +++++++++++++++++++++++-------------
 src/box/tuple_extract_key.cc | 25 +++++++----
 src/box/tuple_format.c       | 13 +++---
 src/box/tuple_format.h       |  3 +-
 src/box/tuple_hash.cc        | 15 ++++---
 src/box/vy_stmt.c            | 18 +++++---
 13 files changed, 167 insertions(+), 105 deletions(-)

diff --git a/src/box/field_map.c b/src/box/field_map.c
index dc903115e..6f8b99ddf 100644
--- a/src/box/field_map.c
+++ b/src/box/field_map.c
@@ -80,7 +80,7 @@ field_map_builder_slot_extent_new(struct field_map_builder *builder,
 }
 
 void
-field_map_build(struct field_map_builder *builder, char *buffer)
+field_map_build(struct field_map_builder *builder, char *buffer, bool is_tiny)
 {
 	/*
 	 * To initialize the field map and its extents, prepare
@@ -97,8 +97,8 @@ field_map_build(struct field_map_builder *builder, char *buffer)
 	 * The buffer size is assumed to be sufficient to write
 	 * field_map_build_size(builder) bytes there.
 	 */
-	uint32_t *field_map =
-		(uint32_t *)(buffer + field_map_build_size(builder));
+	uint8_t *field_map =
+		(uint8_t *)(buffer + field_map_build_size(builder, is_tiny));
 	char *extent_wptr = buffer;
 	for (int32_t i = -1; i >= -(int32_t)builder->slot_count; i--) {
 		/*
@@ -108,13 +108,17 @@ field_map_build(struct field_map_builder *builder, char *buffer)
 		 * explicitly.
 		 */
 		if (!builder->slots[i].has_extent) {
-			store_u32(&field_map[i], builder->slots[i].offset);
+			is_tiny ? store_u8(&field_map[i],
+					   builder->slots[i].offset) :
+				  store_u32(&field_map[i * sizeof(uint32_t)],
+					    builder->slots[i].offset);
 			continue;
 		}
 		struct field_map_builder_slot_extent *extent =
 						builder->slots[i].extent;
 		/** Retrive memory for the extent. */
-		store_u32(&field_map[i], extent_wptr - (char *)field_map);
+		store_u32(&field_map[i * sizeof(uint32_t)],
+			  extent_wptr - (char *)field_map);
 		store_u32(extent_wptr, extent->size);
 		uint32_t extent_offset_sz = extent->size * sizeof(uint32_t);
 		memcpy(&((uint32_t *) extent_wptr)[1], extent->offset,
diff --git a/src/box/field_map.h b/src/box/field_map.h
index d8ef726a1..427f14701 100644
--- a/src/box/field_map.h
+++ b/src/box/field_map.h
@@ -149,15 +149,17 @@ struct field_map_builder_slot {
  * When a field is not in the data tuple, its offset is 0.
  */
 static inline uint32_t
-field_map_get_offset(const uint32_t *field_map, int32_t offset_slot,
-		     int multikey_idx)
+field_map_get_offset(const uint8_t *field_map, int32_t offset_slot,
+		     int multikey_idx, bool is_tiny)
 {
 	/*
 	 * Can not access field_map as a normal uint32 array
 	 * because its alignment may be < 4 bytes. Need to use
 	 * unaligned store-load operations explicitly.
 	 */
-	uint32_t offset = load_u32(&field_map[offset_slot]);
+	uint32_t offset = is_tiny ? load_u8(&field_map[offset_slot]) :
+				    load_u32(&field_map[offset_slot *
+							sizeof(uint32_t)]);
 	if (multikey_idx != MULTIKEY_NONE && (int32_t)offset < 0) {
 		/**
 		 * The field_map extent has the following
@@ -213,16 +215,18 @@ static inline int
 field_map_builder_set_slot(struct field_map_builder *builder,
 			   int32_t offset_slot, uint32_t offset,
 			   int32_t multikey_idx, uint32_t multikey_count,
-			   struct region *region)
+			   struct region *region, bool *is_tiny)
 {
 	assert(offset_slot < 0);
 	assert((uint32_t)-offset_slot <= builder->slot_count);
 	assert(offset > 0);
 	if (multikey_idx == MULTIKEY_NONE) {
 		builder->slots[offset_slot].offset = offset;
+		*is_tiny = ((*is_tiny) && (offset <= UINT8_MAX));
 	} else {
 		assert(multikey_idx >= 0);
 		assert(multikey_idx < (int32_t)multikey_count);
+		*is_tiny = false;
 		struct field_map_builder_slot_extent *extent;
 		if (builder->slots[offset_slot].has_extent) {
 			extent = builder->slots[offset_slot].extent;
@@ -243,9 +247,10 @@ field_map_builder_set_slot(struct field_map_builder *builder,
  * Calculate the size of tuple field_map to be built.
  */
 static inline uint32_t
-field_map_build_size(struct field_map_builder *builder)
+field_map_build_size(struct field_map_builder *builder, bool is_tiny)
 {
-	return builder->slot_count * sizeof(uint32_t) +
+	return builder->slot_count * is_tiny * sizeof(uint8_t) +
+	       builder->slot_count * !is_tiny * sizeof(uint32_t) +
 	       builder->extents_size;
 }
 
@@ -255,6 +260,6 @@ field_map_build_size(struct field_map_builder *builder)
  * The buffer must have at least field_map_build_size(builder) bytes.
  */
 void
-field_map_build(struct field_map_builder *builder, char *buffer);
+field_map_build(struct field_map_builder *builder, char *buffer, bool is_tiny);
 
 #endif /* TARANTOOL_BOX_FIELD_MAP_H_INCLUDED */
diff --git a/src/box/lua/tuple.c b/src/box/lua/tuple.c
index 3e6f043b4..0e54cf379 100644
--- a/src/box/lua/tuple.c
+++ b/src/box/lua/tuple.c
@@ -641,7 +641,8 @@ lbox_tuple_field_by_path(struct lua_State *L)
 					     tuple_data(tuple),
 					     tuple_field_map(tuple),
 					     path, (uint32_t)len,
-					     lua_hashstring(L, 2));
+					     lua_hashstring(L, 2),
+					     tuple_is_tiny(tuple));
 	if (field == NULL)
 		return 0;
 	luamp_decode(L, luaL_msgpack_default, &field);
diff --git a/src/box/memtx_engine.c b/src/box/memtx_engine.c
index adb90e1c8..0871e17fc 100644
--- a/src/box/memtx_engine.c
+++ b/src/box/memtx_engine.c
@@ -1222,9 +1222,9 @@ memtx_tuple_new(struct tuple_format *format, const char *data, const char *end)
 	size_t tuple_len = end - data;
 	bool is_tiny = (tuple_len <= UINT8_MAX);
 	struct field_map_builder builder;
-	if (tuple_field_map_create(format, data, true, &builder) != 0)
+	if (tuple_field_map_create(format, data, true, &builder, &is_tiny) != 0)
 		goto end;
-	uint32_t field_map_size = field_map_build_size(&builder);
+	uint32_t field_map_size = field_map_build_size(&builder, is_tiny);
 	/*
 	 * Data offset is calculated from the begin of the struct
 	 * tuple base, not from memtx_tuple, because the struct
@@ -1232,6 +1232,7 @@ memtx_tuple_new(struct tuple_format *format, const char *data, const char *end)
 	 */
 	is_tiny = (is_tiny && (sizeof(struct tuple) +
 			       field_map_size <= MAX_TINY_DATA_OFFSET));
+	field_map_size = field_map_build_size(&builder, is_tiny);
 	uint32_t extra_size = field_map_size +
 			      !is_tiny * sizeof(struct tuple_extra);
 	uint32_t data_offset = sizeof(struct tuple) + extra_size;
@@ -1276,7 +1277,7 @@ memtx_tuple_new(struct tuple_format *format, const char *data, const char *end)
 	tuple_set_data_offset(tuple, data_offset);
 	tuple_set_dirty_bit(tuple, false);
 	char *raw = (char *) tuple + data_offset;
-	field_map_build(&builder, raw - field_map_size);
+	field_map_build(&builder, raw - field_map_size, is_tiny);
 	memcpy(raw, data, tuple_len);
 	say_debug("%s(%zu) = %p", __func__, tuple_len, memtx_tuple);
 end:
diff --git a/src/box/sql.c b/src/box/sql.c
index 59e1e88fc..3702f4f19 100644
--- a/src/box/sql.c
+++ b/src/box/sql.c
@@ -727,7 +727,7 @@ tarantoolsqlIdxKeyCompare(struct BtCursor *cursor,
 	struct tuple *tuple;
 	const char *base;
 	struct tuple_format *format;
-	const uint32_t *field_map;
+	const uint8_t *field_map;
 	uint32_t field_count, next_fieldno = 0;
 	const char *p, *field0;
 	u32 i, n;
@@ -776,7 +776,8 @@ tarantoolsqlIdxKeyCompare(struct BtCursor *cursor,
 				uint32_t field_offset =
 					field_map_get_offset(field_map,
 							     field->offset_slot,
-							     MULTIKEY_NONE);
+							     MULTIKEY_NONE,
+							     tuple_is_tiny(tuple));
 				p = base + field_offset;
 			}
 		}
diff --git a/src/box/tuple.c b/src/box/tuple.c
index db95d5872..e9e9b7af2 100644
--- a/src/box/tuple.c
+++ b/src/box/tuple.c
@@ -82,11 +82,12 @@ runtime_tuple_new(struct tuple_format *format, const char *data, const char *end
 	size_t data_len = end - data;
 	bool is_tiny = (data_len <= UINT8_MAX);
 	struct field_map_builder builder;
-	if (tuple_field_map_create(format, data, true, &builder) != 0)
+	if (tuple_field_map_create(format, data, true, &builder, &is_tiny) != 0)
 		goto end;
-	uint32_t field_map_size = field_map_build_size(&builder);
+	uint32_t field_map_size = field_map_build_size(&builder, is_tiny);
 	is_tiny = (is_tiny && (sizeof(struct tuple) +
 			       field_map_size <= MAX_TINY_DATA_OFFSET));
+	field_map_size = field_map_build_size(&builder, is_tiny);
 	uint32_t data_offset = sizeof(struct tuple) + field_map_size +
 			       !is_tiny * sizeof(uint32_t);
 	assert(!is_tiny || data_offset <= MAX_TINY_DATA_OFFSET);
@@ -113,7 +114,7 @@ runtime_tuple_new(struct tuple_format *format, const char *data, const char *end
 	tuple_set_data_offset(tuple, data_offset);
 	tuple_set_dirty_bit(tuple, false);
 	char *raw = (char *) tuple + data_offset;
-	field_map_build(&builder, raw - field_map_size);
+	field_map_build(&builder, raw - field_map_size, is_tiny);
 	memcpy(raw, data, data_len);
 	say_debug("%s(%zu) = %p", __func__, data_len, tuple);
 end:
@@ -141,7 +142,9 @@ tuple_validate_raw(struct tuple_format *format, const char *tuple)
 	struct region *region = &fiber()->gc;
 	size_t region_svp = region_used(region);
 	struct field_map_builder builder;
-	int rc = tuple_field_map_create(format, tuple, true, &builder);
+	bool is_tiny = false;
+	int rc = tuple_field_map_create(format, tuple, true, &builder,
+					&is_tiny);
 	region_truncate(region, region_svp);
 	return rc;
 }
@@ -494,8 +497,9 @@ tuple_go_to_path(const char **data, const char *path, uint32_t path_len,
 
 const char *
 tuple_field_raw_by_full_path(struct tuple_format *format, const char *tuple,
-			     const uint32_t *field_map, const char *path,
-			     uint32_t path_len, uint32_t path_hash)
+			     const uint8_t *field_map, const char *path,
+			     uint32_t path_len, uint32_t path_hash,
+			     bool is_tiny)
 {
 	assert(path_len > 0);
 	uint32_t fieldno;
@@ -507,7 +511,8 @@ tuple_field_raw_by_full_path(struct tuple_format *format, const char *tuple,
 	 */
 	if (tuple_fieldno_by_name(format->dict, path, path_len, path_hash,
 				  &fieldno) == 0)
-		return tuple_field_raw(format, tuple, field_map, fieldno);
+		return tuple_field_raw(format, tuple, field_map, fieldno,
+				       is_tiny);
 	struct json_lexer lexer;
 	struct json_token token;
 	json_lexer_create(&lexer, path, path_len, TUPLE_INDEX_BASE);
@@ -545,13 +550,13 @@ tuple_field_raw_by_full_path(struct tuple_format *format, const char *tuple,
 	return tuple_field_raw_by_path(format, tuple, field_map, fieldno,
 				       path + lexer.offset,
 				       path_len - lexer.offset,
-				       NULL, MULTIKEY_NONE);
+				       NULL, MULTIKEY_NONE, is_tiny);
 }
 
 uint32_t
 tuple_raw_multikey_count(struct tuple_format *format, const char *data,
-			       const uint32_t *field_map,
-			       struct key_def *key_def)
+			 const uint8_t *field_map,
+			 struct key_def *key_def, bool is_tiny)
 {
 	assert(key_def->is_multikey);
 	const char *array_raw =
@@ -559,7 +564,7 @@ tuple_raw_multikey_count(struct tuple_format *format, const char *data,
 					key_def->multikey_fieldno,
 					key_def->multikey_path,
 					key_def->multikey_path_len,
-					NULL, MULTIKEY_NONE);
+					NULL, MULTIKEY_NONE, is_tiny);
 	if (array_raw == NULL)
 		return 0;
 	enum mp_type type = mp_typeof(*array_raw);
diff --git a/src/box/tuple.h b/src/box/tuple.h
index 9eac52f01..685524672 100644
--- a/src/box/tuple.h
+++ b/src/box/tuple.h
@@ -723,11 +723,10 @@ tuple_validate(struct tuple_format *format, struct tuple *tuple)
  * @returns a field map for the tuple.
  * @sa tuple_field_map_create()
  */
-static inline const uint32_t *
+static inline const uint8_t *
 tuple_field_map(struct tuple *tuple)
 {
-	return (const uint32_t *) ((const char *) tuple +
-				   tuple_data_offset(tuple));
+	return (uint8_t *)tuple + tuple_data_offset(tuple);
 }
 
 /**
@@ -804,9 +803,10 @@ tuple_field_go_to_key(const char **field, const char *key, int len);
  */
 static inline const char *
 tuple_field_raw_by_path(struct tuple_format *format, const char *tuple,
-			const uint32_t *field_map, uint32_t fieldno,
+			const uint8_t *field_map, uint32_t fieldno,
 			const char *path, uint32_t path_len,
-			int32_t *offset_slot_hint, int multikey_idx)
+			int32_t *offset_slot_hint, int multikey_idx,
+			bool is_tiny)
 {
 	int32_t offset_slot;
 	if (offset_slot_hint != NULL &&
@@ -853,7 +853,7 @@ tuple_field_raw_by_path(struct tuple_format *format, const char *tuple,
 offset_slot_access:
 		/* Indexed field */
 		offset = field_map_get_offset(field_map, offset_slot,
-					      multikey_idx);
+					      multikey_idx, is_tiny);
 		if (offset == 0)
 			return NULL;
 		tuple += offset;
@@ -887,7 +887,7 @@ parse:
  */
 static inline const char *
 tuple_field_raw(struct tuple_format *format, const char *tuple,
-		const uint32_t *field_map, uint32_t field_no)
+		const uint8_t *field_map, uint32_t field_no, bool is_tiny)
 {
 	if (likely(field_no < format->index_field_count)) {
 		int32_t offset_slot;
@@ -903,7 +903,7 @@ tuple_field_raw(struct tuple_format *format, const char *tuple,
 		if (offset_slot == TUPLE_OFFSET_SLOT_NIL)
 			goto parse;
 		offset = field_map_get_offset(field_map, offset_slot,
-					      MULTIKEY_NONE);
+					      MULTIKEY_NONE, is_tiny);
 		if (offset == 0)
 			return NULL;
 		tuple += offset;
@@ -931,7 +931,8 @@ static inline const char *
 tuple_field(struct tuple *tuple, uint32_t fieldno)
 {
 	return tuple_field_raw(tuple_format(tuple), tuple_data(tuple),
-			       tuple_field_map(tuple), fieldno);
+			       tuple_field_map(tuple), fieldno,
+			       tuple_is_tiny(tuple));
 }
 
 /**
@@ -951,8 +952,9 @@ tuple_field(struct tuple *tuple, uint32_t fieldno)
  */
 const char *
 tuple_field_raw_by_full_path(struct tuple_format *format, const char *tuple,
-			     const uint32_t *field_map, const char *path,
-			     uint32_t path_len, uint32_t path_hash);
+			     const uint8_t *field_map, const char *path,
+			     uint32_t path_len, uint32_t path_hash,
+			     bool is_tiny);
 
 /**
  * Get a tuple field pointed to by an index part and multikey
@@ -966,8 +968,8 @@ tuple_field_raw_by_full_path(struct tuple_format *format, const char *tuple,
  */
 static inline const char *
 tuple_field_raw_by_part(struct tuple_format *format, const char *data,
-			const uint32_t *field_map,
-			struct key_part *part, int multikey_idx)
+			const uint8_t *field_map,
+			struct key_part *part, int multikey_idx, bool is_tiny)
 {
 	if (unlikely(part->format_epoch != format->epoch)) {
 		assert(format->epoch != 0);
@@ -980,7 +982,8 @@ tuple_field_raw_by_part(struct tuple_format *format, const char *data,
 	}
 	return tuple_field_raw_by_path(format, data, field_map, part->fieldno,
 				       part->path, part->path_len,
-				       &part->offset_slot_cache, multikey_idx);
+				       &part->offset_slot_cache, multikey_idx,
+				       is_tiny);
 }
 
 /**
@@ -996,7 +999,7 @@ tuple_field_by_part(struct tuple *tuple, struct key_part *part,
 {
 	return tuple_field_raw_by_part(tuple_format(tuple), tuple_data(tuple),
 				       tuple_field_map(tuple), part,
-				       multikey_idx);
+				       multikey_idx, tuple_is_tiny(tuple));
 }
 
 /**
@@ -1010,7 +1013,8 @@ tuple_field_by_part(struct tuple *tuple, struct key_part *part,
  */
 uint32_t
 tuple_raw_multikey_count(struct tuple_format *format, const char *data,
-			 const uint32_t *field_map, struct key_def *key_def);
+			 const uint8_t *field_map, struct key_def *key_def,
+			 bool is_tiny);
 
 /**
  * Get count of multikey index keys in tuple by given multikey
@@ -1023,7 +1027,8 @@ static inline uint32_t
 tuple_multikey_count(struct tuple *tuple, struct key_def *key_def)
 {
 	return tuple_raw_multikey_count(tuple_format(tuple), tuple_data(tuple),
-					tuple_field_map(tuple), key_def);
+					tuple_field_map(tuple), key_def,
+					tuple_is_tiny(tuple));
 }
 
 /**
diff --git a/src/box/tuple_compare.cc b/src/box/tuple_compare.cc
index eb148c2f5..d9b286738 100644
--- a/src/box/tuple_compare.cc
+++ b/src/box/tuple_compare.cc
@@ -571,8 +571,8 @@ tuple_compare_slowpath(struct tuple *tuple_a, hint_t tuple_a_hint,
 	bool was_null_met = false;
 	struct tuple_format *format_a = tuple_format(tuple_a);
 	struct tuple_format *format_b = tuple_format(tuple_b);
-	const uint32_t *field_map_a = tuple_field_map(tuple_a);
-	const uint32_t *field_map_b = tuple_field_map(tuple_b);
+	const uint8_t *field_map_a = tuple_field_map(tuple_a);
+	const uint8_t *field_map_b = tuple_field_map(tuple_b);
 	struct key_part *end;
 	const char *field_a, *field_b;
 	enum mp_type a_type, b_type;
@@ -585,22 +585,28 @@ tuple_compare_slowpath(struct tuple *tuple_a, hint_t tuple_a_hint,
 		if (is_multikey) {
 			field_a = tuple_field_raw_by_part(format_a, tuple_a_raw,
 							  field_map_a, part,
-							  (int)tuple_a_hint);
+							  (int)tuple_a_hint,
+							  tuple_is_tiny(tuple_a));
 			field_b = tuple_field_raw_by_part(format_b, tuple_b_raw,
 							  field_map_b, part,
-							  (int)tuple_b_hint);
+							  (int)tuple_b_hint,
+							  tuple_is_tiny(tuple_b));
 		} else if (has_json_paths) {
 			field_a = tuple_field_raw_by_part(format_a, tuple_a_raw,
 							  field_map_a, part,
-							  MULTIKEY_NONE);
+							  MULTIKEY_NONE,
+							  tuple_is_tiny(tuple_a));
 			field_b = tuple_field_raw_by_part(format_b, tuple_b_raw,
 							  field_map_b, part,
-							  MULTIKEY_NONE);
+							  MULTIKEY_NONE,
+							  tuple_is_tiny(tuple_b));
 		} else {
 			field_a = tuple_field_raw(format_a, tuple_a_raw,
-						  field_map_a, part->fieldno);
+						  field_map_a, part->fieldno,
+						  tuple_is_tiny(tuple_a));
 			field_b = tuple_field_raw(format_b, tuple_b_raw,
-						  field_map_b, part->fieldno);
+						  field_map_b, part->fieldno,
+						  tuple_is_tiny(tuple_b));
 		}
 		assert(has_optional_parts ||
 		       (field_a != NULL && field_b != NULL));
@@ -651,22 +657,28 @@ tuple_compare_slowpath(struct tuple *tuple_a, hint_t tuple_a_hint,
 		if (is_multikey) {
 			field_a = tuple_field_raw_by_part(format_a, tuple_a_raw,
 							  field_map_a, part,
-							  (int)tuple_a_hint);
+							  (int)tuple_a_hint,
+							  tuple_is_tiny(tuple_a));
 			field_b = tuple_field_raw_by_part(format_b, tuple_b_raw,
 							  field_map_b, part,
-							  (int)tuple_b_hint);
+							  (int)tuple_b_hint,
+							  tuple_is_tiny(tuple_b));
 		} else if (has_json_paths) {
 			field_a = tuple_field_raw_by_part(format_a, tuple_a_raw,
 							  field_map_a, part,
-							  MULTIKEY_NONE);
+							  MULTIKEY_NONE,
+							  tuple_is_tiny(tuple_a));
 			field_b = tuple_field_raw_by_part(format_b, tuple_b_raw,
 							  field_map_b, part,
-							  MULTIKEY_NONE);
+							  MULTIKEY_NONE,
+							  tuple_is_tiny(tuple_b));
 		} else {
 			field_a = tuple_field_raw(format_a, tuple_a_raw,
-						  field_map_a, part->fieldno);
+						  field_map_a, part->fieldno,
+						  tuple_is_tiny(tuple_a));
 			field_b = tuple_field_raw(format_b, tuple_b_raw,
-						  field_map_b, part->fieldno);
+						  field_map_b, part->fieldno,
+						  tuple_is_tiny(tuple_b));
 		}
 		/*
 		 * Extended parts are primary, and they can not
@@ -703,21 +715,24 @@ tuple_compare_with_key_slowpath(struct tuple *tuple, hint_t tuple_hint,
 	struct key_part *part = key_def->parts;
 	struct tuple_format *format = tuple_format(tuple);
 	const char *tuple_raw = tuple_data(tuple);
-	const uint32_t *field_map = tuple_field_map(tuple);
+	const uint8_t *field_map = tuple_field_map(tuple);
 	enum mp_type a_type, b_type;
 	if (likely(part_count == 1)) {
 		const char *field;
 		if (is_multikey) {
 			field = tuple_field_raw_by_part(format, tuple_raw,
 							field_map, part,
-							(int)tuple_hint);
+							(int)tuple_hint,
+							tuple_is_tiny(tuple));
 		} else if (has_json_paths) {
 			field = tuple_field_raw_by_part(format, tuple_raw,
 							field_map, part,
-							MULTIKEY_NONE);
+							MULTIKEY_NONE,
+							tuple_is_tiny(tuple));
 		} else {
 			field = tuple_field_raw(format, tuple_raw, field_map,
-						part->fieldno);
+						part->fieldno,
+						tuple_is_tiny(tuple));
 		}
 		if (! is_nullable) {
 			return tuple_compare_field(field, key, part->type,
@@ -745,14 +760,16 @@ tuple_compare_with_key_slowpath(struct tuple *tuple, hint_t tuple_hint,
 		if (is_multikey) {
 			field = tuple_field_raw_by_part(format, tuple_raw,
 							field_map, part,
-							(int)tuple_hint);
+							(int)tuple_hint,
+							tuple_is_tiny(tuple));
 		} else if (has_json_paths) {
 			field = tuple_field_raw_by_part(format, tuple_raw,
 							field_map, part,
-							MULTIKEY_NONE);
+							MULTIKEY_NONE,
+							tuple_is_tiny(tuple));
 		} else {
 			field = tuple_field_raw(format, tuple_raw, field_map,
-						part->fieldno);
+						part->fieldno, tuple_is_tiny(tuple));
 		}
 		if (! is_nullable) {
 			rc = tuple_compare_field(field, key, part->type,
@@ -1062,10 +1079,10 @@ struct FieldCompare<IDX, TYPE, IDX2, TYPE2, MORE_TYPES...>
 				return r;
 			field_a = tuple_field_raw(format_a, tuple_data(tuple_a),
 						  tuple_field_map(tuple_a),
-						  IDX2);
+						  IDX2, tuple_is_tiny(tuple_a));
 			field_b = tuple_field_raw(format_b, tuple_data(tuple_b),
 						  tuple_field_map(tuple_b),
-						  IDX2);
+						  IDX2, tuple_is_tiny(tuple_b));
 		}
 		return FieldCompare<IDX2, TYPE2, MORE_TYPES...>::
 			compare(tuple_a, tuple_b, format_a,
@@ -1104,9 +1121,11 @@ struct TupleCompare
 		struct tuple_format *format_b = tuple_format(tuple_b);
 		const char *field_a, *field_b;
 		field_a = tuple_field_raw(format_a, tuple_data(tuple_a),
-					  tuple_field_map(tuple_a), IDX);
+					  tuple_field_map(tuple_a), IDX,
+					  tuple_is_tiny(tuple_a));
 		field_b = tuple_field_raw(format_b, tuple_data(tuple_b),
-					  tuple_field_map(tuple_b), IDX);
+					  tuple_field_map(tuple_b), IDX,
+					  tuple_is_tiny(tuple_b));
 		return FieldCompare<IDX, TYPE, MORE_TYPES...>::
 			compare(tuple_a, tuple_b, format_a,
 				format_b, field_a, field_b);
@@ -1248,7 +1267,8 @@ struct FieldCompareWithKey<FLD_ID, IDX, TYPE, IDX2, TYPE2, MORE_TYPES...>
 			if (r || part_count == FLD_ID + 1)
 				return r;
 			field = tuple_field_raw(format, tuple_data(tuple),
-						tuple_field_map(tuple), IDX2);
+						tuple_field_map(tuple), IDX2,
+						tuple_is_tiny(tuple));
 			mp_next(&key);
 		}
 		return FieldCompareWithKey<FLD_ID + 1, IDX2, TYPE2, MORE_TYPES...>::
@@ -1290,7 +1310,7 @@ struct TupleCompareWithKey
 		struct tuple_format *format = tuple_format(tuple);
 		const char *field = tuple_field_raw(format, tuple_data(tuple),
 						    tuple_field_map(tuple),
-						    IDX);
+						    IDX, tuple_is_tiny(tuple));
 		return FieldCompareWithKey<FLD_ID, IDX, TYPE, MORE_TYPES...>::
 				compare(tuple, key, part_count,
 					key_def, format, field);
@@ -1386,17 +1406,19 @@ func_index_compare(struct tuple *tuple_a, hint_t tuple_a_hint,
 	const char *tuple_b_raw = tuple_data(tuple_b);
 	struct tuple_format *format_a = tuple_format(tuple_a);
 	struct tuple_format *format_b = tuple_format(tuple_b);
-	const uint32_t *field_map_a = tuple_field_map(tuple_a);
-	const uint32_t *field_map_b = tuple_field_map(tuple_b);
+	const uint8_t *field_map_a = tuple_field_map(tuple_a);
+	const uint8_t *field_map_b = tuple_field_map(tuple_b);
 	const char *field_a, *field_b;
 	for (uint32_t i = key_part_count; i < cmp_def->part_count; i++) {
 		struct key_part *part = &cmp_def->parts[i];
 		field_a = tuple_field_raw_by_part(format_a, tuple_a_raw,
 						  field_map_a, part,
-						  MULTIKEY_NONE);
+						  MULTIKEY_NONE,
+						  tuple_is_tiny(tuple_a));
 		field_b = tuple_field_raw_by_part(format_b, tuple_b_raw,
 						  field_map_b, part,
-						  MULTIKEY_NONE);
+						  MULTIKEY_NONE,
+						  tuple_is_tiny(tuple_b));
 		assert(field_a != NULL && field_b != NULL);
 		rc = tuple_compare_field(field_a, field_b, part->type,
 					 part->coll);
diff --git a/src/box/tuple_extract_key.cc b/src/box/tuple_extract_key.cc
index 795dc6559..a61306fba 100644
--- a/src/box/tuple_extract_key.cc
+++ b/src/box/tuple_extract_key.cc
@@ -126,7 +126,7 @@ tuple_extract_key_slowpath(struct tuple *tuple, struct key_def *key_def,
 	uint32_t part_count = key_def->part_count;
 	uint32_t bsize = mp_sizeof_array(part_count);
 	struct tuple_format *format = tuple_format(tuple);
-	const uint32_t *field_map = tuple_field_map(tuple);
+	const uint8_t *field_map = tuple_field_map(tuple);
 	const char *tuple_end = data + tuple_bsize(tuple);
 
 	/* Calculate the key size. */
@@ -134,15 +134,18 @@ tuple_extract_key_slowpath(struct tuple *tuple, struct key_def *key_def,
 		const char *field;
 		if (!has_json_paths) {
 			field = tuple_field_raw(format, data, field_map,
-						key_def->parts[i].fieldno);
+						key_def->parts[i].fieldno,
+						tuple_is_tiny(tuple));
 		} else if (!is_multikey) {
 			field = tuple_field_raw_by_part(format, data, field_map,
 							&key_def->parts[i],
-							MULTIKEY_NONE);
+							MULTIKEY_NONE,
+							tuple_is_tiny(tuple));
 		} else {
 			field = tuple_field_raw_by_part(format, data, field_map,
 							&key_def->parts[i],
-							multikey_idx);
+							multikey_idx,
+							tuple_is_tiny(tuple));
 		}
 		if (has_optional_parts && field == NULL) {
 			bsize += mp_sizeof_nil();
@@ -186,15 +189,18 @@ tuple_extract_key_slowpath(struct tuple *tuple, struct key_def *key_def,
 		const char *field;
 		if (!has_json_paths) {
 			field = tuple_field_raw(format, data, field_map,
-						key_def->parts[i].fieldno);
+						key_def->parts[i].fieldno,
+						tuple_is_tiny(tuple));
 		} else if (!is_multikey) {
 			field = tuple_field_raw_by_part(format, data, field_map,
 							&key_def->parts[i],
-							MULTIKEY_NONE);
+							MULTIKEY_NONE,
+							tuple_is_tiny(tuple));
 		} else {
 			field = tuple_field_raw_by_part(format, data, field_map,
 							&key_def->parts[i],
-							multikey_idx);
+							multikey_idx,
+							tuple_is_tiny(tuple));
 		}
 		if (has_optional_parts && field == NULL) {
 			key_buf = mp_encode_nil(key_buf);
@@ -464,12 +470,13 @@ tuple_key_contains_null(struct tuple *tuple, struct key_def *def,
 {
 	struct tuple_format *format = tuple_format(tuple);
 	const char *data = tuple_data(tuple);
-	const uint32_t *field_map = tuple_field_map(tuple);
+	const uint8_t *field_map = tuple_field_map(tuple);
 	for (struct key_part *part = def->parts, *end = part + def->part_count;
 	     part < end; ++part) {
 		const char *field = tuple_field_raw_by_part(format, data,
 							    field_map, part,
-							    multikey_idx);
+							    multikey_idx,
+							    tuple_is_tiny(tuple));
 		if (field == NULL || mp_typeof(*field) == MP_NIL)
 			return true;
 	}
diff --git a/src/box/tuple_format.c b/src/box/tuple_format.c
index dca0fbddc..b2c4d1ea6 100644
--- a/src/box/tuple_format.c
+++ b/src/box/tuple_format.c
@@ -859,7 +859,8 @@ tuple_format_required_fields_validate(struct tuple_format *format,
 
 static int
 tuple_field_map_create_plain(struct tuple_format *format, const char *tuple,
-			     bool validate, struct field_map_builder *builder)
+			     bool validate, struct field_map_builder *builder,
+			     bool *is_tiny)
 {
 	struct region *region = &fiber()->gc;
 	const char *pos = tuple;
@@ -906,7 +907,7 @@ tuple_field_map_create_plain(struct tuple_format *format, const char *tuple,
 		if (field->offset_slot != TUPLE_OFFSET_SLOT_NIL &&
 		    field_map_builder_set_slot(builder, field->offset_slot,
 					       pos - tuple, MULTIKEY_NONE,
-					       0, NULL) != 0) {
+					       0, NULL, is_tiny) != 0) {
 			return -1;
 		}
 	}
@@ -922,7 +923,8 @@ end:
 /** @sa declaration for details. */
 int
 tuple_field_map_create(struct tuple_format *format, const char *tuple,
-		       bool validate, struct field_map_builder *builder)
+		       bool validate, struct field_map_builder *builder,
+		       bool *is_tiny)
 {
 	struct region *region = &fiber()->gc;
 	if (field_map_builder_create(builder, format->field_map_size,
@@ -937,7 +939,7 @@ tuple_field_map_create(struct tuple_format *format, const char *tuple,
 	 */
 	if (format->fields_depth == 1) {
 		return tuple_field_map_create_plain(format, tuple, validate,
-						    builder);
+						    builder, is_tiny);
 	}
 
 	uint32_t field_count;
@@ -954,7 +956,8 @@ tuple_field_map_create(struct tuple_format *format, const char *tuple,
 		if (entry.field->offset_slot != TUPLE_OFFSET_SLOT_NIL &&
 		    field_map_builder_set_slot(builder, entry.field->offset_slot,
 					entry.data - tuple, entry.multikey_idx,
-					entry.multikey_count, region) != 0)
+					entry.multikey_count, region,
+					is_tiny) != 0)
 			return -1;
 	}
 	return entry.data == NULL ? 0 : -1;
diff --git a/src/box/tuple_format.h b/src/box/tuple_format.h
index 021072d3d..08ebf0939 100644
--- a/src/box/tuple_format.h
+++ b/src/box/tuple_format.h
@@ -427,7 +427,8 @@ box_tuple_format_unref(box_tuple_format_t *format);
  */
 int
 tuple_field_map_create(struct tuple_format *format, const char *tuple,
-		       bool validate, struct field_map_builder *builder);
+		       bool validate, struct field_map_builder *builder,
+		       bool *is_tiny);
 
 /**
  * Initialize tuple format subsystem.
diff --git a/src/box/tuple_hash.cc b/src/box/tuple_hash.cc
index 39f89a659..235590f30 100644
--- a/src/box/tuple_hash.cc
+++ b/src/box/tuple_hash.cc
@@ -372,14 +372,15 @@ tuple_hash_slowpath(struct tuple *tuple, struct key_def *key_def)
 	uint32_t prev_fieldno = key_def->parts[0].fieldno;
 	struct tuple_format *format = tuple_format(tuple);
 	const char *tuple_raw = tuple_data(tuple);
-	const uint32_t *field_map = tuple_field_map(tuple);
+	const uint8_t *field_map = tuple_field_map(tuple);
 	const char *field;
 	if (has_json_paths) {
 		field = tuple_field_raw_by_part(format, tuple_raw, field_map,
-						key_def->parts, MULTIKEY_NONE);
+						key_def->parts, MULTIKEY_NONE,
+						tuple_is_tiny(tuple));
 	} else {
 		field = tuple_field_raw(format, tuple_raw, field_map,
-					prev_fieldno);
+					prev_fieldno, tuple_is_tiny(tuple));
 	}
 	const char *end = (char *)tuple + tuple_size(tuple);
 	if (has_optional_parts && field == NULL) {
@@ -398,10 +399,12 @@ tuple_hash_slowpath(struct tuple *tuple, struct key_def *key_def)
 			if (has_json_paths) {
 				field = tuple_field_raw_by_part(format, tuple_raw,
 								field_map, part,
-								MULTIKEY_NONE);
+								MULTIKEY_NONE,
+								tuple_is_tiny(tuple));
 			} else {
-				field = tuple_field_raw(format, tuple_raw, field_map,
-						    part->fieldno);
+				field = tuple_field_raw(format, tuple_raw,
+							field_map, part->fieldno,
+							tuple_is_tiny(tuple));
 			}
 		}
 		if (has_optional_parts && (field == NULL || field >= end)) {
diff --git a/src/box/vy_stmt.c b/src/box/vy_stmt.c
index 97bc1e408..6440c7d3f 100644
--- a/src/box/vy_stmt.c
+++ b/src/box/vy_stmt.c
@@ -345,9 +345,11 @@ vy_stmt_new_with_ops(struct tuple_format *format, const char *tuple_begin,
 	 * with tuple_validate() anyway.
 	 */
 	struct field_map_builder builder;
-	if (tuple_field_map_create(format, tuple_begin, false, &builder) != 0)
+	bool is_tiny = false;
+	if (tuple_field_map_create(format, tuple_begin, false, &builder,
+				   &is_tiny) != 0)
 		goto end;
-	uint32_t field_map_size = field_map_build_size(&builder);
+	uint32_t field_map_size = field_map_build_size(&builder, is_tiny);
 	/*
 	 * Allocate stmt. Offsets: one per key part + offset of the
 	 * statement end.
@@ -361,7 +363,7 @@ vy_stmt_new_with_ops(struct tuple_format *format, const char *tuple_begin,
 	/* Copy MsgPack data */
 	char *raw = (char *) tuple_data(stmt);
 	char *wpos = raw;
-	field_map_build(&builder, wpos - field_map_size);
+	field_map_build(&builder, wpos - field_map_size, is_tiny);
 	memcpy(wpos, tuple_begin, mpsize);
 	wpos += mpsize;
 	for (struct iovec *op = ops, *end = ops + op_count;
@@ -484,11 +486,12 @@ vy_stmt_new_surrogate_delete_raw(struct tuple_format *format,
 					    entry.field->token.len);
 		}
 		/* Initialize field_map with data offset. */
+		bool is_tiny = false;
 		uint32_t offset_slot = entry.field->offset_slot;
 		if (offset_slot != TUPLE_OFFSET_SLOT_NIL &&
 		    field_map_builder_set_slot(&builder, offset_slot,
-					pos - data, entry.multikey_idx,
-					entry.multikey_count, region) != 0)
+				pos - data, entry.multikey_idx,
+				entry.multikey_count, region, &is_tiny) != 0)
 			goto out;
 		/* Copy field data. */
 		if (entry.field->type == FIELD_TYPE_ARRAY) {
@@ -503,8 +506,9 @@ vy_stmt_new_surrogate_delete_raw(struct tuple_format *format,
 	if (entry.data != NULL)
 		goto out;
 	assert(pos <= data + src_size);
+	bool is_tiny = false;
 	uint32_t bsize = pos - data;
-	uint32_t field_map_size = field_map_build_size(&builder);
+	uint32_t field_map_size = field_map_build_size(&builder, is_tiny);
 	stmt = vy_stmt_alloc(format, size_of_struct_vy_stmt() + field_map_size,
 			     bsize);
 	if (stmt == NULL)
@@ -512,7 +516,7 @@ vy_stmt_new_surrogate_delete_raw(struct tuple_format *format,
 	char *stmt_data = (char *) tuple_data(stmt);
 	char *stmt_field_map_begin = stmt_data - field_map_size;
 	memcpy(stmt_data, data, bsize);
-	field_map_build(&builder, stmt_field_map_begin);
+	field_map_build(&builder, stmt_field_map_begin, is_tiny);
 	vy_stmt_set_type(stmt, IPROTO_DELETE);
 	mp_tuple_assert(stmt_data, stmt_data + bsize);
 out:
-- 
2.17.1


^ permalink raw reply	[flat|nested] 5+ messages in thread
