[PATCH 2/8] memtx: fold memtx_tuple.cc into memtx_engine.c

Vladimir Davydov vdavydov.dev at gmail.com
Tue May 22 14:46:10 MSK 2018


The two files are too closely related: memtx_arena is defined and
used in memtx_engine.c, but initialized in memtx_tuple.cc. Since
memtx_tuple.cc is small, let's fold it into memtx_engine.c.
---
 src/box/CMakeLists.txt |   1 -
 src/box/alter.cc       |   1 -
 src/box/lua/tuple.c    |   1 -
 src/box/lua/xlog.c     |   1 -
 src/box/memtx_engine.c | 152 ++++++++++++++++++++++++++++++++++----
 src/box/memtx_engine.h |  13 ++++
 src/box/memtx_space.c  |   3 +-
 src/box/memtx_tuple.cc | 193 -------------------------------------------------
 src/box/memtx_tuple.h  | 100 -------------------------
 9 files changed, 152 insertions(+), 313 deletions(-)
 delete mode 100644 src/box/memtx_tuple.cc
 delete mode 100644 src/box/memtx_tuple.h
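
To make the coupling concrete, here is an abridged view of the pre-patch
layout, trimmed from the two files as they appear in the diff below
(simplified, not verbatim): the arena object is defined in memtx_engine.c,
while the code that actually sets it up lives in memtx_tuple.cc.

    /* src/box/memtx_engine.c (before this patch, abridged) */
    struct slab_arena memtx_arena;         /* defined and used here... */

    /* src/box/memtx_tuple.cc (before this patch, abridged) */
    extern struct slab_arena memtx_arena;  /* defined in memtx_engine.c */

    void
    memtx_tuple_init(uint64_t tuple_arena_max_size, uint32_t objsize_min,
                     float alloc_factor)
    {
            /* ...but initialized over here. */
            quota_init(&memtx_quota, tuple_arena_max_size);
            tuple_arena_create(&memtx_arena, &memtx_quota,
                               tuple_arena_max_size, SLAB_SIZE, "memtx");
            slab_cache_create(&memtx_slab_cache, &memtx_arena);
            small_alloc_create(&memtx_alloc, &memtx_slab_cache,
                               objsize_min, alloc_factor);
    }

After the fold, the same initialization runs in memtx_engine_new() right
next to the arena's definition, so the cross-file reference disappears.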

diff --git a/src/box/CMakeLists.txt b/src/box/CMakeLists.txt
index 0bbc857a..6b1ae3e8 100644
--- a/src/box/CMakeLists.txt
+++ b/src/box/CMakeLists.txt
@@ -68,7 +68,6 @@ add_library(box STATIC
     engine.c
     memtx_engine.c
     memtx_space.c
-    memtx_tuple.cc
     sysview_engine.c
     sysview_index.c
     vinyl.c
diff --git a/src/box/alter.cc b/src/box/alter.cc
index 518f515b..f0315ff7 100644
--- a/src/box/alter.cc
+++ b/src/box/alter.cc
@@ -50,7 +50,6 @@
 #include "xrow.h"
 #include "iproto_constants.h"
 #include "identifier.h"
-#include "memtx_tuple.h"
 #include "version.h"
 #include "sequence.h"
 
diff --git a/src/box/lua/tuple.c b/src/box/lua/tuple.c
index dc98e967..80573313 100644
--- a/src/box/lua/tuple.c
+++ b/src/box/lua/tuple.c
@@ -41,7 +41,6 @@
 #include "box/tuple.h"
 #include "box/tuple_convert.h"
 #include "box/errcode.h"
-#include "box/memtx_tuple.h"
 #include "json/path.h"
 
 /** {{{ box.tuple Lua library
diff --git a/src/box/lua/xlog.c b/src/box/lua/xlog.c
index 030f5c2d..70384c1d 100644
--- a/src/box/lua/xlog.c
+++ b/src/box/lua/xlog.c
@@ -43,7 +43,6 @@
 #include <box/lua/tuple.h>
 #include <lua/msgpack.h>
 #include <lua/utils.h>
-#include "box/memtx_tuple.h"
 
 /* {{{ Helpers */
 
diff --git a/src/box/memtx_engine.c b/src/box/memtx_engine.c
index fd93491e..0c5136cf 100644
--- a/src/box/memtx_engine.c
+++ b/src/box/memtx_engine.c
@@ -30,12 +30,14 @@
  */
 #include "memtx_engine.h"
 #include "memtx_space.h"
-#include "memtx_tuple.h"
 
+#include <small/quota.h>
 #include <small/small.h>
 #include <small/mempool.h>
 
 #include "fiber.h"
+#include "clock.h"
+#include "errinj.h"
 #include "coio_file.h"
 #include "tuple.h"
 #include "txn.h"
@@ -48,16 +50,25 @@
 #include "schema.h"
 #include "gc.h"
 
-/** For all memory used by all indexes.
- * If you decide to use memtx_index_arena or
- * memtx_index_slab_cache for anything other than
- * memtx_index_extent_pool, make sure this is reflected in
- * box.slab.info(), @sa lua/slab.cc
+/*
+ * If you decide to use memtx_arena for anything other than
+ * memtx_alloc or memtx_index_extent_pool, make sure this
+ * is reflected in box.slab.info(), @sa lua/slab.c.
  */
-extern struct quota memtx_quota;
-struct slab_arena memtx_arena; /* used by memtx_tuple.cc */
+
+/** Common quota for memtx tuples and indexes. */
+static struct quota memtx_quota;
+/** Common slab arena for memtx tuples and indexes. */
+static struct slab_arena memtx_arena;
+/** Slab cache for allocating memtx tuples. */
+static struct slab_cache memtx_slab_cache;
+/** Memtx tuple allocator. */
+struct small_alloc memtx_alloc; /* used by box.slab.info() */
+/** Slab cache for allocating memtx index extents. */
 static struct slab_cache memtx_index_slab_cache;
-struct mempool memtx_index_extent_pool;
+/** Memtx index extent allocator. */
+struct mempool memtx_index_extent_pool; /* used by box.slab.info() */
+
 /**
  * To ensure proper statement-level rollback in case
  * of out of memory conditions, we maintain a number
@@ -68,6 +79,12 @@ struct mempool memtx_index_extent_pool;
 static int memtx_index_num_reserved_extents;
 static void *memtx_index_reserved_extents;
 
+/** Maximal allowed tuple size, box.cfg.memtx_max_tuple_size. */
+static size_t memtx_max_tuple_size = 1 * 1024 * 1024;
+
+/** Incremented with each new snapshot. */
+uint32_t snapshot_version;
+
 static void
 txn_on_yield_or_stop(struct trigger *trigger, void *event)
 {
@@ -76,6 +93,23 @@ txn_on_yield_or_stop(struct trigger *trigger, void *event)
 	txn_rollback(); /* doesn't throw */
 }
 
+struct memtx_tuple {
+	/*
+	 * sic: the header of the tuple is used
+	 * to store a free list pointer in smfree_delayed.
+	 * Please don't change it without understanding
+	 * how smfree_delayed and snapshotting COW work.
+	 */
+	/** Snapshot generation version. */
+	uint32_t version;
+	struct tuple base;
+};
+
+enum {
+	OBJSIZE_MIN = 16,
+	SLAB_SIZE = 16 * 1024 * 1024,
+};
+
 static int
 memtx_end_build_primary_key(struct space *space, void *param)
 {
@@ -140,7 +174,6 @@ memtx_engine_shutdown(struct engine *engine)
 		mempool_destroy(&memtx->bitset_iterator_pool);
 	xdir_destroy(&memtx->snap_dir);
 	free(memtx);
-	memtx_tuple_free();
 }
 
 static int
@@ -668,7 +701,8 @@ memtx_engine_begin_checkpoint(struct engine *engine)
 	}
 
 	/* increment snapshot version; set tuple deletion to delayed mode */
-	memtx_tuple_begin_snapshot();
+	snapshot_version++;
+	small_alloc_setopt(&memtx_alloc, SMALL_DELAYED_FREE_MODE, true);
 	return 0;
 }
 
@@ -714,7 +748,7 @@ memtx_engine_commit_checkpoint(struct engine *engine, struct vclock *vclock)
 	/* waitCheckpoint() must have been done. */
 	assert(!memtx->checkpoint->waiting_for_snap_thread);
 
-	memtx_tuple_end_snapshot();
+	small_alloc_setopt(&memtx_alloc, SMALL_DELAYED_FREE_MODE, false);
 
 	if (!memtx->checkpoint->touch) {
 		int64_t lsn = vclock_sum(memtx->checkpoint->vclock);
@@ -757,7 +791,7 @@ memtx_engine_abort_checkpoint(struct engine *engine)
 		memtx->checkpoint->waiting_for_snap_thread = false;
 	}
 
-	memtx_tuple_end_snapshot();
+	small_alloc_setopt(&memtx_alloc, SMALL_DELAYED_FREE_MODE, false);
 
 	/** Remove garbage .inprogress file. */
 	char *filename =
@@ -950,8 +984,6 @@ memtx_engine_new(const char *snap_dirname, bool force_recovery,
 		 uint64_t tuple_arena_max_size, uint32_t objsize_min,
 		 float alloc_factor)
 {
-	memtx_tuple_init(tuple_arena_max_size, objsize_min, alloc_factor);
-
 	struct memtx_engine *memtx = calloc(1, sizeof(*memtx));
 	if (memtx == NULL) {
 		diag_set(OutOfMemory, sizeof(*memtx),
@@ -970,6 +1002,18 @@ memtx_engine_new(const char *snap_dirname, bool force_recovery,
 	if (memtx->gc_fiber == NULL)
 		goto fail;
 
+	/* Apply lowest allowed objsize bound. */
+	if (objsize_min < OBJSIZE_MIN)
+		objsize_min = OBJSIZE_MIN;
+
+	/* Initialize tuple allocator. */
+	quota_init(&memtx_quota, tuple_arena_max_size);
+	tuple_arena_create(&memtx_arena, &memtx_quota, tuple_arena_max_size,
+			   SLAB_SIZE, "memtx");
+	slab_cache_create(&memtx_slab_cache, &memtx_arena);
+	small_alloc_create(&memtx_alloc, &memtx_slab_cache,
+			   objsize_min, alloc_factor);
+
 	/* Initialize index extent allocator. */
 	slab_cache_create(&memtx_index_slab_cache, &memtx_arena);
 	mempool_create(&memtx_index_extent_pool, &memtx_index_slab_cache,
@@ -1012,6 +1056,84 @@ memtx_engine_set_max_tuple_size(struct memtx_engine *memtx, size_t max_size)
 	memtx_max_tuple_size = max_size;
 }
 
+struct tuple *
+memtx_tuple_new(struct tuple_format *format, const char *data, const char *end)
+{
+	assert(mp_typeof(*data) == MP_ARRAY);
+	size_t tuple_len = end - data;
+	size_t meta_size = tuple_format_meta_size(format);
+	size_t total = sizeof(struct memtx_tuple) + meta_size + tuple_len;
+
+	ERROR_INJECT(ERRINJ_TUPLE_ALLOC, {
+		diag_set(OutOfMemory, total, "slab allocator", "memtx_tuple");
+		return NULL;
+	});
+	if (unlikely(total > memtx_max_tuple_size)) {
+		diag_set(ClientError, ER_MEMTX_MAX_TUPLE_SIZE, total);
+		error_log(diag_last_error(diag_get()));
+		return NULL;
+	}
+
+	struct memtx_tuple *memtx_tuple = smalloc(&memtx_alloc, total);
+	if (memtx_tuple == NULL) {
+		diag_set(OutOfMemory, total, "slab allocator", "memtx_tuple");
+		return NULL;
+	}
+	struct tuple *tuple = &memtx_tuple->base;
+	tuple->refs = 0;
+	memtx_tuple->version = snapshot_version;
+	assert(tuple_len <= UINT32_MAX); /* bsize is UINT32_MAX */
+	tuple->bsize = tuple_len;
+	tuple->format_id = tuple_format_id(format);
+	tuple_format_ref(format);
+	/*
+	 * The data offset is counted from the beginning of struct
+	 * tuple (the base field), not from memtx_tuple, because
+	 * struct tuple is not the first field of memtx_tuple.
+	 */
+	tuple->data_offset = sizeof(struct tuple) + meta_size;
+	char *raw = (char *) tuple + tuple->data_offset;
+	uint32_t *field_map = (uint32_t *) raw;
+	memcpy(raw, data, tuple_len);
+	if (tuple_init_field_map(format, field_map, raw)) {
+		memtx_tuple_delete(format, tuple);
+		return NULL;
+	}
+	say_debug("%s(%zu) = %p", __func__, tuple_len, memtx_tuple);
+	return tuple;
+}
+
+void
+memtx_tuple_delete(struct tuple_format *format, struct tuple *tuple)
+{
+	say_debug("%s(%p)", __func__, tuple);
+	assert(tuple->refs == 0);
+#ifndef NDEBUG
+	struct errinj *delay = errinj(ERRINJ_MEMTX_TUPLE_DELETE_DELAY,
+				      ERRINJ_DOUBLE);
+	if (delay != NULL && delay->dparam > 0) {
+		double start = clock_monotonic();
+		while (clock_monotonic() < start + delay->dparam)
+			/* busy loop */ ;
+	}
+#endif
+	size_t total = sizeof(struct memtx_tuple) +
+		       tuple_format_meta_size(format) + tuple->bsize;
+	tuple_format_unref(format);
+	struct memtx_tuple *memtx_tuple =
+		container_of(tuple, struct memtx_tuple, base);
+	if (memtx_alloc.free_mode != SMALL_DELAYED_FREE ||
+	    memtx_tuple->version == snapshot_version)
+		smfree(&memtx_alloc, memtx_tuple, total);
+	else
+		smfree_delayed(&memtx_alloc, memtx_tuple, total);
+}
+
+struct tuple_format_vtab memtx_tuple_format_vtab = {
+	memtx_tuple_delete,
+	memtx_tuple_new,
+};
+
 /**
  * Allocate a block of size MEMTX_EXTENT_SIZE for memtx index
  */
diff --git a/src/box/memtx_engine.h b/src/box/memtx_engine.h
index 72f40528..9f28c268 100644
--- a/src/box/memtx_engine.h
+++ b/src/box/memtx_engine.h
@@ -45,6 +45,8 @@ extern "C" {
 
 struct index;
 struct fiber;
+struct tuple;
+struct tuple_format;
 
 /**
  * The state of memtx recovery process.
@@ -148,6 +150,17 @@ memtx_engine_set_snap_io_rate_limit(struct memtx_engine *memtx, double limit);
 void
 memtx_engine_set_max_tuple_size(struct memtx_engine *memtx, size_t max_size);
 
+/** Allocate a memtx tuple. @sa tuple_new(). */
+struct tuple *
+memtx_tuple_new(struct tuple_format *format, const char *data, const char *end);
+
+/** Free a memtx tuple. @sa tuple_delete(). */
+void
+memtx_tuple_delete(struct tuple_format *format, struct tuple *tuple);
+
+/** Tuple format vtab for memtx engine. */
+extern struct tuple_format_vtab memtx_tuple_format_vtab;
+
 enum {
 	MEMTX_EXTENT_SIZE = 16 * 1024,
 	MEMTX_SLAB_SIZE = 4 * 1024 * 1024
diff --git a/src/box/memtx_space.c b/src/box/memtx_space.c
index 73df4c7d..7c795b99 100644
--- a/src/box/memtx_space.c
+++ b/src/box/memtx_space.c
@@ -32,13 +32,14 @@
 #include "space.h"
 #include "iproto_constants.h"
 #include "txn.h"
+#include "tuple.h"
 #include "tuple_update.h"
 #include "xrow.h"
 #include "memtx_hash.h"
 #include "memtx_tree.h"
 #include "memtx_rtree.h"
 #include "memtx_bitset.h"
-#include "memtx_tuple.h"
+#include "memtx_engine.h"
 #include "column_mask.h"
 #include "sequence.h"
 
diff --git a/src/box/memtx_tuple.cc b/src/box/memtx_tuple.cc
deleted file mode 100644
index 60564e3c..00000000
--- a/src/box/memtx_tuple.cc
+++ /dev/null
@@ -1,193 +0,0 @@
-/*
- * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file.
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- *    copyright notice, this list of conditions and the
- *    following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- *    copyright notice, this list of conditions and the following
- *    disclaimer in the documentation and/or other materials
- *    provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY AUTHORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
- * AUTHORS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
- * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
- * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-
-#include "memtx_tuple.h"
-
-#include "small/small.h"
-#include "small/region.h"
-#include "small/quota.h"
-#include "fiber.h"
-#include "clock.h"
-#include "errinj.h"
-#include "box.h"
-
-struct memtx_tuple {
-	/*
-	 * sic: the header of the tuple is used
-	 * to store a free list pointer in smfree_delayed.
-	 * Please don't change it without understanding
-	 * how smfree_delayed and snapshotting COW works.
-	 */
-	/** Snapshot generation version. */
-	uint32_t version;
-	struct tuple base;
-};
-
-/** Memtx slab arena */
-extern struct slab_arena memtx_arena; /* defined in memtx_engine.cc */
-/* Memtx slab_cache for tuples */
-static struct slab_cache memtx_slab_cache;
-/** Common quota for memtx tuples and indexes */
-static struct quota memtx_quota;
-/** Memtx tuple allocator */
-struct small_alloc memtx_alloc; /* used box box.slab.info() */
-/* The maximal allowed tuple size, box.cfg.memtx_max_tuple_size */
-size_t memtx_max_tuple_size = 1 * 1024 * 1024; /* set dynamically */
-uint32_t snapshot_version;
-
-enum {
-	/** Lowest allowed slab_alloc_minimal */
-	OBJSIZE_MIN = 16,
-	SLAB_SIZE = 16 * 1024 * 1024,
-};
-
-void
-memtx_tuple_init(uint64_t tuple_arena_max_size, uint32_t objsize_min,
-		 float alloc_factor)
-{
-	/* Apply lowest allowed objsize bounds */
-	if (objsize_min < OBJSIZE_MIN)
-		objsize_min = OBJSIZE_MIN;
-	/** Preallocate entire quota. */
-	quota_init(&memtx_quota, tuple_arena_max_size);
-	tuple_arena_create(&memtx_arena, &memtx_quota, tuple_arena_max_size,
-			   SLAB_SIZE, "memtx");
-	slab_cache_create(&memtx_slab_cache, &memtx_arena);
-	small_alloc_create(&memtx_alloc, &memtx_slab_cache,
-			   objsize_min, alloc_factor);
-}
-
-void
-memtx_tuple_free(void)
-{
-}
-
-struct tuple_format_vtab memtx_tuple_format_vtab = {
-	memtx_tuple_delete,
-	memtx_tuple_new,
-};
-
-struct tuple *
-memtx_tuple_new(struct tuple_format *format, const char *data, const char *end)
-{
-	assert(mp_typeof(*data) == MP_ARRAY);
-	size_t tuple_len = end - data;
-	size_t meta_size = tuple_format_meta_size(format);
-	size_t total = sizeof(struct memtx_tuple) + meta_size + tuple_len;
-
-	ERROR_INJECT(ERRINJ_TUPLE_ALLOC,
-		     do { diag_set(OutOfMemory, (unsigned) total,
-				   "slab allocator", "memtx_tuple"); return NULL; }
-		     while(false); );
-	if (unlikely(total > memtx_max_tuple_size)) {
-		diag_set(ClientError, ER_MEMTX_MAX_TUPLE_SIZE,
-			 (unsigned) total);
-		error_log(diag_last_error(diag_get()));
-		return NULL;
-	}
-
-	struct memtx_tuple *memtx_tuple =
-		(struct memtx_tuple *) smalloc(&memtx_alloc, total);
-	/**
-	 * Use a nothrow version and throw an exception here,
-	 * to throw an instance of ClientError. Apart from being
-	 * more nice to the user, ClientErrors are ignored in
-	 * force_recovery=true mode, allowing us to start
-	 * with lower arena than necessary in the circumstances
-	 * of disaster recovery.
-	 */
-	if (memtx_tuple == NULL) {
-		diag_set(OutOfMemory, (unsigned) total,
-				 "slab allocator", "memtx_tuple");
-		return NULL;
-	}
-	struct tuple *tuple = &memtx_tuple->base;
-	tuple->refs = 0;
-	memtx_tuple->version = snapshot_version;
-	assert(tuple_len <= UINT32_MAX); /* bsize is UINT32_MAX */
-	tuple->bsize = tuple_len;
-	tuple->format_id = tuple_format_id(format);
-	tuple_format_ref(format);
-	/*
-	 * Data offset is calculated from the begin of the struct
-	 * tuple base, not from memtx_tuple, because the struct
-	 * tuple is not the first field of the memtx_tuple.
-	 */
-	tuple->data_offset = sizeof(struct tuple) + meta_size;
-	char *raw = (char *) tuple + tuple->data_offset;
-	uint32_t *field_map = (uint32_t *) raw;
-	memcpy(raw, data, tuple_len);
-	if (tuple_init_field_map(format, field_map, raw)) {
-		memtx_tuple_delete(format, tuple);
-		return NULL;
-	}
-	say_debug("%s(%zu) = %p", __func__, tuple_len, memtx_tuple);
-	return tuple;
-}
-
-void
-memtx_tuple_delete(struct tuple_format *format, struct tuple *tuple)
-{
-	say_debug("%s(%p)", __func__, tuple);
-	assert(tuple->refs == 0);
-#ifndef NDEBUG
-	struct errinj *delay = errinj(ERRINJ_MEMTX_TUPLE_DELETE_DELAY,
-				      ERRINJ_DOUBLE);
-	if (delay != NULL && delay->dparam > 0) {
-		double start = clock_monotonic();
-		while (clock_monotonic() < start + delay->dparam)
-			/* busy loop */ ;
-	}
-#endif
-	size_t total = sizeof(struct memtx_tuple) +
-		       tuple_format_meta_size(format) + tuple->bsize;
-	tuple_format_unref(format);
-	struct memtx_tuple *memtx_tuple =
-		container_of(tuple, struct memtx_tuple, base);
-	if (memtx_alloc.free_mode != SMALL_DELAYED_FREE ||
-	    memtx_tuple->version == snapshot_version)
-		smfree(&memtx_alloc, memtx_tuple, total);
-	else
-		smfree_delayed(&memtx_alloc, memtx_tuple, total);
-}
-
-void
-memtx_tuple_begin_snapshot()
-{
-	snapshot_version++;
-	small_alloc_setopt(&memtx_alloc, SMALL_DELAYED_FREE_MODE, true);
-}
-
-void
-memtx_tuple_end_snapshot()
-{
-	small_alloc_setopt(&memtx_alloc, SMALL_DELAYED_FREE_MODE, false);
-}
diff --git a/src/box/memtx_tuple.h b/src/box/memtx_tuple.h
deleted file mode 100644
index e06fb63d..00000000
--- a/src/box/memtx_tuple.h
+++ /dev/null
@@ -1,100 +0,0 @@
-#ifndef INCLUDES_TARANTOOL_BOX_MEMTX_TUPLE_H
-#define INCLUDES_TARANTOOL_BOX_MEMTX_TUPLE_H
-/*
- * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file.
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- *    copyright notice, this list of conditions and the
- *    following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- *    copyright notice, this list of conditions and the following
- *    disclaimer in the documentation and/or other materials
- *    provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY AUTHORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
- * AUTHORS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
- * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
- * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-
-#include "diag.h"
-#include "tuple_format.h"
-#include "tuple.h"
-
-#if defined(__cplusplus)
-extern "C" {
-#endif /* defined(__cplusplus) */
-
-/** Memtx tuple allocator, available to statistics.  */
-extern struct small_alloc memtx_alloc;
-
-/**
- * Initialize memtx_tuple library
- */
-void
-memtx_tuple_init(uint64_t tuple_arena_max_size, uint32_t objsize_min,
-		 float alloc_factor);
-
-/**
- * Cleanup memtx_tuple library
- */
-void
-memtx_tuple_free(void);
-
-/** Create a tuple in the memtx engine format. @sa tuple_new(). */
-struct tuple *
-memtx_tuple_new(struct tuple_format *format, const char *data, const char *end);
-
-/**
- * Free the tuple of a memtx space.
- * @pre tuple->refs  == 0
- */
-void
-memtx_tuple_delete(struct tuple_format *format, struct tuple *tuple);
-
-/** Maximal allowed tuple size (box.cfg.memtx_max_tuple_size) */
-extern size_t memtx_max_tuple_size;
-
-/** tuple format vtab for memtx engine. */
-extern struct tuple_format_vtab memtx_tuple_format_vtab;
-
-void
-memtx_tuple_begin_snapshot();
-
-void
-memtx_tuple_end_snapshot();
-
-#if defined(__cplusplus)
-}
-
-/**
- * Create a tuple in the memtx engine format. Throw an exception
- * if an error occured. @sa memtx_tuple_new().
- */
-static inline struct tuple *
-memtx_tuple_new_xc(struct tuple_format *format, const char *data,
-		   const char *end)
-{
-	struct tuple *res = memtx_tuple_new(format, data, end);
-	if (res == NULL)
-		diag_raise();
-	return res;
-}
-
-#endif /* defined(__cplusplus) */
-
-#endif
-- 
2.11.0