[Tarantool-patches] [PATCH v2 4/5] Implement system allocator based on malloc
mechanik20051988
mechanik20051988 at tarantool.org
Wed Jan 20 10:53:42 MSK 2021
The slab allocator, which is used for tuple allocation, has a certain
disadvantage: on some workloads (size migration) it tends towards
unresolvable fragmentation. In such cases the user should be able to
choose another allocator. The system allocator is based on the malloc()
function but is restricted by the same quota as the slab allocator.
The system allocator does not allocate all memory at start; instead,
it allocates memory as needed, checking that the quota is not exceeded.
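The idea, in a minimal self-contained sketch (the byte_quota type and the
quota_malloc()/quota_free() names below are hypothetical illustrations, not
the patch API - the patch itself reuses struct quota from the small library
and rounds sizes up to QUOTA_UNIT_SIZE, see sysalloc.c):

    /* Charge every allocation against a fixed byte limit. */
    #include <stdlib.h>

    struct byte_quota {
        size_t used;   /* bytes currently accounted */
        size_t total;  /* hard limit, configured at startup */
    };

    static void *
    quota_malloc(struct byte_quota *q, size_t size)
    {
        if (q->used + size > q->total)
            return NULL;        /* quota exceeded, report as OOM */
        void *ptr = malloc(size);
        if (ptr != NULL)
            q->used += size;    /* charge only successful allocations */
        return ptr;
    }

    static void
    quota_free(struct byte_quota *q, void *ptr, size_t size)
    {
        free(ptr);
        q->used -= size;        /* give the bytes back to the quota */
    }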
---
src/box/CMakeLists.txt | 2 +
src/box/lua/slab.cc | 44 ++++++--
src/box/memtx_engine.cc | 20 ++++
src/box/memtx_engine.h | 1 +
src/box/memtx_space.cc | 6 +-
src/box/sysalloc.c | 210 ++++++++++++++++++++++++++++++++++++
src/box/sysalloc.h | 145 +++++++++++++++++++++++++
src/box/system_allocator.cc | 68 ++++++++++++
src/box/system_allocator.h | 54 ++++++++++
9 files changed, 541 insertions(+), 9 deletions(-)
create mode 100644 src/box/sysalloc.c
create mode 100644 src/box/sysalloc.h
create mode 100644 src/box/system_allocator.cc
create mode 100644 src/box/system_allocator.h
diff --git a/src/box/CMakeLists.txt b/src/box/CMakeLists.txt
index aebf76bd4..55fb14d0a 100644
--- a/src/box/CMakeLists.txt
+++ b/src/box/CMakeLists.txt
@@ -130,6 +130,8 @@ add_library(box STATIC
memtx_engine.cc
memtx_space.cc
small_allocator.cc
+ system_allocator.cc
+ sysalloc.c
sysview.c
blackhole.c
service_engine.c
diff --git a/src/box/lua/slab.cc b/src/box/lua/slab.cc
index 4b247885f..c4fdd35c4 100644
--- a/src/box/lua/slab.cc
+++ b/src/box/lua/slab.cc
@@ -44,14 +44,18 @@
#include "box/engine.h"
#include "box/memtx_engine.h"
#include "box/small_allocator.h"
-
-static int
-small_stats_noop_cb(const struct mempool_stats *stats, void *cb_ctx)
-{
- (void) stats;
- (void) cb_ctx;
- return 0;
+#include "box/system_allocator.h"
+
+#define STATS_NOOP_CB(allocator, cb_stats) \
+static int \
+allocator##_stats_noop_cb(const struct cb_stats *stats, void *cb_ctx) \
+{ \
+ (void) stats; \
+ (void) cb_ctx; \
+ return 0; \
}
+STATS_NOOP_CB(small, mempool_stats)
+STATS_NOOP_CB(system, system_stats)
static int
small_stats_lua_cb(const struct mempool_stats *stats, void *cb_ctx)
@@ -103,6 +107,25 @@ small_stats_lua_cb(const struct mempool_stats *stats, void *cb_ctx)
return 0;
}
+static int
+system_stats_lua_cb(const struct system_stats *stats, void *cb_ctx)
+{
+ struct lua_State *L = (struct lua_State *) cb_ctx;
+ lua_pushnumber(L, lua_objlen(L, -1) + 1);
+ lua_newtable(L);
+ luaL_setmaphint(L, -1);
+ lua_pushstring(L, "mem_used");
+ luaL_pushuint64(L, stats->used);
+ lua_settable(L, -3);
+
+ lua_pushstring(L, "mem_free");
+ luaL_pushuint64(L, stats->total - stats->used);
+ lua_settable(L, -3);
+ lua_settable(L, -3);
+ return 0;
+}
+
+
template <class allocator_stats, class cb_stats, class Allocator,
int (*stats_cb)(const cb_stats *stats, void *cb_ctx)>
static int
@@ -120,7 +143,7 @@ lbox_slab_stats(struct lua_State *L)
Allocator::stats(&totals, stats_cb, L);
struct mempool_stats index_stats;
mempool_stats(&memtx->index_extent_pool, &index_stats);
- stats_cb(&index_stats, L);
+ small_stats_lua_cb(&index_stats, L);
return 1;
}
@@ -282,6 +305,11 @@ box_lua_slab_init(struct lua_State *L)
struct mempool_stats, SmallAllocator,
small_stats_noop_cb, small_stats_lua_cb>(L);
break;
+ case MEMTX_SYSTEM_ALLOCATOR:
+ box_lua_slab_init<struct system_stats,
+ struct system_stats, SystemAllocator,
+ system_stats_noop_cb, system_stats_lua_cb>(L);
+ break;
default:
;
}
diff --git a/src/box/memtx_engine.cc b/src/box/memtx_engine.cc
index dc1632d69..ad904889a 100644
--- a/src/box/memtx_engine.cc
+++ b/src/box/memtx_engine.cc
@@ -51,6 +51,7 @@
#include "gc.h"
#include "raft.h"
#include "small_allocator.h"
+#include "system_allocator.h"
/* sync snapshot every 16MB */
#define SNAP_SYNC_INTERVAL (1 << 24)
@@ -155,6 +156,8 @@ memtx_engine_shutdown(struct engine *engine)
case MEMTX_SMALL_ALLOCATOR:
SmallAllocator::destroy();
break;
+ case MEMTX_SYSTEM_ALLOCATOR:
+ SystemAllocator::destroy();
default:
;
}
@@ -994,6 +997,14 @@ small_stats_noop_cb(const struct mempool_stats *stats, void *cb_ctx)
return 0;
}
+static int
+system_stats_noop_cb(const struct system_stats *stats, void *cb_ctx)
+{
+ (void)stats;
+ (void)cb_ctx;
+ return 0;
+}
+
template <class allocator_stats, class cb_stats, class Allocator,
int (*stats_cb)(const cb_stats *stats, void *cb_ctx)>
static void
@@ -1164,6 +1175,9 @@ memtx_engine_new(const char *snap_dirname, bool force_recovery,
say_info("Actual slab_alloc_factor calculated on the basis of desired "
"slab_alloc_factor = %f", actual_alloc_factor);
break;
+ case MEMTX_SYSTEM_ALLOCATOR:
+ SystemAllocator::create(&memtx->arena);
+ break;
default:
;
}
@@ -1235,6 +1249,9 @@ memtx_enter_delayed_free_mode(struct memtx_engine *memtx)
case MEMTX_SMALL_ALLOCATOR:
SmallAllocator::enter_delayed_free_mode();
break;
+ case MEMTX_SYSTEM_ALLOCATOR:
+ SystemAllocator::enter_delayed_free_mode();
+ break;
default:
;
}
@@ -1250,6 +1267,9 @@ memtx_leave_delayed_free_mode(struct memtx_engine *memtx)
case MEMTX_SMALL_ALLOCATOR:
SmallAllocator::leave_delayed_free_mode();
break;
+ case MEMTX_SYSTEM_ALLOCATOR:
+ SystemAllocator::leave_delayed_free_mode();
+ break;
default:
;
}
diff --git a/src/box/memtx_engine.h b/src/box/memtx_engine.h
index 1bdc78d7c..ba2394462 100644
--- a/src/box/memtx_engine.h
+++ b/src/box/memtx_engine.h
@@ -101,6 +101,7 @@ enum memtx_reserve_extents_num {
enum memtx_allocator_type {
MEMTX_SMALL_ALLOCATOR,
+ MEMTX_SYSTEM_ALLOCATOR,
};
/**
diff --git a/src/box/memtx_space.cc b/src/box/memtx_space.cc
index 932b3af16..df0a7021e 100644
--- a/src/box/memtx_space.cc
+++ b/src/box/memtx_space.cc
@@ -1170,8 +1170,8 @@ memtx_space_prepare_alter(struct space *old_space, struct space *new_space)
/* }}} DDL */
-struct SmallAllocator;
#define MEMTX_SPACE_VTAB(Allocator, allocator) \
+struct Allocator; \
static const struct space_vtab memtx_space_vtab_##allocator = { \
/* .destroy = */ memtx_space_destroy, \
/* .bsize = */ memtx_space_bsize, \
@@ -1195,6 +1195,7 @@ static const struct space_vtab memtx_space_vtab_##allocator = { \
/* .invalidate = */ generic_space_invalidate, \
};
MEMTX_SPACE_VTAB(SmallAllocator, small)
+MEMTX_SPACE_VTAB(SystemAllocator, system)
struct space *
memtx_space_new(struct memtx_engine *memtx,
@@ -1231,6 +1232,9 @@ memtx_space_new(struct memtx_engine *memtx,
case MEMTX_SMALL_ALLOCATOR:
vtab = &memtx_space_vtab_small;
break;
+ case MEMTX_SYSTEM_ALLOCATOR:
+ vtab = &memtx_space_vtab_system;
+ break;
default:
tuple_format_unref(format);
free(memtx_space);
diff --git a/src/box/sysalloc.c b/src/box/sysalloc.c
new file mode 100644
index 000000000..40dd067ad
--- /dev/null
+++ b/src/box/sysalloc.c
@@ -0,0 +1,210 @@
+/*
+ * Copyright 2010-2020, Tarantool AUTHORS, please see AUTHORS file.
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the
+ * following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY <COPYRIGHT HOLDER> ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
+ * <COPYRIGHT HOLDER> OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
+ * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+#include "sysalloc.h"
+
+#include <small/slab_arena.h>
+#include <small/rlist.h>
+
+#if TARGET_OS_DARWIN
+#include <malloc/malloc.h>
+static inline size_t
+portable_malloc_usable_size(void *p)
+{
+ return malloc_size(p);
+}
+#elif (TARGET_OS_FREEBSD || TARGET_OS_NETBSD || TARGET_OS_OPENBSD)
+#include <malloc_np.h>
+static inline size_t
+portable_malloc_usable_size(void *p)
+{
+ return malloc_usable_size(p);
+}
+#elif TARGET_OS_LINUX
+#include <malloc.h>
+static inline size_t
+portable_malloc_usable_size(void *p)
+{
+ return malloc_usable_size(p);
+}
+#else
+#error "Undefined system type"
+#endif
+
+static RLIST_HEAD(alloc_list);
+
+static inline void
+system_collect_garbage(struct system_alloc *alloc)
+{
+ if (alloc->free_mode != SYSTEM_COLLECT_GARBAGE)
+ return;
+
+ const int BATCH = 100;
+ if (!lifo_is_empty(&alloc->delayed)) {
+ for (int i = 0; i < BATCH; i++) {
+ void *item = lifo_pop(&alloc->delayed);
+ if (item == NULL)
+ break;
+ sysfree(alloc, item, 0 /* unused parameter */);
+ }
+ } else {
+ /* Finish garbage collection and switch to regular mode */
+ alloc->free_mode = SYSTEM_FREE;
+ }
+}
+
+void
+system_alloc_setopt(struct system_alloc *alloc, enum system_opt opt, bool val)
+{
+ switch (opt) {
+ case SYSTEM_DELAYED_FREE_MODE:
+ alloc->free_mode = val ? SYSTEM_DELAYED_FREE :
+ SYSTEM_COLLECT_GARBAGE;
+ break;
+ default:
+ assert(false);
+ break;
+ }
+}
+
+void
+system_stats(struct system_alloc *alloc, struct system_stats *totals,
+ system_stats_cb cb, void *cb_ctx)
+{
+ totals->used = pm_atomic_load_explicit(&alloc->used_bytes,
+ pm_memory_order_relaxed);
+ totals->total = quota_total(alloc->quota);
+ cb(totals, cb_ctx);
+}
+
+void
+system_alloc_create(struct system_alloc *alloc, struct slab_arena *arena)
+{
+ alloc->used_bytes = 0;
+ alloc->arena_bytes = 0;
+ alloc->arena = arena;
+ alloc->quota = arena->quota;
+ lifo_init(&alloc->delayed);
+ alloc->allocator_thread = pthread_self();
+}
+
+void
+system_alloc_destroy(struct system_alloc *alloc)
+{
+ assert(alloc->allocator_thread == pthread_self());
+ struct rlist *item, *tmp;
+ for (item = alloc_list.next; (item != &alloc_list) &&
+ (tmp = item->next); item = tmp)
+ sysfree(alloc, ((void *)item) + sizeof(struct rlist), (~0lu));
+ assert(alloc->used_bytes == 0);
+ uint32_t units = alloc->arena_bytes / alloc->arena->slab_size;
+ pm_atomic_fetch_sub(&alloc->arena->used,
+ units * alloc->arena->slab_size);
+}
+
+void
+sysfree(struct system_alloc *alloc, void *ptr, size_t bytes)
+{
+ assert(alloc->allocator_thread == pthread_self());
+ ptr -= sizeof(struct rlist);
+ size_t size = portable_malloc_usable_size(ptr);
+ uint32_t s = size % QUOTA_UNIT_SIZE, units = size / QUOTA_UNIT_SIZE;
+ size_t used_bytes = pm_atomic_fetch_sub(&alloc->used_bytes, size);
+ if (small_align(used_bytes, QUOTA_UNIT_SIZE) >
+ small_align(used_bytes - s, QUOTA_UNIT_SIZE))
+ units++;
+ if (units > 0)
+ quota_release(alloc->quota, units * QUOTA_UNIT_SIZE);
+ pm_atomic_fetch_add(&alloc->arena_bytes, size);
+ if (bytes != (~0lu))
+ rlist_del((struct rlist *)ptr);
+ free(ptr);
+}
+
+void
+sysfree_delayed(struct system_alloc *alloc, void *ptr, size_t bytes)
+{
+ assert(alloc->allocator_thread == pthread_self());
+ if (alloc->free_mode == SYSTEM_DELAYED_FREE && ptr) {
+ lifo_push(&alloc->delayed, ptr);
+ } else {
+ sysfree(alloc, ptr, bytes);
+ }
+}
+
+void *
+sysalloc(struct system_alloc *alloc, size_t bytes)
+{
+ assert(alloc->allocator_thread == pthread_self());
+ system_collect_garbage(alloc);
+
+ void *ptr = malloc(sizeof(struct rlist) + bytes);
+ if (!ptr)
+ return NULL;
+ size_t size = portable_malloc_usable_size(ptr);
+ uint32_t s = size % QUOTA_UNIT_SIZE, units = size / QUOTA_UNIT_SIZE;
+ while (1) {
+ size_t used_bytes = pm_atomic_load(&alloc->used_bytes);
+ if (small_align(used_bytes, QUOTA_UNIT_SIZE) <
+ small_align(used_bytes + s, QUOTA_UNIT_SIZE))
+ units++;
+ if (units > 0) {
+ if (quota_use(alloc->quota,
+ units * QUOTA_UNIT_SIZE) < 0) {
+ free(ptr);
+ return NULL;
+ }
+ }
+ if (pm_atomic_compare_exchange_strong(&alloc->used_bytes,
+ &used_bytes, used_bytes + size))
+ break;
+ if (units > 0)
+ quota_release(alloc->quota, units * QUOTA_UNIT_SIZE);
+ }
+
+ size_t arena_bytes;
+ do {
+ while (size > (arena_bytes = pm_atomic_load(&alloc->arena_bytes))) {
+ uint32_t units = (size - arena_bytes) /
+ alloc->arena->slab_size + 1;
+ if (!pm_atomic_compare_exchange_strong(&alloc->arena_bytes,
+ &arena_bytes, arena_bytes +
+ units * alloc->arena->slab_size))
+ continue;
+ pm_atomic_fetch_add(&alloc->arena->used,
+ units * alloc->arena->slab_size);
+ }
+ } while (!pm_atomic_compare_exchange_strong(&alloc->arena_bytes,
+ &arena_bytes, arena_bytes - size));
+
+ rlist_add_tail(&alloc_list, (struct rlist *)ptr);
+ return ptr + sizeof(struct rlist);
+}
+
diff --git a/src/box/sysalloc.h b/src/box/sysalloc.h
new file mode 100644
index 000000000..bd906c8cf
--- /dev/null
+++ b/src/box/sysalloc.h
@@ -0,0 +1,145 @@
+#pragma once
+/*
+ * Copyright 2010-2020, Tarantool AUTHORS, please see AUTHORS file.
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the
+ * following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY <COPYRIGHT HOLDER> ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
+ * <COPYRIGHT HOLDER> OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
+ * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+#include <pthread.h>
+#include <trivia/util.h>
+#include <trivia/config.h>
+#include <small/slab_arena.h>
+#include <small/quota.h>
+#include <small/lifo.h>
+
+#if defined(__cplusplus)
+extern "C" {
+#endif /* defined(__cplusplus) */
+
+enum system_opt {
+ SYSTEM_DELAYED_FREE_MODE
+};
+
+/**
+ * Free mode
+ */
+enum system_free_mode {
+ /** Free objects immediately. */
+ SYSTEM_FREE,
+ /** Collect garbage after delayed free. */
+ SYSTEM_COLLECT_GARBAGE,
+ /** Postpone deletion of objects. */
+ SYSTEM_DELAYED_FREE,
+};
+
+struct system_alloc {
+ /**
+ * Bytes allocated by system allocator
+ */
+ uint64_t used_bytes;
+ /**
+ * Arena free bytes
+ */
+ uint64_t arena_bytes;
+ /**
+ * Allocator arena
+ */
+ struct slab_arena *arena;
+ /**
+ * Allocator quota
+ */
+ struct quota *quota;
+ /**
+ * Free mode.
+ */
+ enum system_free_mode free_mode;
+ /**
+ * List of pointers for delayed free.
+ */
+ struct lifo delayed;
+ /**
+ * Allocator thread
+ */
+ pthread_t allocator_thread;
+};
+
+struct system_stats {
+ size_t used;
+ size_t total;
+};
+
+typedef int (*system_stats_cb)(const struct system_stats *stats,
+ void *cb_ctx);
+
+/** Initialize a system memory allocator. */
+void
+system_alloc_create(struct system_alloc *alloc, struct slab_arena *arena);
+
+/**
+ * Enter or leave delayed free mode - in delayed mode sysfree_delayed()
+ * doesn't free memory but puts it into a list for further deletion.
+ */
+void
+system_alloc_setopt(struct system_alloc *alloc, enum system_opt opt, bool val);
+
+/**
+ * Destroy the allocator; freeing all memory allocated
+ * with it is the user's responsibility.
+ */
+void
+system_alloc_destroy(struct system_alloc *alloc);
+
+/**
+ * Allocate memory in the system allocator, using malloc().
+ */
+void *
+sysalloc(struct system_alloc *alloc, size_t bytes);
+
+/**
+ * Free memory in the system allocator, using free().
+ */
+void
+sysfree(struct system_alloc *alloc, void *ptr, MAYBE_UNUSED size_t bytes);
+
+/**
+ * Free memory allocated by the system allocator
+ * if not in snapshot mode, otherwise put to the delayed
+ * free list.
+ */
+void
+sysfree_delayed(struct system_alloc *alloc, void *ptr, size_t bytes);
+
+/**
+ * Get system allocator statistics.
+ */
+void
+system_stats(struct system_alloc *alloc, struct system_stats *totals,
+ system_stats_cb cb, void *cb_ctx);
+
+#if defined(__cplusplus)
+} /* extern "C" */
+#endif /* defined(__cplusplus) */
diff --git a/src/box/system_allocator.cc b/src/box/system_allocator.cc
new file mode 100644
index 000000000..8b9e3114b
--- /dev/null
+++ b/src/box/system_allocator.cc
@@ -0,0 +1,68 @@
+/*
+ * Copyright 2010-2020, Tarantool AUTHORS, please see AUTHORS file.
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the
+ * following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY <COPYRIGHT HOLDER> ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
+ * <COPYRIGHT HOLDER> OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
+ * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+#include "system_allocator.h"
+
+void
+SystemAllocator::create(struct slab_arena *arena)
+{
+ system_alloc_create(&system_alloc, arena);
+}
+
+void
+SystemAllocator::destroy(void)
+{
+ system_alloc_destroy(&system_alloc);
+}
+
+void
+SystemAllocator::enter_delayed_free_mode(void)
+{
+ system_alloc_setopt(&system_alloc, SYSTEM_DELAYED_FREE_MODE, true);
+}
+
+void
+SystemAllocator::leave_delayed_free_mode(void)
+{
+ system_alloc_setopt(&system_alloc, SYSTEM_DELAYED_FREE_MODE, false);
+}
+
+void
+SystemAllocator::stats(struct system_stats *stats, system_stats_cb cb, void *cb_ctx)
+{
+ system_stats(&system_alloc, stats, cb, cb_ctx);
+}
+
+void
+SystemAllocator::memory_check(void)
+{
+}
+
+struct system_alloc SystemAllocator::system_alloc;
diff --git a/src/box/system_allocator.h b/src/box/system_allocator.h
new file mode 100644
index 000000000..aed4e0fc9
--- /dev/null
+++ b/src/box/system_allocator.h
@@ -0,0 +1,54 @@
+#pragma once
+/*
+ * Copyright 2010-2020, Tarantool AUTHORS, please see AUTHORS file.
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the
+ * following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY <COPYRIGHT HOLDER> ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
+ * <COPYRIGHT HOLDER> OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
+ * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+#include "sysalloc.h"
+
+struct SystemAllocator
+{
+ static void create(struct slab_arena *arena);
+ static void destroy(void);
+ static void enter_delayed_free_mode(void);
+ static void leave_delayed_free_mode(void);
+ static void stats(struct system_stats *stats, system_stats_cb cb, void *cb_ctx);
+ static void memory_check(void);
+ static inline void *alloc(size_t size) {
+ return sysalloc(&system_alloc, size);
+ };
+ static inline void free(void *ptr, size_t size) {
+ sysfree(&system_alloc, ptr, size);
+ }
+ static inline void free_delayed(void *ptr, size_t size) {
+ sysfree_delayed(&system_alloc, ptr, size);
+ }
+
+ /** Tuple allocator. */
+ static struct system_alloc system_alloc;
+};
--
2.20.1