From mboxrd@z Thu Jan 1 00:00:00 1970 Return-Path: Received: from mail-lf1-f45.google.com (mail-lf1-f45.google.com [209.85.167.45]) (using TLSv1.2 with cipher ECDHE-RSA-AES128-GCM-SHA256 (128/128 bits)) (No client certificate requested) by dev.tarantool.org (Postfix) with ESMTPS id D9F1645C304 for ; Fri, 18 Dec 2020 17:11:06 +0300 (MSK) Received: by mail-lf1-f45.google.com with SMTP id 23so5693521lfg.10 for ; Fri, 18 Dec 2020 06:11:06 -0800 (PST) From: mechanik20051988 Date: Fri, 18 Dec 2020 17:11:03 +0300 Message-Id: <20201218141103.29488-1-mechanik20051988@gmail.com> MIME-Version: 1.0 Content-Transfer-Encoding: 8bit Subject: [Tarantool-patches] [PATCH] Add new 'allocator' option in box.cfg (cpp version) List-Id: Tarantool development patches List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , To: tarantool-patches@dev.tarantool.org, Vladislav Shpilevoy Cc: mechanik20051988 From: mechanik20051988 Branch: https://github.com/tarantool/tarantool/tree/mechanik20051988/gh-5419-choose-allocator-for-memtx-cpp Issue: https://github.com/tarantool/tarantool/issues/5419 The slab allocator, which is used for tuple allocation, has a certain disadvantage: on some workloads (size migration) it tends toward unresolvable fragmentation. The new option allows selecting a more suitable allocator when necessary. @TarantoolBot document Title: Add new 'allocator' option Add a new 'allocator' option which selects the allocator used for memtx tuples. Use box.cfg{allocator="small"} (or omit the option) for the default small allocator; use box.cfg{allocator="system"} for libc malloc. Closes #5419 --- (A short usage sketch is appended after the diff.) CMakeLists.txt | 11 + perf/allocator_perf.test.lua | 34 +++ src/box/CMakeLists.txt | 4 +- src/box/allocator.h | 162 +++++++++++++++ src/box/box.cc | 1 + src/box/field_map.h | 8 + src/box/lua/load_cfg.lua | 2 + src/box/lua/{slab.c => slab.cc} | 39 +++- src/box/{memtx_engine.c => memtx_engine.cc} | 193 +++++++++++++---- src/box/memtx_engine.h | 41 ++-- src/box/memtx_hash.c | 4 +- src/box/memtx_space.c | 12 +- src/box/memtx_tree.cc | 4 +- src/box/system_allocator.h | 218 ++++++++++++++++++++ src/trivia/config.h.cmake | 3 + test/app-tap/init_script.result | 1 + test/box/admin.result | 4 +- test/box/cfg.result | 8 +- test/box/choose_memtx_allocator.lua | 9 + test/box/choose_memtx_allocator.result | 135 ++++++++++++ test/box/choose_memtx_allocator.test.lua | 43 ++++ 21 files changed, 856 insertions(+), 80 deletions(-) create mode 100755 perf/allocator_perf.test.lua create mode 100644 src/box/allocator.h rename src/box/lua/{slab.c => slab.cc} (88%) rename src/box/{memtx_engine.c => memtx_engine.cc} (87%) create mode 100644 src/box/system_allocator.h create mode 100644 test/box/choose_memtx_allocator.lua create mode 100644 test/box/choose_memtx_allocator.result create mode 100644 test/box/choose_memtx_allocator.test.lua diff --git a/CMakeLists.txt b/CMakeLists.txt index fa6818f8e..290cd535a 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -92,6 +92,17 @@ check_symbol_exists(posix_fadvise fcntl.h HAVE_POSIX_FADVISE) check_symbol_exists(fallocate fcntl.h HAVE_FALLOCATE) check_symbol_exists(mremap sys/mman.h HAVE_MREMAP) +check_function_exists(malloc_usable_size HAVE_MALLOC_USABLE_SIZE) +check_symbol_exists(malloc_size malloc/malloc.h HAVE_MALLOC_SIZE_DARWIN) + +if (HAVE_MALLOC_USABLE_SIZE) + if (TARGET_OS_LINUX) + set(HAVE_MALLOC_USABLE_SIZE_LINUX 1) + else () + set(HAVE_MALLOC_USABLE_SIZE_BSD 1) + endif () +endif () + check_function_exists(sync_file_range HAVE_SYNC_FILE_RANGE)
check_function_exists(memmem HAVE_MEMMEM) check_function_exists(memrchr HAVE_MEMRCHR) diff --git a/perf/allocator_perf.test.lua b/perf/allocator_perf.test.lua new file mode 100755 index 000000000..be270379b --- /dev/null +++ b/perf/allocator_perf.test.lua @@ -0,0 +1,34 @@ +#!/usr/bin/env ../src/tarantool +os.execute('rm -rf *.snap *.xlog *.vylog ./512 ./513 ./514 ./515 ./516 ./517 ./518 ./519 ./520 ./521') +local clock = require('clock') +box.cfg{listen = 3301, wal_mode='none', allocator=arg[1]} +local space = box.schema.space.create('test') +space:format({ {name = 'id', type = 'unsigned'}, {name = 'year', type = 'unsigned'} }) +space:create_index('primary', { parts = {'id'} }) +local time_insert = 0 +local time_replace = 0 +local time_delete = 0 +local cnt = 0 +local cnt_max = 20 +local op_max = 2500000 +local nanosec = 1.0e9 +while cnt < cnt_max do + cnt = cnt + 1 + local time_before = clock.monotonic64() + for key = 1, op_max do space:insert({key, key + 1000}) end + local time_after = clock.monotonic64() + time_insert = time_insert + (time_after - time_before) + time_before = clock.monotonic64() + for key = 1, op_max do space:replace({key, key + 5000}) end + time_after = clock.monotonic64() + time_replace = time_replace + (time_after - time_before) + time_before = clock.monotonic64() + for key = 1, op_max do space:delete(key) end + time_after = clock.monotonic64() + time_delete = time_delete + (time_after - time_before) +end +io.write("{\n") +io.write(string.format(" \"alloc time\": \"%.3f\",\n", tonumber(time_insert) / (nanosec * cnt_max))) +io.write(string.format(" \"replace time\": \"%.3f\",\n", tonumber(time_replace) / (nanosec * cnt_max))) +io.write(string.format(" \"delete time\": \"%.3f\"\n}\n", tonumber(time_delete) / (nanosec * cnt_max))) +os.exit() diff --git a/src/box/CMakeLists.txt b/src/box/CMakeLists.txt index 19203f770..ba8c4248a 100644 --- a/src/box/CMakeLists.txt +++ b/src/box/CMakeLists.txt @@ -127,7 +127,7 @@ add_library(box STATIC memtx_bitset.c memtx_tx.c engine.c - memtx_engine.c + memtx_engine.cc memtx_space.c sysview.c blackhole.c @@ -197,7 +197,7 @@ add_library(box STATIC lua/console.c lua/serialize_lua.c lua/tuple.c - lua/slab.c + lua/slab.cc lua/index.c lua/space.cc lua/sequence.c diff --git a/src/box/allocator.h b/src/box/allocator.h new file mode 100644 index 000000000..c40afe6d2 --- /dev/null +++ b/src/box/allocator.h @@ -0,0 +1,162 @@ +#pragma once +/* + * Copyright 2010-2020, Tarantool AUTHORS, please see AUTHORS file. + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * 1. Redistributions of source code must retain the above + * copyright notice, this list of conditions and the + * following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY <COPYRIGHT HOLDER> ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL <COPYRIGHT HOLDER> + * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, + * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF + * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ +#include +#include + +#include "system_allocator.h" + +enum memtx_allocator_type { + SMALL_ALLOCATOR_TYPE = 0, + SYSTEM_ALLOCATOR_TYPE, +}; + +struct allocator_stats { + size_t used; + size_t total; +}; + +struct SmallAlloc +{ + /* Tuple allocator */ + static struct small_alloc small_alloc; + /** Slab cache for allocating tuples. */ + static struct slab_cache slab_cache; + static inline void + init(struct slab_arena *arena, uint32_t objsize_min, float alloc_factor) + { + slab_cache_create(&slab_cache, arena); + small_alloc_create(&small_alloc, &slab_cache, objsize_min, alloc_factor); + } + static inline void + destroy() + { + small_alloc_destroy(&small_alloc); + slab_cache_destroy(&slab_cache); + } + static inline void * + alloc(size_t size) + { + return smalloc(&small_alloc, size); + } + static inline void + free(void *ptr, size_t size) + { + return smfree(&small_alloc, ptr, size); + } + static inline void + free_delayed(void *ptr, size_t size) + { + return smfree_delayed(&small_alloc, ptr, size); + } + static inline void + enter_delayed_free_mode() + { + return small_alloc_setopt(&small_alloc, SMALL_DELAYED_FREE_MODE, true); + } + static inline void + leave_delayed_free_mode() + { + return small_alloc_setopt(&small_alloc, SMALL_DELAYED_FREE_MODE, false); + } + static inline void + mem_check() + { + return slab_cache_check(&slab_cache); + } + static inline void + stats(struct allocator_stats *stats, va_list argptr) + { + mempool_stats_cb stats_cb = va_arg(argptr, mempool_stats_cb); + void *cb_ctx = va_arg(argptr, void *); + + struct small_stats data_stats; + small_stats(&small_alloc, &data_stats, stats_cb, cb_ctx); + stats->used = data_stats.used; + stats->total = data_stats.total; + } +}; +extern struct SmallAlloc small_alloc; + +struct SystemAlloc +{ + /* Tuple allocator */ + static struct system_alloc system_alloc; + static inline void + init(struct quota *quota) + { + system_alloc_create(&system_alloc, quota); + } + static inline void + destroy() + { + system_alloc_destroy(&system_alloc); + } + static inline void * + alloc(size_t size) + { + return sysalloc(&system_alloc, size); + } + static inline void + free(void *ptr, size_t size) + { + return sysfree(&system_alloc, ptr, size); + } + static inline void + free_delayed(void *ptr, size_t size) + { + return sysfree_delayed(&system_alloc, ptr, size); + } + static inline void + enter_delayed_free_mode() + { + return system_alloc_setopt(&system_alloc, SYSTEM_DELAYED_FREE_MODE, true); + } + static inline void + leave_delayed_free_mode() + { + return system_alloc_setopt(&system_alloc, SYSTEM_DELAYED_FREE_MODE, false); + } + static inline void + mem_check() + { + + } + static inline void + stats(struct allocator_stats *stats, MAYBE_UNUSED va_list argptr) + { + struct system_stats data_stats; + system_stats(&system_alloc, &data_stats); + stats->used = data_stats.used; + stats->total = data_stats.total; + } +}; +extern struct SystemAlloc system_alloc; \ No newline at end of file diff --git a/src/box/box.cc
b/src/box/box.cc index a8bc3471d..66f6030df 100644 --- a/src/box/box.cc +++ b/src/box/box.cc @@ -2250,6 +2250,7 @@ engine_init() cfg_getd("memtx_memory"), cfg_geti("memtx_min_tuple_size"), cfg_geti("strip_core"), + cfg_gets("allocator"), cfg_getd("slab_alloc_factor")); engine_register((struct engine *)memtx); box_set_memtx_max_tuple_size(); diff --git a/src/box/field_map.h b/src/box/field_map.h index d8ef726a1..5087d25e5 100644 --- a/src/box/field_map.h +++ b/src/box/field_map.h @@ -35,6 +35,10 @@ #include #include "bit/bit.h" +#if defined(__cplusplus) +extern "C" { +#endif /* defined(__cplusplus) */ + struct region; struct field_map_builder_slot; @@ -257,4 +261,8 @@ field_map_build_size(struct field_map_builder *builder) void field_map_build(struct field_map_builder *builder, char *buffer); +#if defined(__cplusplus) +} /* extern "C" */ +#endif /* defined(__cplusplus) */ + #endif /* TARANTOOL_BOX_FIELD_MAP_H_INCLUDED */ diff --git a/src/box/lua/load_cfg.lua b/src/box/lua/load_cfg.lua index 770442052..817b8dbd5 100644 --- a/src/box/lua/load_cfg.lua +++ b/src/box/lua/load_cfg.lua @@ -43,6 +43,7 @@ local default_cfg = { memtx_min_tuple_size = 16, memtx_max_tuple_size = 1024 * 1024, slab_alloc_factor = 1.05, + allocator = "small", work_dir = nil, memtx_dir = ".", wal_dir = ".", @@ -123,6 +124,7 @@ local template_cfg = { memtx_min_tuple_size = 'number', memtx_max_tuple_size = 'number', slab_alloc_factor = 'number', + allocator = 'string', work_dir = 'string', memtx_dir = 'string', wal_dir = 'string', diff --git a/src/box/lua/slab.c b/src/box/lua/slab.cc similarity index 88% rename from src/box/lua/slab.c rename to src/box/lua/slab.cc index 9f5e7e95c..d44e77e94 100644 --- a/src/box/lua/slab.c +++ b/src/box/lua/slab.cc @@ -43,6 +43,7 @@ #include "memory.h" #include "box/engine.h" #include "box/memtx_engine.h" +#include "box/allocator.h" static int small_stats_noop_cb(const struct mempool_stats *stats, void *cb_ctx) @@ -108,13 +109,28 @@ lbox_slab_stats(struct lua_State *L) struct memtx_engine *memtx; memtx = (struct memtx_engine *)engine_by_name("memtx"); - struct small_stats totals; + struct allocator_stats totals; lua_newtable(L); /* * List all slabs used for tuples and slabs used for * indexes, with their stats. */ - small_stats(&memtx->alloc, &totals, small_stats_lua_cb, L); + if (memtx->type == SMALL_ALLOCATOR_TYPE) { + g_memtx_allocator_stats(&totals, small_stats_lua_cb, L); + } else { + g_memtx_allocator_stats(&totals); + lua_pushnumber(L, lua_objlen(L, -1) + 1); + lua_newtable(L); + luaL_setmaphint(L, -1); + lua_pushstring(L, "mem_used"); + luaL_pushuint64(L, totals.used); + lua_settable(L, -3); + + lua_pushstring(L, "mem_free"); + luaL_pushuint64(L, totals.total - totals.used); + lua_settable(L, -3); + lua_settable(L, -3); + } struct mempool_stats index_stats; mempool_stats(&memtx->index_extent_pool, &index_stats); small_stats_lua_cb(&index_stats, L); @@ -128,14 +144,21 @@ lbox_slab_info(struct lua_State *L) struct memtx_engine *memtx; memtx = (struct memtx_engine *)engine_by_name("memtx"); - struct small_stats totals; + struct allocator_stats totals; + bool is_small_alloc; /* * List all slabs used for tuples and slabs used for * indexes, with their stats.
*/ lua_newtable(L); - small_stats(&memtx->alloc, &totals, small_stats_noop_cb, L); + if (memtx->type == SMALL_ALLOCATOR_TYPE) { + g_memtx_allocator_stats(&totals, small_stats_noop_cb, L); + is_small_alloc = true; + } else { + g_memtx_allocator_stats(&totals); + is_small_alloc = false; + } struct mempool_stats index_stats; mempool_stats(&memtx->index_extent_pool, &index_stats); @@ -187,10 +210,10 @@ * data (tuples and indexes). */ lua_pushstring(L, "arena_used"); - luaL_pushuint64(L, totals.used + index_stats.totals.used); + luaL_pushuint64(L, (is_small_alloc ? totals.used : 0) + index_stats.totals.used); lua_settable(L, -3); - ratio = 100 * ((double) (totals.used + index_stats.totals.used) + ratio = 100 * ((double) ((is_small_alloc ? totals.used : 0) + index_stats.totals.used) / (double) arena_size); snprintf(ratio_buf, sizeof(ratio_buf), "%0.1lf%%", ratio); @@ -257,9 +280,7 @@ lbox_runtime_info(struct lua_State *L) static int lbox_slab_check(MAYBE_UNUSED struct lua_State *L) { - struct memtx_engine *memtx; - memtx = (struct memtx_engine *)engine_by_name("memtx"); - slab_cache_check(memtx->alloc.cache); + g_memtx_mem_check(); return 0; } diff --git a/src/box/memtx_engine.c b/src/box/memtx_engine.cc similarity index 87% rename from src/box/memtx_engine.c rename to src/box/memtx_engine.cc index db2bb2333..76f314223 100644 --- a/src/box/memtx_engine.c +++ b/src/box/memtx_engine.cc @@ -50,10 +50,83 @@ #include "schema.h" #include "gc.h" #include "raft.h" +#include "allocator.h" /* sync snapshot every 16MB */ #define SNAP_SYNC_INTERVAL (1 << 24) +struct slab_cache SmallAlloc::slab_cache; +struct small_alloc SmallAlloc::small_alloc; +struct SmallAlloc small_alloc; + +struct system_alloc SystemAlloc::system_alloc; +struct SystemAlloc system_alloc; + +struct tuple_format_vtab memtx_tuple_format_vtab; + +template <class Alloc> +struct tuple * +memtx_tuple_new(struct tuple_format *format, const char *data, const char *end); + +template <class Alloc> +void +memtx_tuple_delete(struct tuple_format *format, struct tuple *tuple); + +template <class Alloc> +void +metmx_tuple_chunk_delete(struct tuple_format *format, const char *data); + +template <class Alloc> +const char * +memtx_tuple_chunk_new(struct tuple_format *format, struct tuple *tuple, + const char *data, uint32_t data_sz); + +template <class Alloc> +void +memtx_enter_delayed_free_mode(struct memtx_engine *memtx); + +template <class Alloc> +void +memtx_leave_delayed_free_mode(struct memtx_engine *memtx); + +template <class Alloc> +void +memtx_mem_check(); + +template <class Alloc> +void +memtx_allocator_stats(struct allocator_stats *stats, ...); + +global_memtx_tuple_new g_memtx_tuple_new; +global_memtx_tuple_delete g_memtx_tuple_delete; +global_memtx_enter_delayed_free_mode g_memtx_enter_delayed_free_mode; +global_memtx_leave_delayed_free_mode g_memtx_leave_delayed_free_mode; +global_memtx_mem_check g_memtx_mem_check; +global_memtx_allocator_stats g_memtx_allocator_stats; + +#define DECLARE_MEMTX_ALLOCATOR_CHOICE(PREFIX, prefix, AllocType) \ +static inline void \ +prefix##_allocator_choice(struct memtx_engine *memtx) \ +{ \ + memtx->type = PREFIX##_ALLOCATOR_TYPE; \ + memtx_tuple_format_vtab.tuple_delete = memtx_tuple_delete<AllocType>; \ + memtx_tuple_format_vtab.tuple_new = memtx_tuple_new<AllocType>; \ + memtx_tuple_format_vtab.tuple_chunk_delete = \ + metmx_tuple_chunk_delete<AllocType>; \ + memtx_tuple_format_vtab.tuple_chunk_new = \ + memtx_tuple_chunk_new<AllocType>; \ + g_memtx_tuple_new = memtx_tuple_new<AllocType>; \ + g_memtx_tuple_delete = memtx_tuple_delete<AllocType>; \ + g_memtx_enter_delayed_free_mode = \ + memtx_enter_delayed_free_mode<AllocType>; \ + 
g_memtx_leave_delayed_free_mode = \ + memtx_leave_delayed_free_mode<AllocType>; \ + g_memtx_mem_check = memtx_mem_check<AllocType>; \ + g_memtx_allocator_stats = memtx_allocator_stats<AllocType>; \ +} +DECLARE_MEMTX_ALLOCATOR_CHOICE(SMALL, small, SmallAlloc) +DECLARE_MEMTX_ALLOCATOR_CHOICE(SYSTEM, system, SystemAlloc) + static void checkpoint_cancel(struct checkpoint *ckpt); @@ -141,8 +214,10 @@ memtx_engine_shutdown(struct engine *engine) mempool_destroy(&memtx->rtree_iterator_pool); mempool_destroy(&memtx->index_extent_pool); slab_cache_destroy(&memtx->index_slab_cache); - small_alloc_destroy(&memtx->alloc); - slab_cache_destroy(&memtx->slab_cache); + if (memtx->type == SMALL_ALLOCATOR_TYPE) + small_alloc.destroy(); + else + system_alloc.destroy(); tuple_arena_destroy(&memtx->arena); xdir_destroy(&memtx->snap_dir); free(memtx); @@ -540,7 +615,7 @@ struct checkpoint { static struct checkpoint * checkpoint_new(const char *snap_dirname, uint64_t snap_io_rate_limit) { - struct checkpoint *ckpt = malloc(sizeof(*ckpt)); + struct checkpoint *ckpt = (struct checkpoint *)malloc(sizeof(*ckpt)); if (ckpt == NULL) { diag_set(OutOfMemory, sizeof(*ckpt), "malloc", "struct checkpoint"); @@ -609,7 +684,7 @@ checkpoint_add_space(struct space *sp, void *data) if (!pk) return 0; struct checkpoint *ckpt = (struct checkpoint *)data; - struct checkpoint_entry *entry = malloc(sizeof(*entry)); + struct checkpoint_entry *entry = (struct checkpoint_entry *)malloc(sizeof(*entry)); if (entry == NULL) { diag_set(OutOfMemory, sizeof(*entry), "malloc", "struct checkpoint_entry"); @@ -839,7 +914,7 @@ struct memtx_join_ctx { static int memtx_join_add_space(struct space *space, void *arg) { - struct memtx_join_ctx *ctx = arg; + struct memtx_join_ctx *ctx = (struct memtx_join_ctx *)arg; if (!space_is_memtx(space)) return 0; if (space_is_temporary(space)) @@ -849,7 +924,7 @@ memtx_join_add_space(struct space *space, void *arg) struct index *pk = space_index(space, 0); if (pk == NULL) return 0; - struct memtx_join_entry *entry = malloc(sizeof(*entry)); + struct memtx_join_entry *entry = (struct memtx_join_entry *)malloc(sizeof(*entry)); if (entry == NULL) { diag_set(OutOfMemory, sizeof(*entry), "malloc", "struct memtx_join_entry"); @@ -869,7 +944,7 @@ static int memtx_engine_prepare_join(struct engine *engine, void **arg) { (void)engine; - struct memtx_join_ctx *ctx = malloc(sizeof(*ctx)); + struct memtx_join_ctx *ctx = (struct memtx_join_ctx *)malloc(sizeof(*ctx)); if (ctx == NULL) { diag_set(OutOfMemory, sizeof(*ctx), "malloc", "struct memtx_join_ctx"); @@ -907,7 +982,7 @@ memtx_join_send_tuple(struct xstream *stream, uint32_t space_id, static int memtx_join_f(va_list ap) { - struct memtx_join_ctx *ctx = va_arg(ap, struct memtx_join_ctx *); + struct memtx_join_ctx *ctx = (struct memtx_join_ctx *)va_arg(ap, struct memtx_join_ctx *); struct memtx_join_entry *entry; rlist_foreach_entry(entry, &ctx->entries, in_ctx) { struct snapshot_iterator *it = entry->iterator; @@ -929,7 +1004,7 @@ static int memtx_engine_join(struct engine *engine, void *arg, struct xstream *stream) { (void)engine; - struct memtx_join_ctx *ctx = arg; + struct memtx_join_ctx *ctx = (struct memtx_join_ctx *)arg; ctx->stream = stream; /* * Memtx snapshot iterators are safe to use from another @@ -950,7 +1025,7 @@ static void memtx_engine_complete_join(struct engine *engine, void *arg) { (void)engine; - struct memtx_join_ctx *ctx = arg; + struct memtx_join_ctx *ctx = (struct memtx_join_ctx *)arg; struct memtx_join_entry *entry, *next; rlist_foreach_entry_safe(entry, &ctx->entries, in_ctx,
next) { entry->iterator->free(entry->iterator); @@ -971,10 +1046,14 @@ static void memtx_engine_memory_stat(struct engine *engine, struct engine_memory_stat *stat) { struct memtx_engine *memtx = (struct memtx_engine *)engine; - struct small_stats data_stats; + struct allocator_stats data_stats; struct mempool_stats index_stats; mempool_stats(&memtx->index_extent_pool, &index_stats); - small_stats(&memtx->alloc, &data_stats, small_stats_noop_cb, NULL); + if (memtx->type == SMALL_ALLOCATOR_TYPE) { + g_memtx_allocator_stats(&data_stats, small_stats_noop_cb, NULL); + } else { + g_memtx_allocator_stats(&data_stats); + } stat->data += data_stats.used; stat->index += index_stats.totals.used; } @@ -1052,15 +1131,27 @@ memtx_engine_gc_f(va_list va) struct memtx_engine * memtx_engine_new(const char *snap_dirname, bool force_recovery, uint64_t tuple_arena_max_size, uint32_t objsize_min, - bool dontdump, float alloc_factor) + bool dontdump, const char* allocator, float alloc_factor) { - struct memtx_engine *memtx = calloc(1, sizeof(*memtx)); + int64_t snap_signature; + struct memtx_engine *memtx = (struct memtx_engine *)calloc(1, sizeof(*memtx)); if (memtx == NULL) { diag_set(OutOfMemory, sizeof(*memtx), "malloc", "struct memtx_engine"); return NULL; } + assert(allocator != NULL); + if (!strcmp(allocator, "small")) { + small_allocator_choice(memtx); + } else if (!strcmp(allocator, "system")) { + system_allocator_choice(memtx); + } else { + diag_set(IllegalParams, "Bad memory allocator name"); + free(memtx); + return NULL; + } + xdir_create(&memtx->snap_dir, snap_dirname, SNAP, &INSTANCE_UUID, &xlog_opts_default); memtx->snap_dir.force_recovery = force_recovery; @@ -1078,7 +1169,7 @@ memtx_engine_new(const char *snap_dirname, bool force_recovery, * So if the local directory isn't empty, read the snapshot * signature right now to initialize the instance UUID. */ - int64_t snap_signature = xdir_last_vclock(&memtx->snap_dir, NULL); + snap_signature = xdir_last_vclock(&memtx->snap_dir, NULL); if (snap_signature >= 0) { struct xlog_cursor cursor; if (xdir_open_cursor(&memtx->snap_dir, @@ -1108,9 +1199,10 @@ memtx_engine_new(const char *snap_dirname, bool force_recovery, quota_init(&memtx->quota, tuple_arena_max_size); tuple_arena_create(&memtx->arena, &memtx->quota, tuple_arena_max_size, SLAB_SIZE, dontdump, "memtx"); - slab_cache_create(&memtx->slab_cache, &memtx->arena); - small_alloc_create(&memtx->alloc, &memtx->slab_cache, - objsize_min, alloc_factor); + if (memtx->type == SMALL_ALLOCATOR_TYPE) + small_alloc.init(&memtx->arena, objsize_min, alloc_factor); + else + system_alloc.init(&memtx->quota); /* Initialize index extent allocator. */ slab_cache_create(&memtx->index_slab_cache, &memtx->arena); @@ -1170,22 +1262,42 @@ memtx_engine_set_max_tuple_size(struct memtx_engine *memtx, size_t max_size) memtx->max_tuple_size = max_size; } +template <class Alloc> +void +memtx_allocator_stats(struct allocator_stats *stats, ...)
+{ + va_list argptr; + va_start(argptr, stats); + Alloc::stats(stats, argptr); + va_end(argptr); +} + +template <class Alloc> +void +memtx_mem_check() +{ + return Alloc::mem_check(); +} + +template <class Alloc> void memtx_enter_delayed_free_mode(struct memtx_engine *memtx) { memtx->snapshot_version++; if (memtx->delayed_free_mode++ == 0) - small_alloc_setopt(&memtx->alloc, SMALL_DELAYED_FREE_MODE, true); + Alloc::enter_delayed_free_mode(); } +template <class Alloc> void memtx_leave_delayed_free_mode(struct memtx_engine *memtx) { assert(memtx->delayed_free_mode > 0); if (--memtx->delayed_free_mode == 0) - small_alloc_setopt(&memtx->alloc, SMALL_DELAYED_FREE_MODE, false); + Alloc::leave_delayed_free_mode(); } +template <class Alloc> struct tuple * memtx_tuple_new(struct tuple_format *format, const char *data, const char *end) { @@ -1195,15 +1307,19 @@ memtx_tuple_new(struct tuple_format *format, const char *data, const char *end) struct region *region = &fiber()->gc; size_t region_svp = region_used(region); struct field_map_builder builder; + uint32_t field_map_size, data_offset; + size_t tuple_len, total; + char *raw; + if (tuple_field_map_create(format, data, true, &builder) != 0) goto end; - uint32_t field_map_size = field_map_build_size(&builder); + field_map_size = field_map_build_size(&builder); /* * Data offset is calculated from the begin of the struct * tuple base, not from memtx_tuple, because the struct * tuple is not the first field of the memtx_tuple. */ - uint32_t data_offset = sizeof(struct tuple) + field_map_size; + data_offset = sizeof(struct tuple) + field_map_size; if (data_offset > INT16_MAX) { /** tuple->data_offset is 15 bits */ diag_set(ClientError, ER_TUPLE_METADATA_IS_TOO_BIG, goto end; } - size_t tuple_len = end - data; - size_t total = sizeof(struct memtx_tuple) + field_map_size + tuple_len; + tuple_len = end - data; + total = sizeof(struct memtx_tuple) + field_map_size + tuple_len; ERROR_INJECT(ERRINJ_TUPLE_ALLOC, { diag_set(OutOfMemory, total, "slab allocator", "memtx_tuple"); goto end; } struct memtx_tuple *memtx_tuple; - while ((memtx_tuple = smalloc(&memtx->alloc, total)) == NULL) { + while ((memtx_tuple = (struct memtx_tuple *)Alloc::alloc(total)) == NULL) { bool stop; memtx_engine_run_gc(memtx, &stop); if (stop) @@ -1244,7 +1360,7 @@ memtx_tuple_new(struct tuple_format *format, const char *data, const char *end) tuple_format_ref(format); tuple->data_offset = data_offset; tuple->is_dirty = false; - char *raw = (char *) tuple + tuple->data_offset; + raw = (char *) tuple + tuple->data_offset; field_map_build(&builder, raw - field_map_size); memcpy(raw, data, tuple_len); say_debug("%s(%zu) = %p", __func__, tuple_len, memtx_tuple); @@ -1253,6 +1369,7 @@ end: return tuple; } +template <class Alloc> void memtx_tuple_delete(struct tuple_format *format, struct tuple *tuple) { @@ -1262,34 +1379,35 @@ memtx_tuple_delete(struct tuple_format *format, struct tuple *tuple) struct memtx_tuple *memtx_tuple = container_of(tuple, struct memtx_tuple, base); size_t total = tuple_size(tuple) + offsetof(struct memtx_tuple, base); - if (memtx->alloc.free_mode != SMALL_DELAYED_FREE || - memtx_tuple->version == memtx->snapshot_version || + if (memtx_tuple->version == memtx->snapshot_version || format->is_temporary) - smfree(&memtx->alloc, memtx_tuple, total); + Alloc::free(memtx_tuple, total); else - smfree_delayed(&memtx->alloc, memtx_tuple, total); + 
Alloc::free_delayed(memtx_tuple, total); tuple_format_unref(format); } +template <class Alloc> void metmx_tuple_chunk_delete(struct tuple_format *format, const char *data) { - struct memtx_engine *memtx = (struct memtx_engine *)format->engine; + (void)format; struct tuple_chunk *tuple_chunk = container_of((const char (*)[0])data, struct tuple_chunk, data); uint32_t sz = tuple_chunk_sz(tuple_chunk->data_sz); - smfree(&memtx->alloc, tuple_chunk, sz); + Alloc::free(tuple_chunk, sz); } +template <class Alloc> const char * memtx_tuple_chunk_new(struct tuple_format *format, struct tuple *tuple, const char *data, uint32_t data_sz) { - struct memtx_engine *memtx = (struct memtx_engine *)format->engine; + (void)format; uint32_t sz = tuple_chunk_sz(data_sz); struct tuple_chunk *tuple_chunk = - (struct tuple_chunk *) smalloc(&memtx->alloc, sz); + (struct tuple_chunk *) Alloc::alloc(sz); if (tuple == NULL) { diag_set(OutOfMemory, sz, "smalloc", "tuple"); return NULL; } @@ -1299,13 +1417,6 @@ memtx_tuple_chunk_new(struct tuple_format *format, struct tuple *tuple, return tuple_chunk->data; } -struct tuple_format_vtab memtx_tuple_format_vtab = { - memtx_tuple_delete, - memtx_tuple_new, - metmx_tuple_chunk_delete, - memtx_tuple_chunk_new, -}; - /** * Allocate a block of size MEMTX_EXTENT_SIZE for memtx index */ diff --git a/src/box/memtx_engine.h b/src/box/memtx_engine.h index 8b380bf3c..652f634fb 100644 --- a/src/box/memtx_engine.h +++ b/src/box/memtx_engine.h @@ -49,6 +49,7 @@ struct index; struct fiber; struct tuple; struct tuple_format; +struct allocator_stats; /** * The state of memtx recovery process. @@ -133,10 +134,6 @@ struct memtx_engine { * is reflected in box.slab.info(), @sa lua/slab.c. */ struct slab_arena arena; - /** Slab cache for allocating tuples. */ - struct slab_cache slab_cache; - /** Tuple allocator. */ - struct small_alloc alloc; /** Slab cache for allocating index extents. */ struct slab_cache index_slab_cache; /** Index extent allocator. */ @@ -178,6 +175,8 @@ struct memtx_engine { * memtx_gc_task::link. */ struct stailq gc_queue; + /** Memtx allocator type */ + int type; }; struct memtx_gc_task; @@ -213,7 +212,7 @@ struct memtx_engine * memtx_engine_new(const char *snap_dirname, bool force_recovery, uint64_t tuple_arena_max_size, uint32_t objsize_min, bool dontdump, - float alloc_factor) + const char *allocator, float alloc_factor); int memtx_engine_recover_snapshot(struct memtx_engine *memtx, @@ -235,23 +234,35 @@ memtx_engine_set_max_tuple_size(struct memtx_engine *memtx, size_t max_size); * times from the same or different fibers - one just has to leave * the delayed free mode the same amount of times then. */ -void -memtx_enter_delayed_free_mode(struct memtx_engine *memtx); +typedef void +(*global_memtx_enter_delayed_free_mode)(struct memtx_engine *memtx); +extern global_memtx_enter_delayed_free_mode g_memtx_enter_delayed_free_mode; /** * Leave tuple delayed free mode. This function undoes the effect * of memtx_enter_delayed_free_mode(). */ -void -memtx_leave_delayed_free_mode(struct memtx_engine *memtx); +typedef void +(*global_memtx_leave_delayed_free_mode)(struct memtx_engine *memtx); +extern global_memtx_leave_delayed_free_mode g_memtx_leave_delayed_free_mode; /** Allocate a memtx tuple. @sa tuple_new(). */ -struct tuple * -memtx_tuple_new(struct tuple_format *format, const char *data, const char *end); +typedef struct tuple * (*global_memtx_tuple_new)(struct tuple_format *format, const char *data, const char *end); +extern global_memtx_tuple_new g_memtx_tuple_new; /** Free a memtx tuple.
@sa tuple_delete(). */ -void -memtx_tuple_delete(struct tuple_format *format, struct tuple *tuple); +typedef void +(*global_memtx_tuple_delete)(struct tuple_format *format, struct tuple *tuple); +extern global_memtx_tuple_delete g_memtx_tuple_delete; + +typedef void +(*global_memtx_mem_check)(); +extern global_memtx_mem_check g_memtx_mem_check; + +typedef void +(*global_memtx_allocator_stats)(struct allocator_stats *stats, ...); +extern global_memtx_allocator_stats g_memtx_allocator_stats; /** Tuple format vtab for memtx engine. */ extern struct tuple_format_vtab memtx_tuple_format_vtab; @@ -299,13 +310,13 @@ static inline struct memtx_engine * memtx_engine_new_xc(const char *snap_dirname, bool force_recovery, uint64_t tuple_arena_max_size, uint32_t objsize_min, bool dontdump, - float alloc_factor) + const char *allocator, float alloc_factor) { struct memtx_engine *memtx; memtx = memtx_engine_new(snap_dirname, force_recovery, tuple_arena_max_size, objsize_min, dontdump, - alloc_factor); + allocator, alloc_factor); if (memtx == NULL) diag_raise(); return memtx; diff --git a/src/box/memtx_hash.c b/src/box/memtx_hash.c index ed4dba90a..4d620e318 100644 --- a/src/box/memtx_hash.c +++ b/src/box/memtx_hash.c @@ -458,7 +458,7 @@ hash_snapshot_iterator_free(struct snapshot_iterator *iterator) assert(iterator->free == hash_snapshot_iterator_free); struct hash_snapshot_iterator *it = (struct hash_snapshot_iterator *) iterator; - memtx_leave_delayed_free_mode((struct memtx_engine *) + g_memtx_leave_delayed_free_mode((struct memtx_engine *) it->index->base.engine); light_index_iterator_destroy(&it->index->hash_table, &it->iterator); index_unref(&it->index->base); @@ -523,7 +523,7 @@ memtx_hash_index_create_snapshot_iterator(struct index *base) index_ref(base); light_index_iterator_begin(&index->hash_table, &it->iterator); light_index_iterator_freeze(&index->hash_table, &it->iterator); - memtx_enter_delayed_free_mode((struct memtx_engine *)base->engine); + g_memtx_enter_delayed_free_mode((struct memtx_engine *)base->engine); return (struct snapshot_iterator *) it; } diff --git a/src/box/memtx_space.c b/src/box/memtx_space.c index 73b4c450e..66950bb7e 100644 --- a/src/box/memtx_space.c +++ b/src/box/memtx_space.c @@ -327,7 +327,7 @@ memtx_space_execute_replace(struct space *space, struct txn *txn, struct memtx_space *memtx_space = (struct memtx_space *)space; struct txn_stmt *stmt = txn_current_stmt(txn); enum dup_replace_mode mode = dup_replace_mode(request->type); - stmt->new_tuple = memtx_tuple_new(space->format, request->tuple, + stmt->new_tuple = g_memtx_tuple_new(space->format, request->tuple, request->tuple_end); if (stmt->new_tuple == NULL) return -1; @@ -412,7 +412,7 @@ memtx_space_execute_update(struct space *space, struct txn *txn, if (new_data == NULL) return -1; - stmt->new_tuple = memtx_tuple_new(format, new_data, + stmt->new_tuple = g_memtx_tuple_new(format, new_data, new_data + new_size); if (stmt->new_tuple == NULL) return -1; @@ -483,7 +483,7 @@ memtx_space_execute_upsert(struct space *space, struct txn *txn, format, request->index_base) != 0) { return -1; } - stmt->new_tuple = memtx_tuple_new(format, request->tuple, + stmt->new_tuple = g_memtx_tuple_new(format, request->tuple, request->tuple_end); if (stmt->new_tuple == NULL) return -1; @@ -507,7 +507,7 @@ memtx_space_execute_upsert(struct space *space, struct txn *txn, if (new_data == NULL) return -1; - stmt->new_tuple = memtx_tuple_new(format, new_data, + stmt->new_tuple = g_memtx_tuple_new(format, new_data, new_data + new_size); if 
(stmt->new_tuple == NULL) return -1; @@ -559,14 +559,14 @@ memtx_space_ephemeral_replace(struct space *space, const char *tuple, const char *tuple_end) { struct memtx_space *memtx_space = (struct memtx_space *)space; - struct tuple *new_tuple = memtx_tuple_new(space->format, tuple, + struct tuple *new_tuple = g_memtx_tuple_new(space->format, tuple, tuple_end); if (new_tuple == NULL) return -1; struct tuple *old_tuple; if (memtx_space->replace(space, NULL, new_tuple, DUP_REPLACE_OR_INSERT, &old_tuple) != 0) { - memtx_tuple_delete(space->format, new_tuple); + g_memtx_tuple_delete(space->format, new_tuple); return -1; } if (old_tuple != NULL) diff --git a/src/box/memtx_tree.cc b/src/box/memtx_tree.cc index 44bdc86e6..0c73bd676 100644 --- a/src/box/memtx_tree.cc +++ b/src/box/memtx_tree.cc @@ -1441,7 +1441,7 @@ tree_snapshot_iterator_free(struct snapshot_iterator *iterator) assert(iterator->free == &tree_snapshot_iterator_free); struct tree_snapshot_iterator *it = (struct tree_snapshot_iterator *)iterator; - memtx_leave_delayed_free_mode((struct memtx_engine *) + g_memtx_leave_delayed_free_mode((struct memtx_engine *) it->index->base.engine); memtx_tree_iterator_destroy(&it->index->tree, &it->tree_iterator); index_unref(&it->index->base); @@ -1516,7 +1516,7 @@ memtx_tree_index_create_snapshot_iterator(struct index *base) index_ref(base); it->tree_iterator = memtx_tree_iterator_first(&index->tree); memtx_tree_iterator_freeze(&index->tree, &it->tree_iterator); - memtx_enter_delayed_free_mode((struct memtx_engine *)base->engine); + g_memtx_enter_delayed_free_mode((struct memtx_engine *)base->engine); return (struct snapshot_iterator *) it; } diff --git a/src/box/system_allocator.h b/src/box/system_allocator.h new file mode 100644 index 000000000..8d7bb8067 --- /dev/null +++ b/src/box/system_allocator.h @@ -0,0 +1,218 @@ +#pragma once +/* + * Copyright 2010-2020, Tarantool AUTHORS, please see AUTHORS file. + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * 1. Redistributions of source code must retain the above + * copyright notice, this list of conditions and the + * following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY <COPYRIGHT HOLDER> ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> + * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, + * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF + * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE.
+ */ +#include +#include +#include + +#if defined(__cplusplus) +extern "C" { +#endif /* defined(__cplusplus) */ + +#if HAVE_MALLOC_SIZE_DARWIN +#include <malloc/malloc.h> +static inline size_t +portable_malloc_usable_size(void *p) +{ + return malloc_size(p); +} +#elif HAVE_MALLOC_USABLE_SIZE_BSD +#include <malloc_np.h> +static inline size_t +portable_malloc_usable_size(void *p) +{ + return malloc_usable_size(p); +} +#elif HAVE_MALLOC_USABLE_SIZE_LINUX +#include <malloc.h> +static inline size_t +portable_malloc_usable_size(void *p) +{ + return malloc_usable_size(p); +} +#else +#error "Undefined system type" +#endif + +/** + * Free mode + */ +enum system_free_mode { + /** Free objects immediately. */ + SYSTEM_FREE, + /** Collect garbage after delayed free. */ + SYSTEM_COLLECT_GARBAGE, + /** Postpone deletion of objects. */ + SYSTEM_DELAYED_FREE, +}; + +struct system_alloc { + /** + * Bytes allocated by system allocator + */ + uint64_t used_bytes; + /** + * Allocator quota + */ + struct quota *quota; + /** + * Free mode. + */ + enum system_free_mode free_mode; + /** + * List of pointers for delayed free. + */ + struct lifo delayed; +}; + +struct system_stats { + size_t used; + size_t total; +}; + +enum system_opt { + SYSTEM_DELAYED_FREE_MODE +}; + +static inline void +sysfree(struct system_alloc *alloc, void *ptr, MAYBE_UNUSED unsigned bytes) +{ + size_t size = portable_malloc_usable_size(ptr); + uint32_t s = size % QUOTA_UNIT_SIZE, units = size / QUOTA_UNIT_SIZE; + size_t used_bytes = pm_atomic_fetch_sub(&alloc->used_bytes, size); + if (small_align(used_bytes, QUOTA_UNIT_SIZE) > + small_align(used_bytes - s, QUOTA_UNIT_SIZE)) + units++; + if (units > 0) + quota_release(alloc->quota, units * QUOTA_UNIT_SIZE); + free(ptr); +} + +static inline void +system_collect_garbage(struct system_alloc *alloc) +{ + if (alloc->free_mode != SYSTEM_COLLECT_GARBAGE) + return; + + const int BATCH = 100; + if (!lifo_is_empty(&alloc->delayed)) { + for (int i = 0; i < BATCH; i++) { + void *item = lifo_pop(&alloc->delayed); + if (item == NULL) + break; + sysfree(alloc, item, 0 /* unused parameter */); + } + } else { + /* Finish garbage collection and switch to regular mode */ + alloc->free_mode = SYSTEM_FREE; + } +} + +static inline void +system_alloc_setopt(struct system_alloc *alloc, enum system_opt opt, bool val) +{ + switch (opt) { + case SYSTEM_DELAYED_FREE_MODE: + alloc->free_mode = val ?
SYSTEM_DELAYED_FREE : + SYSTEM_COLLECT_GARBAGE; + break; + default: + assert(false); + break; + } +} + +static inline void +system_stats(struct system_alloc *alloc, struct system_stats *totals) +{ + totals->used = pm_atomic_load_explicit(&alloc->used_bytes, + pm_memory_order_relaxed); + totals->total = quota_total(alloc->quota); +} + +static inline void +system_alloc_create(struct system_alloc *alloc, struct quota *quota) +{ + alloc->used_bytes = 0; + alloc->quota = quota; + lifo_init(&alloc->delayed); +} + +static inline void +system_alloc_destroy(MAYBE_UNUSED struct system_alloc *alloc) +{ + +} + +static inline void +sysfree_delayed(struct system_alloc *alloc, void *ptr, unsigned bytes) +{ + if (alloc->free_mode == SYSTEM_DELAYED_FREE && ptr) { + lifo_push(&alloc->delayed, ptr); + } else { + sysfree(alloc, ptr, bytes); + } +} + +static inline void * +sysalloc(struct system_alloc *alloc, unsigned bytes) +{ + system_collect_garbage(alloc); + + void *ptr = malloc(bytes); + if (!ptr) + return NULL; + size_t size = portable_malloc_usable_size(ptr); + uint32_t s = size % QUOTA_UNIT_SIZE, units = size / QUOTA_UNIT_SIZE; + while (1) { + size_t used_bytes = pm_atomic_load(&alloc->used_bytes); + if (small_align(used_bytes, QUOTA_UNIT_SIZE) < + small_align(used_bytes + s, QUOTA_UNIT_SIZE)) + units++; + if (units > 0) { + if (quota_use(alloc->quota, + units * QUOTA_UNIT_SIZE) < 0) { + free(ptr); + return NULL; + } + } + if (pm_atomic_compare_exchange_strong(&alloc->used_bytes, + &used_bytes, used_bytes + size)) + break; + if (units > 0) + quota_release(alloc->quota, units * QUOTA_UNIT_SIZE); + } + return ptr; +} + +#if defined(__cplusplus) +} /* extern "C" */ +#endif /* defined(__cplusplus) */ diff --git a/src/trivia/config.h.cmake b/src/trivia/config.h.cmake index 89e0d39c6..107cd8049 100644 --- a/src/trivia/config.h.cmake +++ b/src/trivia/config.h.cmake @@ -169,6 +169,9 @@ #cmakedefine HAVE_POSIX_FADVISE 1 #cmakedefine HAVE_FALLOCATE 1 #cmakedefine HAVE_MREMAP 1 +#cmakedefine HAVE_MALLOC_USABLE_SIZE_LINUX 1 +#cmakedefine HAVE_MALLOC_USABLE_SIZE_BSD 1 +#cmakedefine HAVE_MALLOC_SIZE_DARWIN 1 #cmakedefine HAVE_SYNC_FILE_RANGE 1 #cmakedefine HAVE_MSG_NOSIGNAL 1 diff --git a/test/app-tap/init_script.result b/test/app-tap/init_script.result index 72aa67db2..3b5211a90 100644 --- a/test/app-tap/init_script.result +++ b/test/app-tap/init_script.result @@ -3,6 +3,7 @@ -- box.cfg +allocator:small background:false checkpoint_count:2 checkpoint_interval:3600 diff --git a/test/box/admin.result b/test/box/admin.result index e05440f66..9e4813133 100644 --- a/test/box/admin.result +++ b/test/box/admin.result @@ -27,7 +27,9 @@ help() ... cfg_filter(box.cfg) --- -- - - background +- - - allocator + - small + - - background - false - - checkpoint_count - 2 diff --git a/test/box/cfg.result b/test/box/cfg.result index 10fef006c..d23255872 100644 --- a/test/box/cfg.result +++ b/test/box/cfg.result @@ -15,7 +15,9 @@ box.cfg.nosuchoption = 1 | ... cfg_filter(box.cfg) | --- - | - - - background + | - - - allocator + | - small + | - - background | - false | - - checkpoint_count | - 2 @@ -128,7 +130,9 @@ box.cfg() | ... 
cfg_filter(box.cfg) | --- - | - - - background + | - - - allocator + | - small + | - - background | - false | - - checkpoint_count | - 2 diff --git a/test/box/choose_memtx_allocator.lua b/test/box/choose_memtx_allocator.lua new file mode 100644 index 000000000..77a0ec638 --- /dev/null +++ b/test/box/choose_memtx_allocator.lua @@ -0,0 +1,9 @@ +#!/usr/bin/env tarantool + +require('console').listen(os.getenv('ADMIN')) + +box.cfg({ + listen = os.getenv("LISTEN"), + allocator=arg[1], + checkpoint_interval=10 +}) diff --git a/test/box/choose_memtx_allocator.result b/test/box/choose_memtx_allocator.result new file mode 100644 index 000000000..dab316b93 --- /dev/null +++ b/test/box/choose_memtx_allocator.result @@ -0,0 +1,135 @@ +-- test-run result file version 2 + +-- write data recover from latest snapshot +env = require('test_run') + | --- + | ... +test_run = env.new() + | --- + | ... +test_run:cmd('create server test with script="box/choose_memtx_allocator.lua"') + | --- + | - true + | ... +--test small allocator +test_run:cmd('start server test with args="small"') + | --- + | - true + | ... +test_run:cmd('switch test') + | --- + | - true + | ... +space = box.schema.space.create('test') + | --- + | ... +space:format({ {name = 'id', type = 'unsigned'}, {name = 'year', type = 'unsigned'} }) + | --- + | ... +s = space:create_index('primary', { parts = {'id'} }) + | --- + | ... +for key = 1, 1000 do space:insert({key, key + 1000}) end + | --- + | ... +for key = 1, 1000 do space:replace({key, key + 5000}) end + | --- + | ... +for key = 1, 1000 do space:delete(key) end + | --- + | ... +space:drop() + | --- + | ... +test_run:cmd('switch default') + | --- + | - true + | ... +test_run:cmd('stop server test') + | --- + | - true + | ... +--test system(malloc) allocator +test_run:cmd('start server test with args="system"') + | --- + | - true + | ... +test_run:cmd('switch test') + | --- + | - true + | ... +space = box.schema.space.create('test') + | --- + | ... +space:format({ {name = 'id', type = 'unsigned'}, {name = 'year', type = 'unsigned'} }) + | --- + | ... +s = space:create_index('primary', { parts = {'id'} }) + | --- + | ... +for key = 1, 500000 do space:insert({key, key + 1000}) end + | --- + | ... +for key = 1, 500000 do space:replace({key, key + 5000}) end + | --- + | ... +for key = 1, 500000 do space:delete(key) end + | --- + | ... +space:drop() + | --- + | ... +test_run:cmd('switch default') + | --- + | - true + | ... +test_run:cmd('stop server test') + | --- + | - true + | ... +--test default (small) allocator +test_run:cmd('start server test') + | --- + | - true + | ... +test_run:cmd('switch test') + | --- + | - true + | ... +space = box.schema.space.create('test') + | --- + | ... +space:format({ {name = 'id', type = 'unsigned'}, {name = 'year', type = 'unsigned'} }) + | --- + | ... +s = space:create_index('primary', { parts = {'id'} }) + | --- + | ... +for key = 1, 1000 do space:insert({key, key + 1000}) end + | --- + | ... +for key = 1, 1000 do space:replace({key, key + 5000}) end + | --- + | ... +for key = 1, 1000 do space:delete(key) end + | --- + | ... +space:drop() + | --- + | ... +test_run:cmd('switch default') + | --- + | - true + | ... +test_run:cmd('stop server test') + | --- + | - true + | ... +test_run:cmd('cleanup server test') + | --- + | - true + | ... +test_run:cmd('delete server test') + | --- + | - true + | ... 
diff --git a/test/box/choose_memtx_allocator.test.lua b/test/box/choose_memtx_allocator.test.lua new file mode 100644 index 000000000..007b01d80 --- /dev/null +++ b/test/box/choose_memtx_allocator.test.lua @@ -0,0 +1,43 @@ + +-- write data recover from latest snapshot +env = require('test_run') +test_run = env.new() +test_run:cmd('create server test with script="box/choose_memtx_allocator.lua"') +--test small allocator +test_run:cmd('start server test with args="small"') +test_run:cmd('switch test') +space = box.schema.space.create('test') +space:format({ {name = 'id', type = 'unsigned'}, {name = 'year', type = 'unsigned'} }) +s = space:create_index('primary', { parts = {'id'} }) +for key = 1, 1000 do space:insert({key, key + 1000}) end +for key = 1, 1000 do space:replace({key, key + 5000}) end +for key = 1, 1000 do space:delete(key) end +space:drop() +test_run:cmd('switch default') +test_run:cmd('stop server test') +--test system(malloc) allocator +test_run:cmd('start server test with args="system"') +test_run:cmd('switch test') +space = box.schema.space.create('test') +space:format({ {name = 'id', type = 'unsigned'}, {name = 'year', type = 'unsigned'} }) +s = space:create_index('primary', { parts = {'id'} }) +for key = 1, 500000 do space:insert({key, key + 1000}) end +for key = 1, 500000 do space:replace({key, key + 5000}) end +for key = 1, 500000 do space:delete(key) end +space:drop() +test_run:cmd('switch default') +test_run:cmd('stop server test') +--test default (small) allocator +test_run:cmd('start server test') +test_run:cmd('switch test') +space = box.schema.space.create('test') +space:format({ {name = 'id', type = 'unsigned'}, {name = 'year', type = 'unsigned'} }) +s = space:create_index('primary', { parts = {'id'} }) +for key = 1, 1000 do space:insert({key, key + 1000}) end +for key = 1, 1000 do space:replace({key, key + 5000}) end +for key = 1, 1000 do space:delete(key) end +space:drop() +test_run:cmd('switch default') +test_run:cmd('stop server test') +test_run:cmd('cleanup server test') +test_run:cmd('delete server test') -- 2.20.1
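
P.S. The usage sketch referenced above: a minimal, illustrative Lua snippet showing how the new option is meant to be used (not part of the patch; the space name and data below are made up for the example):

-- Select the system (malloc-based) allocator for memtx tuples.
-- Pass allocator = "small", or omit the option, to keep the default
-- small allocator.
box.cfg{allocator = "system"}

-- Tuples of any memtx space are now allocated via libc malloc.
local s = box.schema.space.create('example')
s:create_index('pk')
s:insert{1, 'stored via the system allocator'}

-- box.slab.info() still works; with the system allocator, tuple memory
-- is accounted against the memtx quota rather than the slab arena.
print(box.slab.info().arena_used_ratio)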