From: Vladimir Davydov <vdavydov.dev@gmail.com>
To: kostja@tarantool.org
Cc: tarantool-patches@freelists.org
Subject: [PATCH 4/8] memtx: move all global variables to engine
Date: Tue, 22 May 2018 14:46:12 +0300
Message-ID: <d8a5500aad0ee235c0351c6e63477c39c77f46c4.1526987033.git.vdavydov.dev@gmail.com>
In-Reply-To: <cover.1526987033.git.vdavydov.dev@gmail.com>

All functions that need them are now explicitly passed the engine, so we
can consolidate all variables related to memtx engine state in one
place.
---
 src/box/lua/slab.c     |  35 ++++++++-------
 src/box/memtx_engine.c | 114 ++++++++++++++++---------------------------------
 src/box/memtx_engine.h |  32 ++++++++++++++
 3 files changed, 89 insertions(+), 92 deletions(-)
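For context, the shape of the change is simple: state that used to live in
file-scope globals becomes fields of struct memtx_engine, and code that is
not handed the engine explicitly (the Lua bindings below, for instance)
resolves it by name. A minimal sketch of the consumer side follows;
report_quota() is a hypothetical example, not part of the patch, and the
signatures are simplified:

#include <assert.h>
#include <stdio.h>
#include "box/engine.h"
#include "box/memtx_engine.h"

/*
 * Hypothetical consumer: resolve the engine by name instead of
 * poking file-scope globals, then read allocator state off it.
 */
static void
report_quota(void)
{
	struct memtx_engine *memtx;
	memtx = (struct memtx_engine *)engine_by_name("memtx");
	assert(memtx != NULL);
	/* All allocator state now hangs off the engine object. */
	printf("quota: %zu of %zu bytes used\n",
	       quota_used(&memtx->quota), quota_total(&memtx->quota));
}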
diff --git a/src/box/lua/slab.c b/src/box/lua/slab.c
index 1367a4eb..9f5e7e95 100644
--- a/src/box/lua/slab.c
+++ b/src/box/lua/slab.c
@@ -41,9 +41,8 @@
 #include "small/small.h"
 #include "small/quota.h"
 #include "memory.h"
-
-extern struct small_alloc memtx_alloc;
-extern struct mempool memtx_index_extent_pool;
+#include "box/engine.h"
+#include "box/memtx_engine.h"
 
 static int
 small_stats_noop_cb(const struct mempool_stats *stats, void *cb_ctx)
@@ -106,15 +105,18 @@ small_stats_lua_cb(const struct mempool_stats *stats, void *cb_ctx)
 static int
 lbox_slab_stats(struct lua_State *L)
 {
+	struct memtx_engine *memtx;
+	memtx = (struct memtx_engine *)engine_by_name("memtx");
+
 	struct small_stats totals;
 	lua_newtable(L);
 	/*
 	 * List all slabs used for tuples and slabs used for
 	 * indexes, with their stats.
 	 */
-	small_stats(&memtx_alloc, &totals, small_stats_lua_cb, L);
+	small_stats(&memtx->alloc, &totals, small_stats_lua_cb, L);
 	struct mempool_stats index_stats;
-	mempool_stats(&memtx_index_extent_pool, &index_stats);
+	mempool_stats(&memtx->index_extent_pool, &index_stats);
 	small_stats_lua_cb(&index_stats, L);
 
 	return 1;
@@ -123,6 +125,9 @@ lbox_slab_stats(struct lua_State *L)
 static int
 lbox_slab_info(struct lua_State *L)
 {
+	struct memtx_engine *memtx;
+	memtx = (struct memtx_engine *)engine_by_name("memtx");
+
 	struct small_stats totals;
 
 	/*
@@ -130,12 +135,10 @@ lbox_slab_info(struct lua_State *L)
 	 * indexes, with their stats.
 	 */
 	lua_newtable(L);
-	small_stats(&memtx_alloc, &totals, small_stats_noop_cb, L);
+	small_stats(&memtx->alloc, &totals, small_stats_noop_cb, L);
 	struct mempool_stats index_stats;
-	mempool_stats(&memtx_index_extent_pool, &index_stats);
+	mempool_stats(&memtx->index_extent_pool, &index_stats);
 
-	struct slab_arena *tuple_arena = memtx_alloc.cache->arena;
-	struct quota *memtx_quota = tuple_arena->quota;
 	double ratio;
 	char ratio_buf[32];
 
@@ -176,7 +179,7 @@
 	 * quota_used_ratio > 0.9 work as an indicator
 	 * for reaching Tarantool memory limit.
 	 */
-	size_t arena_size = tuple_arena->used;
+	size_t arena_size = memtx->arena.used;
 	luaL_pushuint64(L, arena_size);
 	lua_settable(L, -3);
 	/**
@@ -200,7 +203,7 @@
 	 * box.cfg.slab_alloc_arena, but in bytes
 	 */
	lua_pushstring(L, "quota_size");
-	luaL_pushuint64(L, quota_total(memtx_quota));
+	luaL_pushuint64(L, quota_total(&memtx->quota));
 	lua_settable(L, -3);
 
 	/*
@@ -208,8 +211,8 @@
 	 * How much quota has been booked - reflects the total
 	 * size of slabs in various slab caches.
 	 */
 	lua_pushstring(L, "quota_used");
-	luaL_pushuint64(L, quota_used(memtx_quota));
+	luaL_pushuint64(L, quota_used(&memtx->quota));
 	lua_settable(L, -3);
 
 	/**
@@ -217,8 +220,8 @@
 	 * factor, it's the quota that give you OOM error in the
 	 * end of the day.
 	 */
-	ratio = 100 * ((double) quota_used(memtx_quota) /
-		       ((double) quota_total(memtx_quota) + 0.0001));
+	ratio = 100 * ((double) quota_used(&memtx->quota) /
+		       ((double) quota_total(&memtx->quota) + 0.0001));
 	snprintf(ratio_buf, sizeof(ratio_buf), "%0.2lf%%", ratio);
 
 	lua_pushstring(L, "quota_used_ratio");
@@ -254,7 +257,9 @@ lbox_runtime_info(struct lua_State *L)
 static int
 lbox_slab_check(MAYBE_UNUSED struct lua_State *L)
 {
-	slab_cache_check(memtx_alloc.cache);
+	struct memtx_engine *memtx;
+	memtx = (struct memtx_engine *)engine_by_name("memtx");
+	slab_cache_check(memtx->alloc.cache);
 	return 0;
 }
 
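A small aside on the quota_used_ratio hunk above: the division adds a tiny
constant to quota_total(), presumably so the expression stays well-defined
while the quota is still zero. A standalone sketch of the same arithmetic,
assuming nothing beyond libc:

#include <stdio.h>

/* Same guard as in lbox_slab_info(): the epsilon avoids 0/0. */
static double
used_ratio_percent(double used, double total)
{
	return 100.0 * (used / (total + 0.0001));
}

int
main(void)
{
	printf("%0.2lf%%\n", used_ratio_percent(0, 0));      /* 0.00% */
	printf("%0.2lf%%\n", used_ratio_percent(512, 1024)); /* ~50.00% */
	return 0;
}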
diff --git a/src/box/memtx_engine.c b/src/box/memtx_engine.c
index 70478d75..b1a9b157 100644
--- a/src/box/memtx_engine.c
+++ b/src/box/memtx_engine.c
@@ -50,41 +50,6 @@
 #include "schema.h"
 #include "gc.h"
 
-/*
- * If you decide to use memtx_arena for anything other than
- * memtx_alloc or memtx_index_extent_pool, make sure this
- * is reflected in box.slab.info(), @sa lua/slab.c.
- */
-
-/** Common quota for memtx tuples and indexes. */
-static struct quota memtx_quota;
-/** Common slab arena for memtx tuples and indexes. */
-static struct slab_arena memtx_arena;
-/** Slab cache for allocating memtx tuples. */
-static struct slab_cache memtx_slab_cache;
-/** Memtx tuple allocator. */
-struct small_alloc memtx_alloc; /* used by box.slab.info() */
-/** Slab cache for allocating memtx index extents. */
-static struct slab_cache memtx_index_slab_cache;
-/** Memtx index extent allocator. */
-struct mempool memtx_index_extent_pool; /* used by box.slab.info() */
-
-/**
- * To ensure proper statement-level rollback in case
- * of out of memory conditions, we maintain a number
- * of slack memory extents reserved before a statement
- * is begun. If there isn't enough slack memory,
- * we don't begin the statement.
- */
-static int memtx_index_num_reserved_extents;
-static void *memtx_index_reserved_extents;
-
-/** Maximal allowed tuple size, box.cfg.memtx_max_tuple_size. */
-static size_t memtx_max_tuple_size = 1 * 1024 * 1024;
-
-/** Incremented with each next snapshot. */
-uint32_t snapshot_version;
-
 static void
 txn_on_yield_or_stop(struct trigger *trigger, void *event)
 {
@@ -108,6 +73,7 @@ struct memtx_tuple {
 enum {
 	OBJSIZE_MIN = 16,
 	SLAB_SIZE = 16 * 1024 * 1024,
+	MAX_TUPLE_SIZE = 1 * 1024 * 1024,
 };
 
 static int
@@ -701,8 +667,8 @@ memtx_engine_begin_checkpoint(struct engine *engine)
 	}
 
 	/* increment snapshot version; set tuple deletion to delayed mode */
-	snapshot_version++;
-	small_alloc_setopt(&memtx_alloc, SMALL_DELAYED_FREE_MODE, true);
+	memtx->snapshot_version++;
+	small_alloc_setopt(&memtx->alloc, SMALL_DELAYED_FREE_MODE, true);
 	return 0;
 }
 
@@ -748,7 +714,7 @@ memtx_engine_commit_checkpoint(struct engine *engine, struct vclock *vclock)
 	/* waitCheckpoint() must have been done. */
 	assert(!memtx->checkpoint->waiting_for_snap_thread);
 
-	small_alloc_setopt(&memtx_alloc, SMALL_DELAYED_FREE_MODE, false);
+	small_alloc_setopt(&memtx->alloc, SMALL_DELAYED_FREE_MODE, false);
 
 	if (!memtx->checkpoint->touch) {
 		int64_t lsn = vclock_sum(memtx->checkpoint->vclock);
@@ -791,7 +757,7 @@ memtx_engine_abort_checkpoint(struct engine *engine)
 		memtx->checkpoint->waiting_for_snap_thread = false;
 	}
 
-	small_alloc_setopt(&memtx_alloc, SMALL_DELAYED_FREE_MODE, false);
+	small_alloc_setopt(&memtx->alloc, SMALL_DELAYED_FREE_MODE, false);
 
 	/** Remove garbage .inprogress file. */
 	char *filename =
@@ -915,11 +881,11 @@ small_stats_noop_cb(const struct mempool_stats *stats, void *cb_ctx)
 static void
 memtx_engine_memory_stat(struct engine *engine, struct engine_memory_stat *stat)
 {
-	(void)engine;
+	struct memtx_engine *memtx = (struct memtx_engine *)engine;
 	struct small_stats data_stats;
 	struct mempool_stats index_stats;
-	mempool_stats(&memtx_index_extent_pool, &index_stats);
-	small_stats(&memtx_alloc, &data_stats, small_stats_noop_cb, NULL);
+	mempool_stats(&memtx->index_extent_pool, &index_stats);
+	small_stats(&memtx->alloc, &data_stats, small_stats_noop_cb, NULL);
 	stat->data += data_stats.used;
 	stat->index += index_stats.totals.used;
 }
@@ -1007,21 +973,22 @@ memtx_engine_new(const char *snap_dirname, bool force_recovery,
 		objsize_min = OBJSIZE_MIN;
 
 	/* Initialize tuple allocator. */
-	quota_init(&memtx_quota, tuple_arena_max_size);
-	tuple_arena_create(&memtx_arena, &memtx_quota, tuple_arena_max_size,
+	quota_init(&memtx->quota, tuple_arena_max_size);
+	tuple_arena_create(&memtx->arena, &memtx->quota, tuple_arena_max_size,
 			   SLAB_SIZE, "memtx");
-	slab_cache_create(&memtx_slab_cache, &memtx_arena);
-	small_alloc_create(&memtx_alloc, &memtx_slab_cache,
+	slab_cache_create(&memtx->slab_cache, &memtx->arena);
+	small_alloc_create(&memtx->alloc, &memtx->slab_cache,
 			   objsize_min, alloc_factor);
 
 	/* Initialize index extent allocator. */
-	slab_cache_create(&memtx_index_slab_cache, &memtx_arena);
-	mempool_create(&memtx_index_extent_pool, &memtx_index_slab_cache,
+	slab_cache_create(&memtx->index_slab_cache, &memtx->arena);
+	mempool_create(&memtx->index_extent_pool, &memtx->index_slab_cache,
 		       MEMTX_EXTENT_SIZE);
-	memtx_index_num_reserved_extents = 0;
-	memtx_index_reserved_extents = NULL;
+	memtx->num_reserved_extents = 0;
+	memtx->reserved_extents = NULL;
 
 	memtx->state = MEMTX_INITIALIZED;
+	memtx->max_tuple_size = MAX_TUPLE_SIZE;
 	memtx->force_recovery = force_recovery;
 
 	memtx->base.vtab = &memtx_engine_vtab;
@@ -1052,15 +1019,13 @@ memtx_engine_set_snap_io_rate_limit(struct memtx_engine *memtx, double limit)
 void
 memtx_engine_set_max_tuple_size(struct memtx_engine *memtx, size_t max_size)
 {
-	(void)memtx;
-	memtx_max_tuple_size = max_size;
+	memtx->max_tuple_size = max_size;
 }
 
 struct tuple *
 memtx_tuple_new(struct tuple_format *format, const char *data, const char *end)
 {
 	struct memtx_engine *memtx = (struct memtx_engine *)format->engine_data;
-	(void)memtx;
 	assert(mp_typeof(*data) == MP_ARRAY);
 	size_t tuple_len = end - data;
 	size_t meta_size = tuple_format_meta_size(format);
@@ -1070,20 +1035,20 @@ memtx_tuple_new(struct tuple_format *format, const char *data, const char *end)
 		diag_set(OutOfMemory, total, "slab allocator", "memtx_tuple");
 		return NULL;
 	});
-	if (unlikely(total > memtx_max_tuple_size)) {
+	if (unlikely(total > memtx->max_tuple_size)) {
 		diag_set(ClientError, ER_MEMTX_MAX_TUPLE_SIZE, total);
 		error_log(diag_last_error(diag_get()));
 		return NULL;
 	}
-	struct memtx_tuple *memtx_tuple = smalloc(&memtx_alloc, total);
+	struct memtx_tuple *memtx_tuple = smalloc(&memtx->alloc, total);
 	if (memtx_tuple == NULL) {
 		diag_set(OutOfMemory, total, "slab allocator", "memtx_tuple");
 		return NULL;
 	}
 	struct tuple *tuple = &memtx_tuple->base;
 	tuple->refs = 0;
-	memtx_tuple->version = snapshot_version;
+	memtx_tuple->version = memtx->snapshot_version;
 	assert(tuple_len <= UINT32_MAX); /* bsize is UINT32_MAX */
 	tuple->bsize = tuple_len;
 	tuple->format_id = tuple_format_id(format);
@@ -1109,7 +1074,6 @@ void
 memtx_tuple_delete(struct tuple_format *format, struct tuple *tuple)
 {
 	struct memtx_engine *memtx = (struct memtx_engine *)format->engine_data;
-	(void)memtx;
 	say_debug("%s(%p)", __func__, tuple);
 	assert(tuple->refs == 0);
 #ifndef NDEBUG
@@ -1126,11 +1090,11 @@ memtx_tuple_delete(struct tuple_format *format, struct tuple *tuple)
 	tuple_format_unref(format);
 	struct memtx_tuple *memtx_tuple =
 		container_of(tuple, struct memtx_tuple, base);
-	if (memtx_alloc.free_mode != SMALL_DELAYED_FREE ||
-	    memtx_tuple->version == snapshot_version)
-		smfree(&memtx_alloc, memtx_tuple, total);
+	if (memtx->alloc.free_mode != SMALL_DELAYED_FREE ||
+	    memtx_tuple->version == memtx->snapshot_version)
+		smfree(&memtx->alloc, memtx_tuple, total);
 	else
-		smfree_delayed(&memtx_alloc, memtx_tuple, total);
+		smfree_delayed(&memtx->alloc, memtx_tuple, total);
 }
 
 struct tuple_format_vtab memtx_tuple_format_vtab = {
@@ -1145,13 +1109,11 @@ void *
 memtx_index_extent_alloc(void *ctx)
 {
 	struct memtx_engine *memtx = (struct memtx_engine *)ctx;
-	(void)memtx;
-	if (memtx_index_reserved_extents) {
-		assert(memtx_index_num_reserved_extents > 0);
-		memtx_index_num_reserved_extents--;
-		void *result = memtx_index_reserved_extents;
-		memtx_index_reserved_extents = *(void **)
-			memtx_index_reserved_extents;
+	if (memtx->reserved_extents) {
+		assert(memtx->num_reserved_extents > 0);
+		memtx->num_reserved_extents--;
+		void *result = memtx->reserved_extents;
+		memtx->reserved_extents = *(void **)memtx->reserved_extents;
 		return result;
 	}
 	ERROR_INJECT(ERRINJ_INDEX_ALLOC, {
@@ -1160,7 +1122,7 @@ memtx_index_extent_alloc(void *ctx)
 			 "mempool", "new slab");
 		return NULL;
 	});
-	void *ret = mempool_alloc(&memtx_index_extent_pool);
+	void *ret = mempool_alloc(&memtx->index_extent_pool);
 	if (ret == NULL)
 		diag_set(OutOfMemory, MEMTX_EXTENT_SIZE,
 			 "mempool", "new slab");
@@ -1174,8 +1136,7 @@ void
 memtx_index_extent_free(void *ctx, void *extent)
 {
 	struct memtx_engine *memtx = (struct memtx_engine *)ctx;
-	(void)memtx;
-	return mempool_free(&memtx_index_extent_pool, extent);
+	return mempool_free(&memtx->index_extent_pool, extent);
 }
 
 /**
@@ -1185,23 +1146,22 @@ memtx_index_extent_free(void *ctx, void *extent)
 int
 memtx_index_extent_reserve(struct memtx_engine *memtx, int num)
 {
-	(void)memtx;
 	ERROR_INJECT(ERRINJ_INDEX_ALLOC, {
 		/* same error as in mempool_alloc */
 		diag_set(OutOfMemory, MEMTX_EXTENT_SIZE,
 			 "mempool", "new slab");
 		return -1;
 	});
-	while (memtx_index_num_reserved_extents < num) {
-		void *ext = mempool_alloc(&memtx_index_extent_pool);
+	while (memtx->num_reserved_extents < num) {
+		void *ext = mempool_alloc(&memtx->index_extent_pool);
 		if (ext == NULL) {
 			diag_set(OutOfMemory, MEMTX_EXTENT_SIZE,
 				 "mempool", "new slab");
 			return -1;
 		}
-		*(void **)ext = memtx_index_reserved_extents;
-		memtx_index_reserved_extents = ext;
-		memtx_index_num_reserved_extents++;
+		*(void **)ext = memtx->reserved_extents;
+		memtx->reserved_extents = ext;
+		memtx->num_reserved_extents++;
 	}
 	return 0;
 }
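A note on the checkpoint-related hunks above: snapshot_version and
SMALL_DELAYED_FREE_MODE work as a pair. begin_checkpoint() bumps the version
and switches the allocator to delayed-free mode; memtx_tuple_delete() then
frees a tuple immediately only if no snapshot is running, or if the tuple
was created after the snapshot began (so the snapshot thread can never
observe it). A toy model of just that decision, with all toy_* names
invented here for illustration:

#include <stdbool.h>
#include <stdint.h>

/* Toy model of memtx's snapshot-gated freeing; not Tarantool API. */
struct toy_engine {
	uint32_t snapshot_version;  /* bumped when a checkpoint begins */
	bool delayed_free_mode;     /* true while a snapshot is written */
};

struct toy_tuple {
	uint32_t version;           /* engine's snapshot_version at birth */
};

/*
 * Free immediately only if no snapshot is in progress, or the tuple
 * was born after the snapshot started; otherwise its memory must be
 * held until the snapshot completes.
 */
static bool
toy_can_free_now(const struct toy_engine *e, const struct toy_tuple *t)
{
	return !e->delayed_free_mode || t->version == e->snapshot_version;
}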
diff --git a/src/box/memtx_engine.h b/src/box/memtx_engine.h
index 389314ba..0bcd24ac 100644
--- a/src/box/memtx_engine.h
+++ b/src/box/memtx_engine.h
@@ -33,6 +33,8 @@
 #include <stdbool.h>
 #include <stddef.h>
 #include <stdint.h>
+#include <small/quota.h>
+#include <small/small.h>
 #include <small/mempool.h>
 
 #include "engine.h"
@@ -97,6 +99,36 @@ struct memtx_engine {
 	uint64_t snap_io_rate_limit;
 	/** Skip invalid snapshot records if this flag is set. */
 	bool force_recovery;
+	/** Common quota for tuples and indexes. */
+	struct quota quota;
+	/**
+	 * Common slab arena for tuples and indexes.
+	 * If you decide to use it for anything other than
+	 * tuple_alloc or index_extent_pool, make sure this
+	 * is reflected in box.slab.info(), @sa lua/slab.c.
+	 */
+	struct slab_arena arena;
+	/** Slab cache for allocating tuples. */
+	struct slab_cache slab_cache;
+	/** Tuple allocator. */
+	struct small_alloc alloc;
+	/** Slab cache for allocating index extents. */
+	struct slab_cache index_slab_cache;
+	/** Index extent allocator. */
+	struct mempool index_extent_pool;
+	/**
+	 * To ensure proper statement-level rollback in case
+	 * of out of memory conditions, we maintain a number
+	 * of slack memory extents reserved before a statement
+	 * is begun. If there isn't enough slack memory,
+	 * we don't begin the statement.
+	 */
+	int num_reserved_extents;
+	void *reserved_extents;
+	/** Maximal allowed tuple size, box.cfg.memtx_max_tuple_size. */
+	size_t max_tuple_size;
+	/** Incremented with each next snapshot. */
+	uint32_t snapshot_version;
 	/** Memory pool for tree index iterator. */
 	struct mempool tree_iterator_pool;
 	/** Memory pool for rtree index iterator. */
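Finally, the reserved-extents fields that moved into the struct implement a
small intrusive freelist: memtx_index_extent_reserve() pre-allocates extents
before a statement begins so the statement can never die from OOM halfway
through, and memtx_index_extent_alloc() pops from that list before falling
back to the mempool. Each free extent stores the pointer to the next one in
its own first word, so the list costs no extra memory. A self-contained toy
version of the same pattern (toy_* names are mine, not Tarantool's; the real
code allocates from a mempool, not malloc):

#include <stdlib.h>

enum { TOY_EXTENT_SIZE = 16 * 1024 };

struct toy_reserve {
	void *head; /* free extents, chained through their first word */
	int count;
};

/* Pre-allocate up to num extents; fail before the statement begins. */
static int
toy_reserve_extents(struct toy_reserve *r, int num)
{
	while (r->count < num) {
		void *ext = malloc(TOY_EXTENT_SIZE);
		if (ext == NULL)
			return -1;
		*(void **)ext = r->head; /* link through the first word */
		r->head = ext;
		r->count++;
	}
	return 0;
}

/* Pop a reserved extent, or NULL if the reserve is exhausted. */
static void *
toy_extent_alloc(struct toy_reserve *r)
{
	if (r->head == NULL)
		return NULL;
	void *ext = r->head;
	r->head = *(void **)ext;
	r->count--;
	return ext;
}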
-- 
2.11.0

Thread overview: 21+ messages

2018-05-22 11:46 [PATCH 0/8] Follow-up on async memtx index cleanup Vladimir Davydov
2018-05-22 11:46 ` [PATCH 1/8] memtx: init index extent allocator in engine constructor Vladimir Davydov
2018-05-22 13:43 ` Konstantin Osipov
2018-05-22 11:46 ` [PATCH 2/8] memtx: fold memtx_tuple.cc into memtx_engine.c Vladimir Davydov
2018-05-22 13:45 ` Konstantin Osipov
2018-05-22 11:46 ` [PATCH 3/8] memtx: pass engine to memory allocation functions Vladimir Davydov
2018-05-22 13:47 ` Konstantin Osipov
2018-05-22 14:39 ` Vladimir Davydov
2018-05-22 11:46 ` Vladimir Davydov [this message]
2018-05-22 13:48 ` [PATCH 4/8] memtx: move all global variables to engine Konstantin Osipov
2018-05-22 11:46 ` [PATCH 5/8] memtx: destroy slab arena on engine shutdown Vladimir Davydov
2018-05-22 13:50 ` Konstantin Osipov
2018-05-22 16:26 ` Vladimir Davydov
2018-05-22 11:46 ` [PATCH 6/8] memtx: embed light hash into memtx_hash_index Vladimir Davydov
2018-05-22 13:51 ` Konstantin Osipov
2018-05-22 11:46 ` [PATCH 7/8] memtx: rework background garbage collection procedure Vladimir Davydov
2018-05-22 13:56 ` Konstantin Osipov
2018-05-22 14:49 ` Vladimir Davydov
2018-05-22 16:42 ` Konstantin Osipov
2018-05-22 11:46 ` [PATCH 8/8] memtx: run garbage collection on demand Vladimir Davydov
2018-05-22 14:00 ` Konstantin Osipov