[PATCH] box/memtx: Allow to skip tuple memory from coredump
Vladimir Davydov
vdavydov.dev at gmail.com
Mon May 13 13:21:21 MSK 2019
[Cc += Alexander]
I don't quite like the name: memtx_memory_dontdump. We might want to
extend it to work on vinyl memory as well one day - I don't think we'd
want to introduce another cfg parameter for that. What about strip_core?
Alexander, what do you think about the new configuration parameter name?
Do you have a better name in mind?
Cyrill, please see a few comments inline.
On Tue, May 07, 2019 at 08:26:11PM +0300, Cyrill Gorcunov wrote:
> In case there is a huge amount of tuples, the whole
> memory goes to the coredump file even if we don't need it
> for problem investigation. As a result, the coredump may
> blow up to gigabytes in size.
>
> Let's allow excluding this memory from dumping via the
> box.cfg::memtx_memory_dontdump boolean parameter.
>
> Note that the tuple's arena is used not only for tuples
> themselves but for memtx->index_extent_pool and
> memtx->iterator_pool as well, so they are affected
> too.
>
> Fixes #3509
Please write a doc bot request at the end of the commit message
(after "Fixes ..."). Take a look at this for a reference:
https://github.com/tarantool/tarantool/commit/22db9c264c9119d19a3130e6ad8e7fded0e16c6d
> ---
> https://github.com/tarantool/tarantool/issues/3509
>
> Note this patch depends on small engine extension I've sent
> earlier which is not yet merged thus for review only.
>
> src/box/box.cc | 5 +++++
> src/box/lua/load_cfg.lua | 2 ++
> src/box/memtx_engine.c | 4 ++--
> src/box/memtx_engine.h | 9 ++++++---
> src/box/tuple.c | 4 ++--
> src/box/tuple.h | 2 +-
> src/box/vy_mem.c | 2 +-
> 7 files changed, 19 insertions(+), 9 deletions(-)
Please run all tests. I think box/cfg will break after this.
>
> diff --git a/src/box/box.cc b/src/box/box.cc
> index 7828f575b..e66b5200c 100644
> --- a/src/box/box.cc
> +++ b/src/box/box.cc
> @@ -1670,6 +1670,10 @@ box_free(void)
> static void
> engine_init()
> {
> + uint32_t slab_flags = SLAB_ARENA_PRIVATE;
> + if (cfg_geti("memtx_memory_dontdump"))
> + slab_flags |= SLAB_ARENA_DONTDUMP;
> +
Interpreting the dontdump option here and converting it to slab_flags kind of
breaks encapsulation. I'd pass the cfg parameter to memtx_engine_new and let
it interpret it.
> /*
> * Sic: order is important here, since
> * memtx must be the first to participate
> @@ -1681,6 +1685,7 @@ engine_init()
> cfg_geti("force_recovery"),
> cfg_getd("memtx_memory"),
> cfg_geti("memtx_min_tuple_size"),
> + slab_flags,
> cfg_getd("slab_alloc_factor"));
> engine_register((struct engine *)memtx);
> box_set_memtx_max_tuple_size();
> diff --git a/src/box/lua/load_cfg.lua b/src/box/lua/load_cfg.lua
> index 5530b2caa..8d4073539 100644
> --- a/src/box/lua/load_cfg.lua
> +++ b/src/box/lua/load_cfg.lua
> @@ -25,6 +25,7 @@ end
> local default_cfg = {
> listen = nil,
> memtx_memory = 256 * 1024 *1024,
> + memtx_memory_dontdump = false,
> memtx_min_tuple_size = 16,
> memtx_max_tuple_size = 1024 * 1024,
> slab_alloc_factor = 1.05,
> @@ -88,6 +89,7 @@ local default_cfg = {
> local template_cfg = {
> listen = 'string, number',
> memtx_memory = 'number',
> + memtx_memory_dontdump = 'boolean',
> memtx_min_tuple_size = 'number',
> memtx_max_tuple_size = 'number',
> slab_alloc_factor = 'number',
> diff --git a/src/box/memtx_engine.c b/src/box/memtx_engine.c
> index 210f3c22c..ff70cd98f 100644
> --- a/src/box/memtx_engine.c
> +++ b/src/box/memtx_engine.c
> @@ -1011,7 +1011,7 @@ memtx_engine_gc_f(va_list va)
> struct memtx_engine *
> memtx_engine_new(const char *snap_dirname, bool force_recovery,
> uint64_t tuple_arena_max_size, uint32_t objsize_min,
> - float alloc_factor)
> + uint32_t slab_flags, float alloc_factor)
> {
> struct memtx_engine *memtx = calloc(1, sizeof(*memtx));
> if (memtx == NULL) {
> @@ -1066,7 +1066,7 @@ memtx_engine_new(const char *snap_dirname, bool force_recovery,
> /* Initialize tuple allocator. */
> quota_init(&memtx->quota, tuple_arena_max_size);
> tuple_arena_create(&memtx->arena, &memtx->quota, tuple_arena_max_size,
> - SLAB_SIZE, "memtx");
> + SLAB_SIZE, slab_flags, "memtx");
> slab_cache_create(&memtx->slab_cache, &memtx->arena);
> small_alloc_create(&memtx->alloc, &memtx->slab_cache,
> objsize_min, alloc_factor);
> diff --git a/src/box/memtx_engine.h b/src/box/memtx_engine.h
> index 8f4ce7cdd..0546924a4 100644
> --- a/src/box/memtx_engine.h
> +++ b/src/box/memtx_engine.h
> @@ -189,7 +189,8 @@ memtx_engine_schedule_gc(struct memtx_engine *memtx,
> struct memtx_engine *
> memtx_engine_new(const char *snap_dirname, bool force_recovery,
> uint64_t tuple_arena_max_size,
> - uint32_t objsize_min, float alloc_factor);
> + uint32_t objsize_min, uint32_t slab_flags,
> + float alloc_factor);
>
> int
> memtx_engine_recover_snapshot(struct memtx_engine *memtx,
> @@ -257,12 +258,14 @@ memtx_index_def_change_requires_rebuild(struct index *index,
> static inline struct memtx_engine *
> memtx_engine_new_xc(const char *snap_dirname, bool force_recovery,
> uint64_t tuple_arena_max_size,
> - uint32_t objsize_min, float alloc_factor)
> + uint32_t objsize_min, uint32_t slab_flags,
> + float alloc_factor)
> {
> struct memtx_engine *memtx;
> memtx = memtx_engine_new(snap_dirname, force_recovery,
> tuple_arena_max_size,
> - objsize_min, alloc_factor);
> + objsize_min, slab_flags,
> + alloc_factor);
> if (memtx == NULL)
> diag_raise();
> return memtx;
> diff --git a/src/box/tuple.c b/src/box/tuple.c
> index c325f58c9..b2640dc83 100644
> --- a/src/box/tuple.c
> +++ b/src/box/tuple.c
> @@ -324,7 +324,7 @@ tuple_init(field_name_hash_f hash)
> void
> tuple_arena_create(struct slab_arena *arena, struct quota *quota,
> uint64_t arena_max_size, uint32_t slab_size,
> - const char *arena_name)
> + uint32_t slab_flags, const char *arena_name)
> {
> /*
> * Ensure that quota is a multiple of slab_size, to
> @@ -336,7 +336,7 @@ tuple_arena_create(struct slab_arena *arena, struct quota *quota,
> arena_name);
>
> if (slab_arena_create(arena, quota, prealloc, slab_size,
> - MAP_PRIVATE) != 0) {
> + slab_flags) != 0) {
> if (errno == ENOMEM) {
> panic("failed to preallocate %zu bytes: Cannot "\
> "allocate memory, check option '%s_memory' in box.cfg(..)", prealloc,
> diff --git a/src/box/tuple.h b/src/box/tuple.h
> index d261d4cc9..ec579e95e 100644
> --- a/src/box/tuple.h
> +++ b/src/box/tuple.h
> @@ -69,7 +69,7 @@ tuple_free(void);
> void
> tuple_arena_create(struct slab_arena *arena, struct quota *quota,
> uint64_t arena_max_size, uint32_t slab_size,
> - const char *arena_name);
> + uint32_t slab_flags, const char *arena_name);
>
> void
> tuple_arena_destroy(struct slab_arena *arena);
> diff --git a/src/box/vy_mem.c b/src/box/vy_mem.c
> index a4fae26e2..de893858a 100644
> --- a/src/box/vy_mem.c
> +++ b/src/box/vy_mem.c
> @@ -54,7 +54,7 @@ vy_mem_env_create(struct vy_mem_env *env, size_t memory)
> /* Vinyl memory is limited by vy_quota. */
> quota_init(&env->quota, QUOTA_MAX);
> tuple_arena_create(&env->arena, &env->quota, memory,
> - SLAB_SIZE, "vinyl");
> + SLAB_SIZE, SLAB_ARENA_PRIVATE, "vinyl");
I think it's safe to skip dumping this arena, too.
> lsregion_create(&env->allocator, &env->arena);
> env->tree_extent_size = 0;
> }
> --
> 2.20.1
>
More information about the Tarantool-patches
mailing list