* [PATCH] box/memtx: Allow to skip tuple memory from coredump
@ 2019-05-07 17:26 Cyrill Gorcunov
2019-05-13 10:21 ` Vladimir Davydov
0 siblings, 1 reply; 4+ messages in thread
From: Cyrill Gorcunov @ 2019-05-07 17:26 UTC (permalink / raw)
To: tml; +Cc: Vladimir Davydov, Cyrill Gorcunov
In case there is a huge amount of tuples, the whole
memory goes to the coredump file even if we don't need it
for problem investigation. As a result, the coredump may
blow up to gigabytes in size.
Let's allow excluding this memory from dumping via the
box.cfg::memtx_memory_dontdump boolean parameter.
Note that the tuple's arena is used not only for tuples
themselves but for memtx->index_extent_pool and
memtx->iterator_pool as well, so they are affected
too.
Fixes #3509
---
https://github.com/tarantool/tarantool/issues/3509
Note: this patch depends on the small engine extension I've sent
earlier, which is not yet merged; thus this is for review only.
src/box/box.cc | 5 +++++
src/box/lua/load_cfg.lua | 2 ++
src/box/memtx_engine.c | 4 ++--
src/box/memtx_engine.h | 9 ++++++---
src/box/tuple.c | 4 ++--
src/box/tuple.h | 2 +-
src/box/vy_mem.c | 2 +-
7 files changed, 19 insertions(+), 9 deletions(-)
diff --git a/src/box/box.cc b/src/box/box.cc
index 7828f575b..e66b5200c 100644
--- a/src/box/box.cc
+++ b/src/box/box.cc
@@ -1670,6 +1670,10 @@ box_free(void)
static void
engine_init()
{
+ uint32_t slab_flags = SLAB_ARENA_PRIVATE;
+ if (cfg_geti("memtx_memory_dontdump"))
+ slab_flags |= SLAB_ARENA_DONTDUMP;
+
/*
* Sic: order is important here, since
* memtx must be the first to participate
@@ -1681,6 +1685,7 @@ engine_init()
cfg_geti("force_recovery"),
cfg_getd("memtx_memory"),
cfg_geti("memtx_min_tuple_size"),
+ slab_flags,
cfg_getd("slab_alloc_factor"));
engine_register((struct engine *)memtx);
box_set_memtx_max_tuple_size();
diff --git a/src/box/lua/load_cfg.lua b/src/box/lua/load_cfg.lua
index 5530b2caa..8d4073539 100644
--- a/src/box/lua/load_cfg.lua
+++ b/src/box/lua/load_cfg.lua
@@ -25,6 +25,7 @@ end
local default_cfg = {
listen = nil,
memtx_memory = 256 * 1024 *1024,
+ memtx_memory_dontdump = false,
memtx_min_tuple_size = 16,
memtx_max_tuple_size = 1024 * 1024,
slab_alloc_factor = 1.05,
@@ -88,6 +89,7 @@ local default_cfg = {
local template_cfg = {
listen = 'string, number',
memtx_memory = 'number',
+ memtx_memory_dontdump = 'boolean',
memtx_min_tuple_size = 'number',
memtx_max_tuple_size = 'number',
slab_alloc_factor = 'number',
diff --git a/src/box/memtx_engine.c b/src/box/memtx_engine.c
index 210f3c22c..ff70cd98f 100644
--- a/src/box/memtx_engine.c
+++ b/src/box/memtx_engine.c
@@ -1011,7 +1011,7 @@ memtx_engine_gc_f(va_list va)
struct memtx_engine *
memtx_engine_new(const char *snap_dirname, bool force_recovery,
uint64_t tuple_arena_max_size, uint32_t objsize_min,
- float alloc_factor)
+ uint32_t slab_flags, float alloc_factor)
{
struct memtx_engine *memtx = calloc(1, sizeof(*memtx));
if (memtx == NULL) {
@@ -1066,7 +1066,7 @@ memtx_engine_new(const char *snap_dirname, bool force_recovery,
/* Initialize tuple allocator. */
quota_init(&memtx->quota, tuple_arena_max_size);
tuple_arena_create(&memtx->arena, &memtx->quota, tuple_arena_max_size,
- SLAB_SIZE, "memtx");
+ SLAB_SIZE, slab_flags, "memtx");
slab_cache_create(&memtx->slab_cache, &memtx->arena);
small_alloc_create(&memtx->alloc, &memtx->slab_cache,
objsize_min, alloc_factor);
diff --git a/src/box/memtx_engine.h b/src/box/memtx_engine.h
index 8f4ce7cdd..0546924a4 100644
--- a/src/box/memtx_engine.h
+++ b/src/box/memtx_engine.h
@@ -189,7 +189,8 @@ memtx_engine_schedule_gc(struct memtx_engine *memtx,
struct memtx_engine *
memtx_engine_new(const char *snap_dirname, bool force_recovery,
uint64_t tuple_arena_max_size,
- uint32_t objsize_min, float alloc_factor);
+ uint32_t objsize_min, uint32_t slab_flags,
+ float alloc_factor);
int
memtx_engine_recover_snapshot(struct memtx_engine *memtx,
@@ -257,12 +258,14 @@ memtx_index_def_change_requires_rebuild(struct index *index,
static inline struct memtx_engine *
memtx_engine_new_xc(const char *snap_dirname, bool force_recovery,
uint64_t tuple_arena_max_size,
- uint32_t objsize_min, float alloc_factor)
+ uint32_t objsize_min, uint32_t slab_flags,
+ float alloc_factor)
{
struct memtx_engine *memtx;
memtx = memtx_engine_new(snap_dirname, force_recovery,
tuple_arena_max_size,
- objsize_min, alloc_factor);
+ objsize_min, slab_flags,
+ alloc_factor);
if (memtx == NULL)
diag_raise();
return memtx;
diff --git a/src/box/tuple.c b/src/box/tuple.c
index c325f58c9..b2640dc83 100644
--- a/src/box/tuple.c
+++ b/src/box/tuple.c
@@ -324,7 +324,7 @@ tuple_init(field_name_hash_f hash)
void
tuple_arena_create(struct slab_arena *arena, struct quota *quota,
uint64_t arena_max_size, uint32_t slab_size,
- const char *arena_name)
+ uint32_t slab_flags, const char *arena_name)
{
/*
* Ensure that quota is a multiple of slab_size, to
@@ -336,7 +336,7 @@ tuple_arena_create(struct slab_arena *arena, struct quota *quota,
arena_name);
if (slab_arena_create(arena, quota, prealloc, slab_size,
- MAP_PRIVATE) != 0) {
+ slab_flags) != 0) {
if (errno == ENOMEM) {
panic("failed to preallocate %zu bytes: Cannot "\
"allocate memory, check option '%s_memory' in box.cfg(..)", prealloc,
diff --git a/src/box/tuple.h b/src/box/tuple.h
index d261d4cc9..ec579e95e 100644
--- a/src/box/tuple.h
+++ b/src/box/tuple.h
@@ -69,7 +69,7 @@ tuple_free(void);
void
tuple_arena_create(struct slab_arena *arena, struct quota *quota,
uint64_t arena_max_size, uint32_t slab_size,
- const char *arena_name);
+ uint32_t slab_flags, const char *arena_name);
void
tuple_arena_destroy(struct slab_arena *arena);
diff --git a/src/box/vy_mem.c b/src/box/vy_mem.c
index a4fae26e2..de893858a 100644
--- a/src/box/vy_mem.c
+++ b/src/box/vy_mem.c
@@ -54,7 +54,7 @@ vy_mem_env_create(struct vy_mem_env *env, size_t memory)
/* Vinyl memory is limited by vy_quota. */
quota_init(&env->quota, QUOTA_MAX);
tuple_arena_create(&env->arena, &env->quota, memory,
- SLAB_SIZE, "vinyl");
+ SLAB_SIZE, SLAB_ARENA_PRIVATE, "vinyl");
lsregion_create(&env->allocator, &env->arena);
env->tree_extent_size = 0;
}
--
2.20.1
^ permalink raw reply [flat|nested] 4+ messages in thread
* Re: [PATCH] box/memtx: Allow to skip tuple memory from coredump
2019-05-07 17:26 [PATCH] box/memtx: Allow to skip tuple memory from coredump Cyrill Gorcunov
@ 2019-05-13 10:21 ` Vladimir Davydov
2019-05-13 10:33 ` Cyrill Gorcunov
2019-05-13 14:36 ` Alexander Turenko
0 siblings, 2 replies; 4+ messages in thread
From: Vladimir Davydov @ 2019-05-13 10:21 UTC (permalink / raw)
To: Cyrill Gorcunov; +Cc: tml, Alexander Turenko
[Cc += Alexander]
I don't quite like the name: memtx_memory_dontdump. We might want to
extend it to work on vinyl memory as well one day - I don't think we'd
want to introduce another cfg parameter for that. What about strip_core?
Alexander, what do you think about the new configuration parameter name?
Do you have a better name in mind?
Cyrill, please see a few comments inline.
On Tue, May 07, 2019 at 08:26:11PM +0300, Cyrill Gorcunov wrote:
> In case if there are huge amount of tuples the whole
> memory goes to coredump file even if we don't need it
> for problem investigation. In result coredump may
> blow up to gigabytes in size.
>
> Lets allow to exclude this memory from dumping via
> box.cfg::memtx_memory_dontdump boolean parameter.
>
> Note that the tuple's arena is used not only for tuples
> themselves but for memtx->index_extent_pool and
> memtx->iterator_pool as well, so they are affected
> too.
>
> Fixes #3509
Please write a doc bot request at the end of the commit message
(after Fixes ...). Take a look at this for a reference:
https://github.com/tarantool/tarantool/commit/22db9c264c9119d19a3130e6ad8e7fded0e16c6d
> ---
> https://github.com/tarantool/tarantool/issues/3509
>
> Note this patch depends on small engine extension I've sent
> earlier which is not yet merged thus for review only.
>
> src/box/box.cc | 5 +++++
> src/box/lua/load_cfg.lua | 2 ++
> src/box/memtx_engine.c | 4 ++--
> src/box/memtx_engine.h | 9 ++++++---
> src/box/tuple.c | 4 ++--
> src/box/tuple.h | 2 +-
> src/box/vy_mem.c | 2 +-
> 7 files changed, 19 insertions(+), 9 deletions(-)
Please run all tests. I think box/cfg will break after this.
>
> diff --git a/src/box/box.cc b/src/box/box.cc
> index 7828f575b..e66b5200c 100644
> --- a/src/box/box.cc
> +++ b/src/box/box.cc
> @@ -1670,6 +1670,10 @@ box_free(void)
> static void
> engine_init()
> {
> + uint32_t slab_flags = SLAB_ARENA_PRIVATE;
> + if (cfg_geti("memtx_memory_dontdump"))
> + slab_flags |= SLAB_ARENA_DONTDUMP;
> +
Interpreting dontdump option here and converting it to slab_flags kinda
breaks encapsulation. I'd pass cfg parameter to memtx_engine_new and let
it interpret it.
> /*
> * Sic: order is important here, since
> * memtx must be the first to participate
> @@ -1681,6 +1685,7 @@ engine_init()
> cfg_geti("force_recovery"),
> cfg_getd("memtx_memory"),
> cfg_geti("memtx_min_tuple_size"),
> + slab_flags,
> cfg_getd("slab_alloc_factor"));
> engine_register((struct engine *)memtx);
> box_set_memtx_max_tuple_size();
> diff --git a/src/box/lua/load_cfg.lua b/src/box/lua/load_cfg.lua
> index 5530b2caa..8d4073539 100644
> --- a/src/box/lua/load_cfg.lua
> +++ b/src/box/lua/load_cfg.lua
> @@ -25,6 +25,7 @@ end
> local default_cfg = {
> listen = nil,
> memtx_memory = 256 * 1024 *1024,
> + memtx_memory_dontdump = false,
> memtx_min_tuple_size = 16,
> memtx_max_tuple_size = 1024 * 1024,
> slab_alloc_factor = 1.05,
> @@ -88,6 +89,7 @@ local default_cfg = {
> local template_cfg = {
> listen = 'string, number',
> memtx_memory = 'number',
> + memtx_memory_dontdump = 'boolean',
> memtx_min_tuple_size = 'number',
> memtx_max_tuple_size = 'number',
> slab_alloc_factor = 'number',
> diff --git a/src/box/memtx_engine.c b/src/box/memtx_engine.c
> index 210f3c22c..ff70cd98f 100644
> --- a/src/box/memtx_engine.c
> +++ b/src/box/memtx_engine.c
> @@ -1011,7 +1011,7 @@ memtx_engine_gc_f(va_list va)
> struct memtx_engine *
> memtx_engine_new(const char *snap_dirname, bool force_recovery,
> uint64_t tuple_arena_max_size, uint32_t objsize_min,
> - float alloc_factor)
> + uint32_t slab_flags, float alloc_factor)
> {
> struct memtx_engine *memtx = calloc(1, sizeof(*memtx));
> if (memtx == NULL) {
> @@ -1066,7 +1066,7 @@ memtx_engine_new(const char *snap_dirname, bool force_recovery,
> /* Initialize tuple allocator. */
> quota_init(&memtx->quota, tuple_arena_max_size);
> tuple_arena_create(&memtx->arena, &memtx->quota, tuple_arena_max_size,
> - SLAB_SIZE, "memtx");
> + SLAB_SIZE, slab_flags, "memtx");
> slab_cache_create(&memtx->slab_cache, &memtx->arena);
> small_alloc_create(&memtx->alloc, &memtx->slab_cache,
> objsize_min, alloc_factor);
> diff --git a/src/box/memtx_engine.h b/src/box/memtx_engine.h
> index 8f4ce7cdd..0546924a4 100644
> --- a/src/box/memtx_engine.h
> +++ b/src/box/memtx_engine.h
> @@ -189,7 +189,8 @@ memtx_engine_schedule_gc(struct memtx_engine *memtx,
> struct memtx_engine *
> memtx_engine_new(const char *snap_dirname, bool force_recovery,
> uint64_t tuple_arena_max_size,
> - uint32_t objsize_min, float alloc_factor);
> + uint32_t objsize_min, uint32_t slab_flags,
> + float alloc_factor);
>
> int
> memtx_engine_recover_snapshot(struct memtx_engine *memtx,
> @@ -257,12 +258,14 @@ memtx_index_def_change_requires_rebuild(struct index *index,
> static inline struct memtx_engine *
> memtx_engine_new_xc(const char *snap_dirname, bool force_recovery,
> uint64_t tuple_arena_max_size,
> - uint32_t objsize_min, float alloc_factor)
> + uint32_t objsize_min, uint32_t slab_flags,
> + float alloc_factor)
> {
> struct memtx_engine *memtx;
> memtx = memtx_engine_new(snap_dirname, force_recovery,
> tuple_arena_max_size,
> - objsize_min, alloc_factor);
> + objsize_min, slab_flags,
> + alloc_factor);
> if (memtx == NULL)
> diag_raise();
> return memtx;
> diff --git a/src/box/tuple.c b/src/box/tuple.c
> index c325f58c9..b2640dc83 100644
> --- a/src/box/tuple.c
> +++ b/src/box/tuple.c
> @@ -324,7 +324,7 @@ tuple_init(field_name_hash_f hash)
> void
> tuple_arena_create(struct slab_arena *arena, struct quota *quota,
> uint64_t arena_max_size, uint32_t slab_size,
> - const char *arena_name)
> + uint32_t slab_flags, const char *arena_name)
> {
> /*
> * Ensure that quota is a multiple of slab_size, to
> @@ -336,7 +336,7 @@ tuple_arena_create(struct slab_arena *arena, struct quota *quota,
> arena_name);
>
> if (slab_arena_create(arena, quota, prealloc, slab_size,
> - MAP_PRIVATE) != 0) {
> + slab_flags) != 0) {
> if (errno == ENOMEM) {
> panic("failed to preallocate %zu bytes: Cannot "\
> "allocate memory, check option '%s_memory' in box.cfg(..)", prealloc,
> diff --git a/src/box/tuple.h b/src/box/tuple.h
> index d261d4cc9..ec579e95e 100644
> --- a/src/box/tuple.h
> +++ b/src/box/tuple.h
> @@ -69,7 +69,7 @@ tuple_free(void);
> void
> tuple_arena_create(struct slab_arena *arena, struct quota *quota,
> uint64_t arena_max_size, uint32_t slab_size,
> - const char *arena_name);
> + uint32_t slab_flags, const char *arena_name);
>
> void
> tuple_arena_destroy(struct slab_arena *arena);
> diff --git a/src/box/vy_mem.c b/src/box/vy_mem.c
> index a4fae26e2..de893858a 100644
> --- a/src/box/vy_mem.c
> +++ b/src/box/vy_mem.c
> @@ -54,7 +54,7 @@ vy_mem_env_create(struct vy_mem_env *env, size_t memory)
> /* Vinyl memory is limited by vy_quota. */
> quota_init(&env->quota, QUOTA_MAX);
> tuple_arena_create(&env->arena, &env->quota, memory,
> - SLAB_SIZE, "vinyl");
> + SLAB_SIZE, SLAB_ARENA_PRIVATE, "vinyl");
I think it's safe to skip dumping this arena, too.
> lsregion_create(&env->allocator, &env->arena);
> env->tree_extent_size = 0;
> }
> --
> 2.20.1
>
^ permalink raw reply [flat|nested] 4+ messages in thread
* Re: [PATCH] box/memtx: Allow to skip tuple memory from coredump
2019-05-13 10:21 ` Vladimir Davydov
@ 2019-05-13 10:33 ` Cyrill Gorcunov
2019-05-13 14:36 ` Alexander Turenko
1 sibling, 0 replies; 4+ messages in thread
From: Cyrill Gorcunov @ 2019-05-13 10:33 UTC (permalink / raw)
To: Vladimir Davydov; +Cc: tml, Alexander Turenko
On Mon, May 13, 2019 at 01:21:21PM +0300, Vladimir Davydov wrote:
> [Cc += Alexander]
>
> I don't quite like the name: memtx_memory_dontdump. We might want to
> extend it to work on vinyl memory as well one day - I don't think we'd
> want to introduce another cfg parameter for that. What about strip_core?
I don't mind.
>
> Alexander, what do you think about the new configuration parameter name?
> Do you have a better name in mind?
>
> Cyrill, please see a few comments inline.
>
>
> Please write a doc bot request in the end of the commit message
> (after Fixes ...). Take a look at this for a reference:
>
> https://github.com/tarantool/tarantool/commit/22db9c264c9119d19a3130e6ad8e7fded0e16c6d
Thanks, will do.
> > 7 files changed, 19 insertions(+), 9 deletions(-)
>
> Please run all tests. I think box/cfg will break after this.
Will take a look.
> > static void
> > engine_init()
> > {
> > + uint32_t slab_flags = SLAB_ARENA_PRIVATE;
> > + if (cfg_geti("memtx_memory_dontdump"))
> > + slab_flags |= SLAB_ARENA_DONTDUMP;
> > +
>
> Interpreting dontdump option here and converting it to slab_flags kinda
> breaks encapsulation. I'd pass cfg parameter to memtx_engine_new and let
> it interpret it.
ok
> > @@ -54,7 +54,7 @@ vy_mem_env_create(struct vy_mem_env *env, size_t memory)
> > /* Vinyl memory is limited by vy_quota. */
> > quota_init(&env->quota, QUOTA_MAX);
> > tuple_arena_create(&env->arena, &env->quota, memory,
> > - SLAB_SIZE, "vinyl");
> > + SLAB_SIZE, SLAB_ARENA_PRIVATE, "vinyl");
>
> I think it's safe to skip dumping this arena, too.
ok
Cyrill
^ permalink raw reply [flat|nested] 4+ messages in thread
* Re: [PATCH] box/memtx: Allow to skip tuple memory from coredump
2019-05-13 10:21 ` Vladimir Davydov
2019-05-13 10:33 ` Cyrill Gorcunov
@ 2019-05-13 14:36 ` Alexander Turenko
1 sibling, 0 replies; 4+ messages in thread
From: Alexander Turenko @ 2019-05-13 14:36 UTC (permalink / raw)
To: Vladimir Davydov; +Cc: Cyrill Gorcunov, tml
> I don't quite like the name: memtx_memory_dontdump. We might want to
> extend it to work on vinyl memory as well one day - I don't think we'd
> want to introduce another cfg parameter for that. What about strip_core?
>
> Alexander, what do you think about the new configuration parameter name?
> Do you have a better name in mind?
I think strip_core is okay and, as you pointed out, will allow us to extend
this feature in the future.
^ permalink raw reply [flat|nested] 4+ messages in thread
end of thread, other threads:[~2019-05-13 14:36 UTC | newest]
Thread overview: 4+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2019-05-07 17:26 [PATCH] box/memtx: Allow to skip tuple memory from coredump Cyrill Gorcunov
2019-05-13 10:21 ` Vladimir Davydov
2019-05-13 10:33 ` Cyrill Gorcunov
2019-05-13 14:36 ` Alexander Turenko
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox