From mboxrd@z Thu Jan  1 00:00:00 1970
Received: from smtpng3.m.smailru.net (smtpng3.m.smailru.net [94.100.177.149])
 (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits))
 (No client certificate requested)
 by dev.tarantool.org (Postfix) with ESMTPS id C689F4696C6
 for <tarantool-patches@dev.tarantool.org>; Fri, 14 Feb 2020 22:40:22 +0300 (MSK)
From: Ilya Kosarev <i.kosarev@tarantool.org>
Date: Fri, 14 Feb 2020 22:40:17 +0300
Message-Id: <20200214194017.1800-1-i.kosarev@tarantool.org>
Subject: [Tarantool-patches] [PATCH] arena: reserve memory for truncation
List-Id: Tarantool development patches <tarantool-patches.dev.tarantool.org>
To: tarantool-patches@dev.tarantool.org

Trying to perform space:truncate() when the memtx_memory limit has been
reached could fail with a slab allocator error, since the truncation
itself needs to allocate a new truncation tuple. This behavior is quite
surprising for users, who expect truncation to free memory rather than
consume it. Now we preallocate a dedicated slab on the arena at startup
and use it for truncation tuples in case we run out of quota.
---
Not aimed to be pushed to master!

Branch: https://github.com/tarantool/small/tree/i.kosarev/gh-3807-truncation-reserve
Issue: https://github.com/tarantool/tarantool/issues/3807
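
For reviewers who don't have the arena internals paged in, here is a
minimal standalone sketch of the fallback this patch implements: one
slab is set aside at arena creation, and an allocation made while the
truncating flag is raised may consume it once the quota is exhausted.
The toy_* names and the use of plain malloc() are illustrative only,
not part of the small API:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct toy_arena {
	size_t slab_size;	/* size of one slab */
	size_t quota;		/* hard memory limit */
	size_t used;		/* memory handed out so far */
	void *trunc_reserve;	/* slab kept aside for truncation */
	bool truncating;	/* raised around a truncation insert */
};

static void
toy_arena_create(struct toy_arena *a, size_t slab_size, size_t quota)
{
	a->slab_size = slab_size;
	a->quota = quota;
	a->used = 0;
	a->truncating = false;
	/* Set one slab aside up front, outside of the quota. */
	a->trunc_reserve = malloc(slab_size);
}

static void *
toy_slab_map(struct toy_arena *a)
{
	if (a->used + a->slab_size > a->quota) {
		/*
		 * Quota is exhausted: a normal allocation fails
		 * here, but a truncation may consume the reserve.
		 */
		if (a->truncating && a->trunc_reserve != NULL) {
			void *ptr = a->trunc_reserve;
			a->trunc_reserve = NULL;
			return ptr;
		}
		return NULL;
	}
	a->used += a->slab_size;
	return malloc(a->slab_size);
}

int
main(void)
{
	struct toy_arena a;
	toy_arena_create(&a, 1024, 1024);
	void *s1 = toy_slab_map(&a);	/* ok: within quota */
	void *s2 = toy_slab_map(&a);	/* NULL: quota exhausted */
	a.truncating = true;
	void *s3 = toy_slab_map(&a);	/* ok: taken from the reserve */
	a.truncating = false;
	printf("%d %d %d\n", s1 != NULL, s2 == NULL, s3 != NULL);
	free(s1);
	free(s3);
	return 0;
}

The point of the design is that the reserve lives outside the quota
accounting, so a truncation can always make progress even after user
data has consumed the whole memtx_memory budget.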

 small/mempool.c    |  3 +++
 small/slab_arena.c | 24 +++++++++++++++++++++++-
 small/slab_arena.h | 14 ++++++++++++++
 test/lsregion.c    |  4 ++++
 test/obuf.c        |  2 ++
 test/slab_arena.c  |  5 +++++
 test/slab_cache.c  |  2 +-
 7 files changed, 52 insertions(+), 2 deletions(-)

diff --git a/small/mempool.c b/small/mempool.c
index b20a416..f1c810b 100644
--- a/small/mempool.c
+++ b/small/mempool.c
@@ -187,6 +187,8 @@ mempool_alloc(struct mempool *pool)
 		     slab_get_with_order(pool->cache,
 					 pool->slab_order))) {
 			mslab_create(slab, pool);
+			if (pool->cache->arena->truncating)
+				goto end;
 			slab_list_add(&pool->slabs, &slab->slab, next_in_list);
 		} else if (! rlist_empty(&pool->cold_slabs)) {
 			slab = rlist_shift_entry(&pool->cold_slabs, struct mslab,
@@ -199,6 +201,7 @@ mempool_alloc(struct mempool *pool)
 		slab->in_hot_slabs = true;
 		pool->first_hot_slab = slab;
 	}
+end:
 	pool->slabs.stats.used += pool->objsize;
 	void *ptr = mslab_alloc(pool, slab);
 	assert(ptr != NULL);
diff --git a/small/slab_arena.c b/small/slab_arena.c
index 7661ef8..e54d9b9 100644
--- a/small/slab_arena.c
+++ b/small/slab_arena.c
@@ -210,6 +210,18 @@ slab_arena_create(struct slab_arena *arena, struct quota *quota,
 
 	madvise_checked(arena->arena, arena->prealloc, arena->flags);
 
+	arena->truncating = false;
+	arena->trunc_alloc = small_align(arena->trunc_alloc, arena->slab_size);
+	if (arena->trunc_alloc) {
+		arena->trunc_reserve = mmap_checked(arena->trunc_alloc,
+						    arena->slab_size,
+						    arena->flags);
+	} else {
+		arena->trunc_reserve = NULL;
+	}
+
+	madvise_checked(arena->trunc_reserve, arena->trunc_alloc, arena->flags);
+
 	return arena->prealloc && !arena->arena ? -1 : 0;
 }
 
@@ -240,8 +252,18 @@ slab_map(struct slab_arena *arena)
 		return ptr;
 	}
 
-	if (quota_use(arena->quota, arena->slab_size) < 0)
+	if (quota_use(arena->quota, arena->slab_size) < 0) {
+		if (arena->truncating) {
+			ptr = arena->trunc_reserve;
+			if (arena->trunc_alloc) {
+				pm_atomic_fetch_sub(&arena->trunc_alloc, arena->slab_size);
+				pm_atomic_fetch_add(&arena->used, arena->slab_size);
+				VALGRIND_MAKE_MEM_UNDEFINED(ptr, arena->slab_size);
+			}
+			return ptr;
+		}
 		return NULL;
+	}
 
 	/** Need to allocate a new slab. */
 	size_t used = pm_atomic_fetch_add(&arena->used, arena->slab_size);
diff --git a/small/slab_arena.h b/small/slab_arena.h
index e4b1ae7..23a5732 100644
--- a/small/slab_arena.h
+++ b/small/slab_arena.h
@@ -86,11 +86,25 @@ struct slab_arena {
 	struct lf_lifo cache;
 	/** A preallocated arena of size = prealloc. */
 	void *arena;
+	/**
+	 * A preallocated block for truncation tuples of size
+	 * trunc_alloc.
+	 */
+	void *trunc_reserve;
 	/**
 	 * How much memory is preallocated during initialization
 	 * of slab_arena.
 	 */
 	size_t prealloc;
+	/**
+	 * How much memory is reserved for truncation tuples.
+	 */
+	size_t trunc_alloc;
+	/**
+	 * True if we are inserting a truncation tuple,
+	 * otherwise false.
+	 */
+	bool truncating;
 	/**
 	 * How much memory in the arena has
 	 * already been initialized for slabs.
diff --git a/test/lsregion.c b/test/lsregion.c
index 1ed6a3a..300b181 100644
--- a/test/lsregion.c
+++ b/test/lsregion.c
@@ -30,6 +30,7 @@ test_basic()
 	struct slab_arena arena;
 	struct lsregion allocator;
 	quota_init(&quota, 4 * SLAB_MIN_SIZE);
+	arena.trunc_alloc = 0;
 	is(slab_arena_create(&arena, &quota, 0, 1024, MAP_PRIVATE), 0, "init");
 	lsregion_create(&allocator, &arena);
 
@@ -182,6 +183,7 @@ test_many_allocs_one_slab()
 	struct slab_arena arena;
 	struct lsregion allocator;
 	quota_init(&quota, 4 * SLAB_MIN_SIZE);
+	arena.trunc_alloc = 0;
 	is(slab_arena_create(&arena, &quota, 0, 0, MAP_PRIVATE), 0, "init");
 	lsregion_create(&allocator, &arena);
 
@@ -235,6 +237,7 @@ test_many_allocs_many_slabs()
 	struct slab_arena arena;
 	struct lsregion allocator;
 	quota_init(&quota, 4 * SLAB_MIN_SIZE);
+	arena.trunc_alloc = 0;
 	is(slab_arena_create(&arena, &quota, 0, 0, MAP_PRIVATE), 0, "init");
 	lsregion_create(&allocator, &arena);
 
@@ -314,6 +317,7 @@ test_big_data_small_slabs()
 	struct slab_arena arena;
 	struct lsregion allocator;
 	quota_init(&quota, 16 * SLAB_MIN_SIZE);
+	arena.trunc_alloc = 0;
 	is(slab_arena_create(&arena, &quota, 0, 0, MAP_PRIVATE), 0, "init");
 	lsregion_create(&allocator, &arena);
 
diff --git a/test/obuf.c b/test/obuf.c
index 13754b4..1affaf9 100644
--- a/test/obuf.c
+++ b/test/obuf.c
@@ -71,6 +71,8 @@ int main()
 {
 	quota_init(&quota, UINT_MAX);
 
+	arena.trunc_alloc = 0;
+
 	slab_arena_create(&arena, &quota, 0, 4000000, MAP_PRIVATE);
 	slab_cache_create(&cache, &arena);
 
diff --git a/test/slab_arena.c b/test/slab_arena.c
index 7a7c243..4661b81 100644
--- a/test/slab_arena.c
+++ b/test/slab_arena.c
@@ -97,6 +97,7 @@ slab_test_madvise(void)
 	 * preallocated area has been madvised.
 	 */
 	quota_init(&quota, 2000000);
+	arena.trunc_alloc = 0;
 	slab_arena_create(&arena, &quota, 3000000, 1,
 			  SLAB_ARENA_PRIVATE | SLAB_ARENA_DONTDUMP);
 
@@ -119,6 +120,7 @@ slab_test_madvise(void)
 	 * A new slab for dynamic allocation.
 	 */
 	quota_init(&quota, 2000000);
+	arena.trunc_alloc = 0;
 	slab_arena_create(&arena, &quota, 0, 0x10000,
 			  SLAB_ARENA_PRIVATE | SLAB_ARENA_DONTDUMP);
 
@@ -149,11 +151,13 @@ int main()
 	struct slab_arena arena;
 
 	quota_init(&quota, 0);
+	arena.trunc_alloc = 0;
 	slab_arena_create(&arena, &quota, 0, 0, MAP_PRIVATE);
 	slab_arena_print(&arena);
 	slab_arena_destroy(&arena);
 
 	quota_init(&quota, SLAB_MIN_SIZE);
+	arena.trunc_alloc = 0;
 	slab_arena_create(&arena, &quota, 1, 1, MAP_PRIVATE);
 	slab_arena_print(&arena);
 	void *ptr = slab_map(&arena);
@@ -167,6 +171,7 @@ int main()
 	slab_arena_destroy(&arena);
 
 	quota_init(&quota, 2000000);
+	arena.trunc_alloc = 0;
 	slab_arena_create(&arena, &quota, 3000000, 1, MAP_PRIVATE);
 	slab_arena_print(&arena);
 	slab_arena_destroy(&arena);
diff --git a/test/slab_cache.c b/test/slab_cache.c
index 4d0707e..d3a5033 100644
--- a/test/slab_cache.c
+++ b/test/slab_cache.c
@@ -19,7 +19,7 @@ int main()
 	struct slab_cache cache;
 
 	quota_init(&quota, UINT_MAX);
-
+	arena.trunc_alloc = 0;
 	slab_arena_create(&arena, &quota, 0, 4000000, MAP_PRIVATE);
 	slab_cache_create(&cache, &arena);
 
-- 
2.17.1
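
P.S. A note on embedder-side usage, as implied by the test changes
above: slab_arena_create() now reads arena->trunc_alloc to size the
reserve, so the field must be initialized before the call (the updated
tests pass 0 to opt out). A minimal sketch, assuming a slab-sized
reserve is wanted; the quota value is arbitrary:

#include <sys/mman.h>
#include "small/quota.h"
#include "small/slab_arena.h"

int
main(void)
{
	struct quota quota;
	struct slab_arena arena;

	quota_init(&quota, 4 * SLAB_MIN_SIZE);
	/*
	 * Must be set before slab_arena_create(): the constructor
	 * aligns the value up to slab_size and mmaps the reserve
	 * when it is non-zero.
	 */
	arena.trunc_alloc = SLAB_MIN_SIZE;
	if (slab_arena_create(&arena, &quota, 0, 0, MAP_PRIVATE) != 0)
		return 1;

	/*
	 * Around a truncation insert, raise the flag so that
	 * slab_map() may fall back to the reserve when quota_use()
	 * fails.
	 */
	arena.truncating = true;
	void *slab = slab_map(&arena);	/* may come from the reserve */
	arena.truncating = false;

	slab_arena_destroy(&arena);
	return slab == NULL;
}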