* [Tarantool-patches] [PATCH v4] small: changed small allocator pool management
2020-12-25 11:45 [Tarantool-patches] [PATCH v4] memtx: change small allocator behavior mechanik20051988
@ 2020-12-25 11:45 ` mechanik20051988
2020-12-28 12:11 ` [Tarantool-patches] [PATCH v4] memtx: change small allocator behavior Vladislav Shpilevoy
` (2 subsequent siblings)
3 siblings, 0 replies; 5+ messages in thread
From: mechanik20051988 @ 2020-12-25 11:45 UTC (permalink / raw)
To: v.shpilevoy; +Cc: tarantool-patches
Branch: https://github.com/tarantool/small/tree/mechanik20051988/gh-5216-fix-strange-allocator-behavior
Changes in v4:
Add actual_alloc_factor parameter to small_class_create
In the previous version the allocator created a new pool if necessary
and inserted it in the pools tree. Now we allocate pools at
the stage of allocator creation according to alloc_factor.
We use the small_class for this purpose; we also use it
to find the necessary pool when we allocate memory. This is faster
than the previous allocator behavior and also fixes #5216.
Closes #5216
---
perf/small_alloc_perf.c | 11 +-
small/small.c | 219 ++++++++++------------------------------
small/small.h | 56 ++++------
small/small_class.c | 5 +-
small/small_class.h | 3 +-
test/small_alloc.c | 3 +-
test/small_class.c | 9 +-
7 files changed, 94 insertions(+), 212 deletions(-)
diff --git a/perf/small_alloc_perf.c b/perf/small_alloc_perf.c
index b980e38..210bcc9 100644
--- a/perf/small_alloc_perf.c
+++ b/perf/small_alloc_perf.c
@@ -209,8 +209,10 @@ small_alloc_basic(unsigned int slab_size)
slab_arena_create(&arena, "a, 0, slab_size, MAP_PRIVATE);
slab_cache_create(&cache, &arena);
for (unsigned int i = 0; i < SZR(slab_alloc_factor); i++) {
+ float actual_alloc_factor;
small_alloc_create(&alloc, &cache,
- OBJSIZE_MIN, slab_alloc_factor[i]);
+ OBJSIZE_MIN, slab_alloc_factor[i],
+ &actual_alloc_factor);
int size_min = OBJSIZE_MIN;
int size_max = (int)alloc.objsize_max - 1;
fail_unless(clock_gettime (CLOCK_MONOTONIC, &tm1) == 0);
@@ -248,8 +250,10 @@ small_alloc_basic(unsigned int slab_size)
print_json_test_header("exponent");
}
for (unsigned int i = 0; i < SZR(slab_alloc_factor); i++) {
+ float actual_alloc_factor;
small_alloc_create(&alloc, &cache,
- OBJSIZE_MIN, slab_alloc_factor[i]);
+ OBJSIZE_MIN, slab_alloc_factor[i],
+ &actual_alloc_factor);
int size_min = OBJSIZE_MIN;
int size_max = (int)alloc.objsize_max - 1;
fail_unless(clock_gettime (CLOCK_MONOTONIC, &tm1) == 0);
@@ -295,8 +299,9 @@ small_alloc_large()
print_json_test_header("large");
}
for (unsigned int i = 0; i < SZR(slab_alloc_factor); i++) {
+ float actual_alloc_factor;
small_alloc_create(&alloc, &cache, OBJSIZE_MIN,
- slab_alloc_factor[i]);
+ slab_alloc_factor[i], &actual_alloc_factor);
fail_unless(clock_gettime (CLOCK_MONOTONIC, &tm1) == 0);
small_alloc_test(large_size_min, large_size_max, 200, 1, 25);
fail_unless(clock_gettime (CLOCK_MONOTONIC, &tm2) == 0);
diff --git a/small/small.c b/small/small.c
index 48085fb..e2798fb 100644
--- a/small/small.c
+++ b/small/small.c
@@ -33,122 +33,59 @@
#include <string.h>
#include <stdio.h>
-enum {
- /** Step size for stepped pools, in bytes */
- STEP_SIZE = 8,
- /**
- * LB stands for logarithm with binary base, this constant
- * is used for bit shifts, when we need to divide by
- * STEP_SIZE.
- */
- STEP_SIZE_LB = 3,
-};
-
-rb_proto(, factor_tree_, factor_tree_t, struct factor_pool)
-
-/** Used for search in the tree. */
-static inline int
-factor_pool_cmp(const struct factor_pool *a, const struct factor_pool *b)
+static inline struct factor_pool *
+factor_pool_search(struct small_alloc *alloc, size_t size)
{
- return a->pool.objsize > b->pool.objsize ? 1 :
- a->pool.objsize < b->pool.objsize ? -1 : 0;
+ if (size > alloc->objsize_max)
+ return NULL;
+ unsigned cls = small_class_calc_offset_by_size(&alloc->small_class, size);
+ struct factor_pool *pool = &alloc->factor_pool_cache[cls];
+ return pool;
}
-rb_gen(, factor_tree_, factor_tree_t, struct factor_pool, node,
- factor_pool_cmp)
-
-static inline struct factor_pool *
-factor_pool_create(struct small_alloc *alloc,
- struct factor_pool *upper_bound,
- size_t size)
+static inline void
+factor_pool_create(struct small_alloc *alloc)
{
- assert(size > alloc->step_pool_objsize_max);
- assert(size <= alloc->objsize_max);
-
- if (alloc->factor_pool_next == NULL) {
- /**
- * Too many factored pools already, fall back
- * to an imperfect one.
- */
- return upper_bound;
+ size_t objsize = 0;
+ for (alloc->factor_pool_cache_size = 0;
+ objsize < alloc->objsize_max && alloc->factor_pool_cache_size < FACTOR_POOL_MAX;
+ alloc->factor_pool_cache_size++) {
+ size_t prevsize = objsize;
+ objsize = small_class_calc_size_by_offset(&alloc->small_class,
+ alloc->factor_pool_cache_size);
+ if (objsize > alloc->objsize_max)
+ objsize = alloc->objsize_max;
+ struct factor_pool *pool =
+ &alloc->factor_pool_cache[alloc->factor_pool_cache_size];
+ mempool_create(&pool->pool, alloc->cache, objsize);
+ pool->objsize_min = prevsize + 1;
}
- size_t objsize = alloc->step_pool_objsize_max;
- size_t prevsize;
- do {
- prevsize = objsize;
- /*
- * Align objsize after each multiplication to
- * ensure that the distance between objsizes of
- * factored pools is a multiple of STEP_SIZE.
- */
- objsize = small_align(objsize * alloc->factor,
- sizeof(intptr_t));
- assert(objsize > alloc->step_pool_objsize_max);
- } while (objsize < size);
- if (objsize > alloc->objsize_max)
- objsize = alloc->objsize_max;
- struct factor_pool *pool = alloc->factor_pool_next;
- alloc->factor_pool_next = pool->next;
- mempool_create(&pool->pool, alloc->cache, objsize);
- pool->objsize_min = prevsize + 1;
- factor_tree_insert(&alloc->factor_pools, pool);
- return pool;
+ alloc->objsize_max = objsize;
}
/** Initialize the small allocator. */
void
small_alloc_create(struct small_alloc *alloc, struct slab_cache *cache,
- uint32_t objsize_min, float alloc_factor)
+ uint32_t objsize_min, float alloc_factor,
+ float *actual_alloc_factor)
{
alloc->cache = cache;
/* Align sizes. */
- objsize_min = small_align(objsize_min, STEP_SIZE);
- alloc->step_pool0_step_count = (objsize_min - 1) >> STEP_SIZE_LB;
+ objsize_min = small_align(objsize_min, sizeof(intptr_t));
/* Make sure at least 4 largest objects can fit in a slab. */
alloc->objsize_max =
mempool_objsize_max(slab_order_size(cache, cache->order_max));
- if (!(alloc->objsize_max > objsize_min + STEP_POOL_MAX * STEP_SIZE)) {
- fprintf(stderr, "Can't create small alloc, small "
- "object min size should not be greather than %u\n",
- alloc->objsize_max - (STEP_POOL_MAX + 1) * STEP_SIZE);
- abort();
- }
+ assert(alloc_factor > 1. && alloc_factor <= 2.);
- struct mempool *step_pool;
- for (step_pool = alloc->step_pools;
- step_pool < alloc->step_pools + STEP_POOL_MAX;
- step_pool++) {
- mempool_create(step_pool, alloc->cache, objsize_min);
- objsize_min += STEP_SIZE;
- }
- alloc->step_pool_objsize_max = (step_pool - 1)->objsize;
- if (alloc_factor > 2.0)
- alloc_factor = 2.0;
+ alloc->factor = alloc_factor;
/*
- * Correct the user-supplied alloc_factor to ensure that
- * it actually produces growing object sizes.
+ * Second parameter (uintptr_t) - granularity,
+ * determines alignment.
*/
- if (alloc->step_pool_objsize_max * alloc_factor <
- alloc->step_pool_objsize_max + STEP_SIZE) {
-
- alloc_factor =
- (alloc->step_pool_objsize_max + STEP_SIZE + 0.5)/
- alloc->step_pool_objsize_max;
- }
- alloc->factor = alloc_factor;
-
- /* Initialize the factored pool cache. */
- struct factor_pool *factor_pool = alloc->factor_pool_cache;
- do {
- factor_pool->next = factor_pool + 1;
- factor_pool++;
- } while (factor_pool !=
- alloc->factor_pool_cache + FACTOR_POOL_MAX - 1);
- factor_pool->next = NULL;
- alloc->factor_pool_next = alloc->factor_pool_cache;
- factor_tree_new(&alloc->factor_pools);
- (void) factor_pool_create(alloc, NULL, alloc->objsize_max);
+ small_class_create(&alloc->small_class, sizeof(intptr_t),
+ alloc->factor, objsize_min, actual_alloc_factor);
+ factor_pool_create(alloc);
lifo_init(&alloc->delayed);
lifo_init(&alloc->delayed_large);
@@ -225,72 +162,27 @@ smalloc(struct small_alloc *alloc, size_t size)
{
small_collect_garbage(alloc);
- struct mempool *pool;
- int idx = (size - 1) >> STEP_SIZE_LB;
- idx = (idx > (int) alloc->step_pool0_step_count) ? idx - alloc->step_pool0_step_count : 0;
- if (idx < STEP_POOL_MAX) {
- /* Allocate in a stepped pool. */
- pool = &alloc->step_pools[idx];
- assert(size <= pool->objsize &&
- (size + STEP_SIZE > pool->objsize || idx == 0));
- } else {
- struct factor_pool pattern;
- pattern.pool.objsize = size;
- struct factor_pool *upper_bound =
- factor_tree_nsearch(&alloc->factor_pools, &pattern);
- if (upper_bound == NULL) {
- /* Object is too large, fallback to slab_cache */
- struct slab *slab = slab_get_large(alloc->cache, size);
- if (slab == NULL)
- return NULL;
- return slab_data(slab);
- }
-
- if (size < upper_bound->objsize_min)
- upper_bound = factor_pool_create(alloc, upper_bound,
- size);
- pool = &upper_bound->pool;
+ struct factor_pool *upper_bound = factor_pool_search(alloc, size);
+ if (upper_bound == NULL) {
+ /* Object is too large, fallback to slab_cache */
+ struct slab *slab = slab_get_large(alloc->cache, size);
+ if (slab == NULL)
+ return NULL;
+ return slab_data(slab);
}
+ struct mempool *pool = &upper_bound->pool;
assert(size <= pool->objsize);
return mempool_alloc(pool);
}
-static void
-small_recycle_pool(struct small_alloc *alloc, struct mempool *pool)
-{
- if (mempool_used(pool) == 0 &&
- pool->objsize > alloc->step_pool_objsize_max &&
- alloc->factor_pool_next == NULL) {
- struct factor_pool *factor_pool = (struct factor_pool *)
- ((char *) pool - (intptr_t)
- &((struct factor_pool *) NULL)->pool);
- factor_tree_remove(&alloc->factor_pools, factor_pool);
- mempool_destroy(pool);
- alloc->factor_pool_next = factor_pool;
- }
-}
-
static inline struct mempool *
mempool_find(struct small_alloc *alloc, size_t size)
{
- struct mempool *pool;
- int idx = (size - 1) >> STEP_SIZE_LB;
- idx = (idx > (int) alloc->step_pool0_step_count) ? idx - alloc->step_pool0_step_count : 0;
- if (idx < STEP_POOL_MAX) {
- /* Allocated in a stepped pool. */
- pool = &alloc->step_pools[idx];
- assert((size + STEP_SIZE > pool->objsize) || (idx == 0));
- } else {
- /* Allocated in a factor pool. */
- struct factor_pool pattern;
- pattern.pool.objsize = size;
- struct factor_pool *upper_bound =
- factor_tree_nsearch(&alloc->factor_pools, &pattern);
- if (upper_bound == NULL)
- return NULL; /* Allocated by slab_cache. */
- assert(size >= upper_bound->objsize_min);
- pool = &upper_bound->pool;
- }
+ struct factor_pool *upper_bound = factor_pool_search(alloc, size);
+ if (upper_bound == NULL)
+ return NULL; /* Allocated by slab_cache. */
+ assert(size >= upper_bound->objsize_min);
+ struct mempool *pool = &upper_bound->pool;
assert(size <= pool->objsize);
return pool;
}
@@ -319,8 +211,6 @@ smfree(struct small_alloc *alloc, void *ptr, size_t size)
/* Regular allocation in mempools */
mempool_free(pool, ptr);
- if (mempool_used(pool) == 0)
- small_recycle_pool(alloc, pool);
}
/**
@@ -351,8 +241,7 @@ smfree_delayed(struct small_alloc *alloc, void *ptr, size_t size)
struct mempool_iterator
{
struct small_alloc *alloc;
- struct mempool *step_pool;
- struct factor_tree_iterator factor_iterator;
+ uint32_t factor_iterator;
};
void
@@ -360,19 +249,19 @@ mempool_iterator_create(struct mempool_iterator *it,
struct small_alloc *alloc)
{
it->alloc = alloc;
- it->step_pool = alloc->step_pools;
- factor_tree_ifirst(&alloc->factor_pools, &it->factor_iterator);
+ it->factor_iterator = 0;
}
struct mempool *
mempool_iterator_next(struct mempool_iterator *it)
{
- if (it->step_pool < it->alloc->step_pools + STEP_POOL_MAX)
- return it->step_pool++;
- struct factor_pool *factor_pool = factor_tree_inext(&it->factor_iterator);
- if (factor_pool) {
+ struct factor_pool *factor_pool = NULL;
+ if (it->factor_iterator < it->alloc->factor_pool_cache_size)
+ factor_pool =
+ &it->alloc->factor_pool_cache[(it->factor_iterator)++];
+ if (factor_pool)
return &(factor_pool->pool);
- }
+
return NULL;
}
diff --git a/small/small.h b/small/small.h
index 376c600..3a44b6c 100644
--- a/small/small.h
+++ b/small/small.h
@@ -34,6 +34,7 @@
#include "mempool.h"
#include "slab_arena.h"
#include "lifo.h"
+#include "small_class.h"
#if defined(__cplusplus)
extern "C" {
@@ -86,10 +87,8 @@ extern "C" {
/** Basic constants of small object allocator. */
enum {
- /** How many stepped pools there is. */
- STEP_POOL_MAX = 32,
/** How many factored pools there can be. */
- FACTOR_POOL_MAX = 256,
+ FACTOR_POOL_MAX = 1024,
};
enum small_opt {
@@ -107,8 +106,6 @@ enum small_opt {
*/
struct factor_pool
{
- /** rb_tree entry */
- rb_node(struct factor_pool) node;
/** the pool itself. */
struct mempool pool;
/**
@@ -117,12 +114,8 @@ struct factor_pool
* pool.
*/
size_t objsize_min;
- /** next free factor pool in the cache. */
- struct factor_pool *next;
};
-typedef rb_tree(struct factor_pool) factor_tree_t;
-
/**
* Free mode
*/
@@ -138,26 +131,10 @@ enum small_free_mode {
/** A slab allocator for a wide range of object sizes. */
struct small_alloc {
struct slab_cache *cache;
- uint32_t step_pool_objsize_max;
- /**
- * All slabs in all pools must be of the same order,
- * otherwise small_free() has no way to derive from
- * pointer its slab and then the pool.
- */
- /**
- * An array of "stepped" pools, pool->objsize of adjacent
- * pools differ by a fixed size (step).
- */
- struct mempool step_pools[STEP_POOL_MAX];
/** A cache for nodes in the factor_pools tree. */
struct factor_pool factor_pool_cache[FACTOR_POOL_MAX];
- /** First free element in factor_pool_cache. */
- struct factor_pool *factor_pool_next;
- /**
- * A red-black tree with "factored" pools, i.e.
- * each pool differs from its neighbor by a factor.
- */
- factor_tree_t factor_pools;
+ /* factor_pool_cache array real size */
+ uint32_t factor_pool_cache_size;
/**
* List of mempool which objects to be freed if delayed free mode.
*/
@@ -171,22 +148,29 @@ struct small_alloc {
* Is provided during initialization.
*/
float factor;
+ /** Small class for this allocator */
+ struct small_class small_class;
uint32_t objsize_max;
/**
* Free mode.
*/
enum small_free_mode free_mode;
- /**
- * Object size of step pool 0 divided by STEP_SIZE, to
- * quickly find the right stepped pool given object size.
- */
- uint32_t step_pool0_step_count;
};
-/** Initialize a small memory allocator. */
+/**
+ * Initialize a small memory allocator.
+ * @param alloc - instance to create.
+ * @param cache - pointer to used slab cache.
+ * @param objsize_min - minimal object size.
+ * @param alloc_factor - desired factor of growth object size.
+ * Must be in (1, 2] range.
+ * @param actual_alloc_factor real allocation factor calculated the basis of
+ * desired alloc_factor
+ */
void
small_alloc_create(struct small_alloc *alloc, struct slab_cache *cache,
- uint32_t objsize_min, float alloc_factor);
+ uint32_t objsize_min, float alloc_factor,
+ float *actual_alloc_factor);
/**
* Enter or leave delayed mode - in delayed mode smfree_delayed()
@@ -213,10 +197,6 @@ smalloc(struct small_alloc *alloc, size_t size);
*
* This boils down to finding the object's mempool and delegating
* to mempool_free().
- *
- * If the pool becomes completely empty, and it's a factored pool,
- * and the factored pool's cache is empty, put back the empty
- * factored pool into the factored pool cache.
*/
void
smfree(struct small_alloc *alloc, void *ptr, size_t size);
diff --git a/small/small_class.c b/small/small_class.c
index 99915a8..07df5d2 100644
--- a/small/small_class.c
+++ b/small/small_class.c
@@ -32,16 +32,18 @@
#include "small_class.h"
#include <assert.h>
#include <math.h>
+#include <stddef.h>
void
small_class_create(struct small_class *sc, unsigned granularity,
- float desired_factor, unsigned min_alloc)
+ float desired_factor, unsigned min_alloc, float *actual_factor)
{
assert(granularity > 0); /* size cannot be multiple of zero. */
assert((granularity & (granularity - 1)) == 0); /* must power of 2. */
assert(desired_factor > 1.f);
assert(desired_factor <= 2.f);
assert(min_alloc > 0); /* Cannot allocate zero. */
+ assert(actual_factor != NULL);
sc->granularity = granularity;
sc->ignore_bits_count = __builtin_ctz(granularity);
@@ -53,4 +55,5 @@ small_class_create(struct small_class *sc, unsigned granularity,
sc->size_shift_plus_1 = sc->size_shift + 1;
sc->actual_factor = powf(2, 1.f / powf(2, sc->effective_bits));
+ *actual_factor = sc->actual_factor;
}
diff --git a/small/small_class.h b/small/small_class.h
index e6b0a7c..b787aa6 100644
--- a/small/small_class.h
+++ b/small/small_class.h
@@ -138,10 +138,11 @@ struct small_class {
* Must be in (1, 2] range. Actual factor can be different.
* @param min_alloc - the lowest class size, must be greater than zero.
* The good choice is the same value as granularity.
+ * @param actual_factor calculated on the basis of desired factor
*/
void
small_class_create(struct small_class *sc, unsigned granularity,
- float desired_factor, unsigned min_alloc);
+ float desired_factor, unsigned min_alloc, float *actual_factor);
/**
* Find position of the most significant bit in a value.
diff --git a/test/small_alloc.c b/test/small_alloc.c
index 421442a..65f081b 100644
--- a/test/small_alloc.c
+++ b/test/small_alloc.c
@@ -78,7 +78,8 @@ static void
small_alloc_test(int size_min, int size_max, int objects_max,
int oscillation_max, int iterations_max)
{
- small_alloc_create(&alloc, &cache, OBJSIZE_MIN, 1.3);
+ float actual_alloc_factor;
+ small_alloc_create(&alloc, &cache, OBJSIZE_MIN, 1.3, &actual_alloc_factor);
for (int i = 0; i < iterations_max; i++) {
small_alloc_setopt(&alloc, SMALL_DELAYED_FREE_MODE, i % 5 == 0);
diff --git a/test/small_class.c b/test/small_class.c
index 48cdac9..cfb9b47 100644
--- a/test/small_class.c
+++ b/test/small_class.c
@@ -43,7 +43,8 @@ test_class(void)
plan(0);
struct small_class sc;
- small_class_create(&sc, 2, 1.2, 12);
+ float actual_factor;
+ small_class_create(&sc, 2, 1.2, 12, &actual_factor);
unsigned class = small_class_calc_offset_by_size(&sc, 0);
unsigned class_size = small_class_calc_size_by_offset(&sc, class);
@@ -106,7 +107,8 @@ check_expectation()
}
struct small_class sc;
- small_class_create(&sc, granularity, factor, min_alloc);
+ float actual_factor;
+ small_class_create(&sc, granularity, factor, min_alloc, &actual_factor);
fail_unless(sc.effective_size == eff_size);
for (unsigned s = 0; s <= test_sizes; s++) {
@@ -135,7 +137,8 @@ check_factor()
for (unsigned granularity = 1; granularity <= 4; granularity *= 4) {
for(float factor = 1.01; factor < 1.995; factor += 0.01) {
struct small_class sc;
- small_class_create(&sc, granularity, factor, granularity);
+ float actual_factor;
+ small_class_create(&sc, granularity, factor, granularity, &actual_factor);
float k = powf(factor, 0.5f);
fail_unless(sc.actual_factor >= factor / k && sc.actual_factor <= factor * k);
--
2.20.1
^ permalink raw reply [flat|nested] 5+ messages in thread
* Re: [Tarantool-patches] [PATCH v4] memtx: change small allocator behavior
2020-12-25 11:45 [Tarantool-patches] [PATCH v4] memtx: change small allocator behavior mechanik20051988
2020-12-25 11:45 ` [Tarantool-patches] [PATCH v4] small: changed small allocator pool management mechanik20051988
@ 2020-12-28 12:11 ` Vladislav Shpilevoy
2020-12-28 14:11 ` Alexander V. Tikhonov
2020-12-29 10:43 ` Kirill Yukhin
3 siblings, 0 replies; 5+ messages in thread
From: Vladislav Shpilevoy @ 2020-12-28 12:11 UTC (permalink / raw)
To: mechanik20051988; +Cc: mechanik20051988, tarantool-patches
Hi! Thanks for the patch!
Something seems to be broken in your editor or whatever
you use to prepare emails.
This thread has 2 commits, while really your have 4 commits -
1 in tarantool/tarantool and 3 in tarantool/small.
Consider my review fixes on top of this branch in a separate
commit. Squash them if all is fine.
On 25.12.2020 14:45, mechanik20051988 via Tarantool-patches wrote:
> From: mechanik20051988 <mechanik20.05.1988@gmail.com>
>
> Branch: https://github.com/tarantool/tarantool/tree/mechanik20051988/gh-5216-fix-strange-allocator-behavior
>
> Cnanges in v4:
> Add actual_alloc_factor parameter to small_alloc_create
>
> Previously, in small allocator, memory pools
> were allocated at the request, which in the case
> of a small slab_alloc_factor led to use
> pools with incorrect sizes. This patch changed
> small allocator behavior, now we allocate pools
> on the stage of allocator creation. Also we use
> special function to find appropriate pool, which
> is faster, then previous version with rbtree.
> This change fixes #5216.
>
> Also moved a check, that the slab_alloc_factor is in
> the range (1.0, 2.0] from small allocator to memtx_engine.
> If factor is not in range change it to 1.0001 or 2.0 respectively
>
> Closes #5216
> ---
> src/box/memtx_engine.c | 12 +++-
> src/box/tuple.c | 3 +-
> src/lib/small | 2 +-
> test/engine/engine.cfg | 3 +
> test/engine/gh-5216-small_alloc_factor.lua | 11 +++
> test/engine/gh-5216-small_alloc_factor.result | 70 +++++++++++++++++++
> .../gh-5216-small_alloc_factor.test.lua | 20 ++++++
> 7 files changed, 118 insertions(+), 3 deletions(-)
> create mode 100644 test/engine/gh-5216-small_alloc_factor.lua
> create mode 100644 test/engine/gh-5216-small_alloc_factor.result
> create mode 100644 test/engine/gh-5216-small_alloc_factor.test.lua
>
> diff --git a/src/box/memtx_engine.c b/src/box/memtx_engine.c
> index 4f1fe3a3f..023402888 100644
> --- a/src/box/memtx_engine.c
> +++ b/src/box/memtx_engine.c
> @@ -1116,13 +1116,23 @@ memtx_engine_new(const char *snap_dirname, bool force_recovery,
> if (objsize_min < OBJSIZE_MIN)
> objsize_min = OBJSIZE_MIN;
>
> + if (alloc_factor > 2) {
> + say_error("Alloc factor must be less than or equal to 2.0. It will be reduced to 2.0");
> + alloc_factor = 2.0;
> + } else if (alloc_factor <= 1.0) {
> + say_error("Alloc factor must be greater than 1.0. It will be increased to 1.001");
> + alloc_factor = 1.001;
> + }
> +
> /* Initialize tuple allocator. */
> quota_init(&memtx->quota, tuple_arena_max_size);
> tuple_arena_create(&memtx->arena, &memtx->quota, tuple_arena_max_size,
> SLAB_SIZE, dontdump, "memtx");
> slab_cache_create(&memtx->slab_cache, &memtx->arena);
> + float actual_alloc_factor;
> small_alloc_create(&memtx->alloc, &memtx->slab_cache,
> - objsize_min, alloc_factor);
> + objsize_min, alloc_factor, &actual_alloc_factor);
> + say_info("Actual alloc factor calculated on the basis of desired alloc_factor = %f", actual_alloc_factor);
All the messages run out of 80 symbols line width limit, which is
stated here: https://github.com/tarantool/tarantool/wiki/Code-review-procedure.
I see you still didn't read it, right? I suggest to do it, otherwise you
are going to face all the problems listed in there, which will make the
reviews longer, and will waste more time.
I also renamed 'alloc_factor' to 'slab_alloc_factor' to be consistent
with box.cfg.slab_alloc_factor.
My fixes, also in a separate commit on top of your branch.
====================
diff --git a/src/box/memtx_engine.c b/src/box/memtx_engine.c
index 023402888..f79f14b4f 100644
--- a/src/box/memtx_engine.c
+++ b/src/box/memtx_engine.c
@@ -1117,10 +1117,12 @@ memtx_engine_new(const char *snap_dirname, bool force_recovery,
objsize_min = OBJSIZE_MIN;
if (alloc_factor > 2) {
- say_error("Alloc factor must be less than or equal to 2.0. It will be reduced to 2.0");
+ say_error("Alloc factor must be less than or equal to 2.0. It "
+ "will be reduced to 2.0");
alloc_factor = 2.0;
} else if (alloc_factor <= 1.0) {
- say_error("Alloc factor must be greater than 1.0. It will be increased to 1.001");
+ say_error("Alloc factor must be greater than 1.0. It will be "
+ "increased to 1.001");
alloc_factor = 1.001;
}
@@ -1132,7 +1134,8 @@ memtx_engine_new(const char *snap_dirname, bool force_recovery,
float actual_alloc_factor;
small_alloc_create(&memtx->alloc, &memtx->slab_cache,
objsize_min, alloc_factor, &actual_alloc_factor);
- say_info("Actual alloc factor calculated on the basis of desired alloc_factor = %f", actual_alloc_factor);
+ say_info("Actual slab_alloc_factor calculated on the basis of desired "
+ "slab_alloc_factor = %f", actual_alloc_factor);
/* Initialize index extent allocator. */
slab_cache_create(&memtx->index_slab_cache, &memtx->arena);
^ permalink raw reply [flat|nested] 5+ messages in thread
* Re: [Tarantool-patches] [PATCH v4] memtx: change small allocator behavior
2020-12-25 11:45 [Tarantool-patches] [PATCH v4] memtx: change small allocator behavior mechanik20051988
2020-12-25 11:45 ` [Tarantool-patches] [PATCH v4] small: changed small allocator pool management mechanik20051988
2020-12-28 12:11 ` [Tarantool-patches] [PATCH v4] memtx: change small allocator behavior Vladislav Shpilevoy
@ 2020-12-28 14:11 ` Alexander V. Tikhonov
2020-12-29 10:43 ` Kirill Yukhin
3 siblings, 0 replies; 5+ messages in thread
From: Alexander V. Tikhonov @ 2020-12-28 14:11 UTC (permalink / raw)
To: mechanik20051988; +Cc: tarantool-patches
Hi Evgeny, thanks for the patchset, as I see no new degradation found in
gitlab-ci testing commit criteria pipeline [1], patchset LGTM.
[1] - https://gitlab.com/tarantool/tarantool/-/pipelines/235049916
On Fri, Dec 25, 2020 at 02:45:19PM +0300, mechanik20051988 via Tarantool-patches wrote:
> From: mechanik20051988 <mechanik20.05.1988@gmail.com>
>
> Branch: https://github.com/tarantool/tarantool/tree/mechanik20051988/gh-5216-fix-strange-allocator-behavior
>
> Cnanges in v4:
> Add actual_alloc_factor parameter to small_alloc_create
>
> Previously, in small allocator, memory pools
> were allocated at the request, which in the case
> of a small slab_alloc_factor led to use
> pools with incorrect sizes. This patch changed
> small allocator behavior, now we allocate pools
> on the stage of allocator creation. Also we use
> special function to find appropriate pool, which
> is faster, then previous version with rbtree.
> This change fixes #5216.
>
> Also moved a check, that the slab_alloc_factor is in
> the range (1.0, 2.0] from small allocator to memtx_engine.
> If factor is not in range change it to 1.0001 or 2.0 respectively
>
> Closes #5216
> ---
> src/box/memtx_engine.c | 12 +++-
> src/box/tuple.c | 3 +-
> src/lib/small | 2 +-
> test/engine/engine.cfg | 3 +
> test/engine/gh-5216-small_alloc_factor.lua | 11 +++
> test/engine/gh-5216-small_alloc_factor.result | 70 +++++++++++++++++++
> .../gh-5216-small_alloc_factor.test.lua | 20 ++++++
> 7 files changed, 118 insertions(+), 3 deletions(-)
> create mode 100644 test/engine/gh-5216-small_alloc_factor.lua
> create mode 100644 test/engine/gh-5216-small_alloc_factor.result
> create mode 100644 test/engine/gh-5216-small_alloc_factor.test.lua
>
> diff --git a/src/box/memtx_engine.c b/src/box/memtx_engine.c
> index 4f1fe3a3f..023402888 100644
> --- a/src/box/memtx_engine.c
> +++ b/src/box/memtx_engine.c
> @@ -1116,13 +1116,23 @@ memtx_engine_new(const char *snap_dirname, bool force_recovery,
> if (objsize_min < OBJSIZE_MIN)
> objsize_min = OBJSIZE_MIN;
>
> + if (alloc_factor > 2) {
> + say_error("Alloc factor must be less than or equal to 2.0. It will be reduced to 2.0");
> + alloc_factor = 2.0;
> + } else if (alloc_factor <= 1.0) {
> + say_error("Alloc factor must be greater than 1.0. It will be increased to 1.001");
> + alloc_factor = 1.001;
> + }
> +
> /* Initialize tuple allocator. */
> quota_init(&memtx->quota, tuple_arena_max_size);
> tuple_arena_create(&memtx->arena, &memtx->quota, tuple_arena_max_size,
> SLAB_SIZE, dontdump, "memtx");
> slab_cache_create(&memtx->slab_cache, &memtx->arena);
> + float actual_alloc_factor;
> small_alloc_create(&memtx->alloc, &memtx->slab_cache,
> - objsize_min, alloc_factor);
> + objsize_min, alloc_factor, &actual_alloc_factor);
> + say_info("Actual alloc factor calculated on the basis of desired alloc_factor = %f", actual_alloc_factor);
>
> /* Initialize index extent allocator. */
> slab_cache_create(&memtx->index_slab_cache, &memtx->arena);
> diff --git a/src/box/tuple.c b/src/box/tuple.c
> index e83377c3a..c2023e4e8 100644
> --- a/src/box/tuple.c
> +++ b/src/box/tuple.c
> @@ -313,8 +313,9 @@ tuple_init(field_name_hash_f hash)
> /* Make sure this one stays around. */
> tuple_format_ref(tuple_format_runtime);
>
> + float actual_alloc_factor;
> small_alloc_create(&runtime_alloc, &cord()->slabc, OBJSIZE_MIN,
> - ALLOC_FACTOR);
> + ALLOC_FACTOR, &actual_alloc_factor);
>
> mempool_create(&tuple_iterator_pool, &cord()->slabc,
> sizeof(struct tuple_iterator));
> diff --git a/src/lib/small b/src/lib/small
> index fcac155db..bdbe73cd3 160000
> --- a/src/lib/small
> +++ b/src/lib/small
> @@ -1 +1 @@
> -Subproject commit fcac155dba18e97b10bc668d1a2fae01184adc13
> +Subproject commit bdbe73cd35865160dd9f80bfc8bfa3817f4980aa
> diff --git a/test/engine/engine.cfg b/test/engine/engine.cfg
> index f017a03ec..3bfc44dcf 100644
> --- a/test/engine/engine.cfg
> +++ b/test/engine/engine.cfg
> @@ -8,6 +8,9 @@
> },
> "gh-4973-concurrent-alter-fails.test.lua": {
> "memtx": {"engine": "memtx"}
> + },
> + "gh-5216-small_alloc_factor.test.lua" : {
> + "memtx": {"engine": "memtx"}
> }
> }
>
> diff --git a/test/engine/gh-5216-small_alloc_factor.lua b/test/engine/gh-5216-small_alloc_factor.lua
> new file mode 100644
> index 000000000..a303a95d4
> --- /dev/null
> +++ b/test/engine/gh-5216-small_alloc_factor.lua
> @@ -0,0 +1,11 @@
> +#!/usr/bin/env tarantool
> +
> +require('console').listen(os.getenv('ADMIN'))
> +
> +box.cfg({
> + wal_mode = 'none',
> + memtx_memory = 2*1024*1024*1024,
> + listen = os.getenv("LISTEN"),
> + memtx_min_tuple_size=32,
> + slab_alloc_factor=1.03
> +})
> diff --git a/test/engine/gh-5216-small_alloc_factor.result b/test/engine/gh-5216-small_alloc_factor.result
> new file mode 100644
> index 000000000..5cf69a8f3
> --- /dev/null
> +++ b/test/engine/gh-5216-small_alloc_factor.result
> @@ -0,0 +1,70 @@
> +env = require('test_run')
> +---
> +...
> +test_run = env.new()
> +---
> +...
> +test_run:cmd('create server test with script="engine/gh-5216-small_alloc_factor.lua"')
> +---
> +- true
> +...
> +test_run:cmd('start server test with args="none"')
> +---
> +- true
> +...
> +test_run:cmd('switch test')
> +---
> +- true
> +...
> +s = box.schema.space.create('test')
> +---
> +...
> +_ = s:create_index('test')
> +---
> +...
> +function str(i) return string.rep('a', math.floor(256 * math.pow(1.03, i))) end
> +---
> +...
> +for i=1,276 do _ = s:replace{i+200, str(i)} end
> +---
> +...
> +for i=1,274 do _ = s:delete{i+200} end
> +---
> +...
> +collectgarbage('collect')
> +---
> +- 0
> +...
> +_ = s:replace{200+277, str(275)}
> +---
> +...
> +_ = s:delete{200+275}
> +---
> +...
> +collectgarbage('collect')
> +---
> +- 0
> +...
> +_ = s:delete{200+276}
> +---
> +...
> +collectgarbage('collect')
> +---
> +- 0
> +...
> +test_run:cmd('switch default')
> +---
> +- true
> +...
> +test_run:cmd('stop server test')
> +---
> +- true
> +...
> +test_run:cmd('cleanup server test')
> +---
> +- true
> +...
> +test_run:cmd('delete server test')
> +---
> +- true
> +...
> diff --git a/test/engine/gh-5216-small_alloc_factor.test.lua b/test/engine/gh-5216-small_alloc_factor.test.lua
> new file mode 100644
> index 000000000..83e51d19c
> --- /dev/null
> +++ b/test/engine/gh-5216-small_alloc_factor.test.lua
> @@ -0,0 +1,20 @@
> +env = require('test_run')
> +test_run = env.new()
> +test_run:cmd('create server test with script="engine/gh-5216-small_alloc_factor.lua"')
> +test_run:cmd('start server test with args="none"')
> +test_run:cmd('switch test')
> +s = box.schema.space.create('test')
> +_ = s:create_index('test')
> +function str(i) return string.rep('a', math.floor(256 * math.pow(1.03, i))) end
> +for i=1,276 do _ = s:replace{i+200, str(i)} end
> +for i=1,274 do _ = s:delete{i+200} end
> +collectgarbage('collect')
> +_ = s:replace{200+277, str(275)}
> +_ = s:delete{200+275}
> +collectgarbage('collect')
> +_ = s:delete{200+276}
> +collectgarbage('collect')
> +test_run:cmd('switch default')
> +test_run:cmd('stop server test')
> +test_run:cmd('cleanup server test')
> +test_run:cmd('delete server test')
> --
> 2.20.1
>
^ permalink raw reply [flat|nested] 5+ messages in thread