[Tarantool-patches] [PATCH] quota: add is_enabled field

Ilya Kosarev i.kosarev at tarantool.org
Fri Dec 11 18:37:03 MSK 2020


By default the quota is enabled. If it is set to false, quota_use()
allows exceeding the total available memory limit.
When the quota is disabled, smalloc() allocates (and frees) large
slabs using malloc(), so that the quota can shrink back after those
slabs are freed. A test is introduced.

Part of tarantool/tarantool#3807
---
Branch: https://github.com/tarantool/small/tree/i.kosarev/gh-3807-safe-alloc-on-truncation
Issue: https://github.com/tarantool/tarantool/issues/3807
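
Not part of the patch, just for review context: a minimal sketch of the
intended calling pattern, using only the API visible in this diff. With
the quota disabled, quota_use() can no longer fail the limit check, so
the bracketed charge always succeeds and is still accounted for.

    #include "small/quota.h"

    /*
     * Hypothetical helper (not in the library): charge `size` bytes
     * to the quota even if that exceeds the configured total. With
     * is_enabled == false, the `new_used_in_units > total_in_units`
     * check in quota_use() is skipped, so the call succeeds and the
     * usage is still recorded in the low 32 bits of quota->value.
     */
    static ssize_t
    quota_force_use(struct quota *quota, size_t size)
    {
            quota_disable(quota);
            ssize_t used = quota_use(quota, size);
            quota_enable(quota);
            return used;
    }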

 small/quota.h      | 21 ++++++++++++++++++++-
 small/slab_cache.c |  6 ++++++
 small/slab_cache.h |  3 +++
 small/small.c      | 10 ++++++----
 test/small_alloc.c | 32 ++++++++++++++++++++++++++++++++
 5 files changed, 67 insertions(+), 5 deletions(-)
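
For readers following the arithmetic in quota_init() and quota_use()
below: quota->value packs the total (high 32 bits) and the used amount
(low 32 bits) into one 64-bit word, both counted in QUOTA_UNIT_SIZE
units. A worked example of the new disabled-quota path (my numbers, not
from the patch; it assumes QUOTA_UNIT_SIZE == 1024 as defined in
small/quota.h):

    #include <assert.h>
    #include <sys/types.h>
    #include "small/quota.h"

    int main(void)
    {
            struct quota q;
            /* 3000 bytes rounds up to 3 units: q.value == 3ULL << 32. */
            quota_init(&q, 3000);

            size_t total, used;
            quota_get_total_and_used(&q, &total, &used);
            assert(total == 3 * QUOTA_UNIT_SIZE && used == 0);

            /* New in this patch: overuse is allowed while disabled. */
            quota_disable(&q);
            ssize_t rc = quota_use(&q, 4 * QUOTA_UNIT_SIZE);
            assert(rc == 4 * (ssize_t) QUOTA_UNIT_SIZE);
            quota_enable(&q);

            quota_get_total_and_used(&q, &total, &used);
            assert(used > total); /* 4 units charged vs a 3-unit total */
            return 0;
    }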

diff --git a/small/quota.h b/small/quota.h
index 3d3b4f0..c71ee5d 100644
--- a/small/quota.h
+++ b/small/quota.h
@@ -57,6 +57,12 @@ struct quota {
 	 * QUOTA_UNIT_SIZE.
 	 */
 	uint64_t value;
+	/**
+	 * By default the quota is enabled. If it is set to
+	 * false, quota_use() allows exceeding the total
+	 * available memory limit.
+	 */
+	bool is_enabled;
 };
 
 /**
@@ -68,6 +74,7 @@ quota_init(struct quota *quota, size_t total)
 	uint64_t new_total = (total + (QUOTA_UNIT_SIZE - 1)) /
 				QUOTA_UNIT_SIZE;
 	quota->value = new_total << 32;
+	quota->is_enabled = true;
 }
 
 /**
@@ -122,6 +129,18 @@ quota_set(struct quota *quota, size_t new_total)
 	return new_total_in_units * QUOTA_UNIT_SIZE;
 }
 
+static inline void
+quota_enable(struct quota *quota)
+{
+	quota->is_enabled = true;
+}
+
+static inline void
+quota_disable(struct quota *quota)
+{
+	quota->is_enabled = false;
+}
+
 /**
  * Use up a quota
  * @retval > 0 aligned value on success
@@ -143,7 +162,7 @@ quota_use(struct quota *quota, size_t size)
 		uint32_t new_used_in_units = used_in_units + size_in_units;
 		assert(new_used_in_units > used_in_units);
 
-		if (new_used_in_units > total_in_units)
+		if (new_used_in_units > total_in_units && quota->is_enabled)
 			return -1;
 
 		uint64_t new_value =
diff --git a/small/slab_cache.c b/small/slab_cache.c
index 2ba56c5..b3b7004 100644
--- a/small/slab_cache.c
+++ b/small/slab_cache.c
@@ -462,3 +462,9 @@ slab_cache_check(struct slab_cache *cache)
 		return;
 	abort();
 }
+
+bool
+slab_cache_quota_enabled(struct slab_cache *cache)
+{
+	return cache->arena->quota->is_enabled;
+}
diff --git a/small/slab_cache.h b/small/slab_cache.h
index d81c1c5..bbfdc74 100644
--- a/small/slab_cache.h
+++ b/small/slab_cache.h
@@ -271,6 +271,9 @@ slab_from_data(void *data)
 void
 slab_cache_check(struct slab_cache *cache);
 
+bool
+slab_cache_quota_enabled(struct slab_cache *cache);
+
 /**
  * Find the nearest power of 2 size capable of containing
  * a chunk of the given size. Adjust for cache->order0_size
diff --git a/small/small.c b/small/small.c
index 48085fb..154a0e9 100644
--- a/small/small.c
+++ b/small/small.c
@@ -228,7 +228,7 @@ smalloc(struct small_alloc *alloc, size_t size)
 	struct mempool *pool;
 	int idx = (size - 1) >> STEP_SIZE_LB;
 	idx = (idx > (int) alloc->step_pool0_step_count) ? idx - alloc->step_pool0_step_count : 0;
-	if (idx < STEP_POOL_MAX) {
+	if (idx < STEP_POOL_MAX && slab_cache_quota_enabled(alloc->cache)) {
 		/* Allocate in a stepped pool. */
 		pool = &alloc->step_pools[idx];
 		assert(size <= pool->objsize &&
@@ -238,7 +238,8 @@ smalloc(struct small_alloc *alloc, size_t size)
 		pattern.pool.objsize = size;
 		struct factor_pool *upper_bound =
 			factor_tree_nsearch(&alloc->factor_pools, &pattern);
-		if (upper_bound == NULL) {
+		if (upper_bound == NULL ||
+		    !slab_cache_quota_enabled(alloc->cache)) {
 			/* Object is too large, fallback to slab_cache */
 			struct slab *slab = slab_get_large(alloc->cache, size);
 			if (slab == NULL)
@@ -276,7 +277,7 @@ mempool_find(struct small_alloc *alloc, size_t size)
 	struct mempool *pool;
 	int idx = (size - 1) >> STEP_SIZE_LB;
 	idx = (idx > (int) alloc->step_pool0_step_count) ? idx - alloc->step_pool0_step_count : 0;
-	if (idx < STEP_POOL_MAX) {
+	if (idx < STEP_POOL_MAX && slab_cache_quota_enabled(alloc->cache)) {
 		/* Allocated in a stepped pool. */
 			pool = &alloc->step_pools[idx];
 			assert((size + STEP_SIZE > pool->objsize) || (idx == 0));
@@ -286,7 +287,8 @@ mempool_find(struct small_alloc *alloc, size_t size)
 		pattern.pool.objsize = size;
 		struct factor_pool *upper_bound =
 			factor_tree_nsearch(&alloc->factor_pools, &pattern);
-		if (upper_bound == NULL)
+		if (upper_bound == NULL ||
+		    !slab_cache_quota_enabled(alloc->cache))
 			return NULL; /* Allocated by slab_cache. */
 		assert(size >= upper_bound->objsize_min);
 		pool = &upper_bound->pool;
diff --git a/test/small_alloc.c b/test/small_alloc.c
index 421442a..1563cc9 100644
--- a/test/small_alloc.c
+++ b/test/small_alloc.c
@@ -128,6 +128,37 @@ small_alloc_large(void)
 	footer();
 }
 
+static void
+small_quota_release(void)
+{
+	header();
+
+	size_t total, used;
+	size_t alloc_size = 1000;
+	quota_get_total_and_used(&quota, &total, &used);
+	size_t amount = (total - used) / alloc_size;
+
+	int **a = calloc(amount, sizeof(int *));
+	int i = 0;
+	while (quota_use(&quota, 1) >= 0) {
+		quota_disable(&quota);
+		a[i++] = smalloc(&alloc, alloc_size);
+		quota_enable(&quota);
+	}
+
+	quota_get_total_and_used(&quota, &total, &used);
+	fail_unless((int)total - (int)used < 0);
+
+	quota_disable(&quota);
+	smfree(&alloc, a[0], alloc_size);
+	quota_enable(&quota);
+	free(a);
+
+	quota_get_total_and_used(&quota, &total, &used);
+	fail_unless((int)total - (int)used > 0);
+	footer();
+}
+
 int main()
 {
 	seed = time(0);
@@ -142,6 +173,7 @@ int main()
 
 	small_alloc_basic();
 	small_alloc_large();
+	small_quota_release();
 
 	slab_cache_destroy(&cache);
 }
-- 
2.17.1