[Tarantool-patches] [PATCH small 1/2] lsregion: introduce lsregion_reserve()

Vladislav Shpilevoy v.shpilevoy at tarantool.org
Fri May 15 02:31:03 MSK 2020


So far lsregion has provided only lsregion_alloc(), even though
all the other allocators provide both alloc() and reserve().

Reservation is useful when one needs to reserve more and allocate
less. That way the unused tail has a chance of becoming part of
the next allocation. This will be the case for the upcoming
lsregion_aligned_alloc(), which will reserve more than requested
and probably use only part of it, depending on the alignment of
the resulting address.
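
For illustration, here is a minimal sketch of how such an aligned
allocation could be built on top of reserve(). It is not part of
this patch: the helper name is made up, alignment is assumed to
be a power of two, and <assert.h>, <stdint.h>, "small/lsregion.h"
are assumed to be included.

	/*
	 * Reserve enough bytes to be able to shift the result up
	 * to @a alignment, then consume only padding + size.
	 * reserve() guarantees that a following alloc() of not
	 * more bytes returns the same address.
	 */
	static inline void *
	aligned_alloc_sketch(struct lsregion *lsregion, size_t size,
			     size_t alignment, int64_t id)
	{
		void *p = lsregion_reserve(lsregion,
					   size + alignment - 1);
		if (p == NULL)
			return NULL;
		uintptr_t addr = (uintptr_t)p;
		uintptr_t aligned = (addr + alignment - 1) &
				    ~(uintptr_t)(alignment - 1);
		size_t pad = aligned - addr;
		void *res = lsregion_alloc(lsregion, size + pad, id);
		assert(res == p);
		(void)res;
		return (void *)aligned;
	}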

Needed for https://github.com/tarantool/tarantool/issues/4609
---
 small/lsregion.c | 12 ++----------
 small/lsregion.h | 49 +++++++++++++++++++++++++++++++++---------------
 test/lsregion.c  | 41 +++++++++++++++++++++++++++++++++++++++-
 3 files changed, 76 insertions(+), 26 deletions(-)
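
A hypothetical usage sketch (not from this patch; value, id,
encode_max_size() and encode() are made-up helpers) of the
intended pattern: reserve an upper bound, fill the block, then
allocate only what was actually written, so the unused tail can
serve the next allocation.

	void *p = lsregion_reserve(&lsregion, encode_max_size(value));
	if (p == NULL)
		return -1;
	size_t used = encode(p, value);
	void *res = lsregion_alloc(&lsregion, used, id);
	assert(res == p);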

diff --git a/small/lsregion.c b/small/lsregion.c
index 6bcc043..ff4a7aa 100644
--- a/small/lsregion.c
+++ b/small/lsregion.c
@@ -32,7 +32,7 @@
 #include "lsregion.h"
 
 void *
-lsregion_alloc_slow(struct lsregion *lsregion, size_t size, int64_t id)
+lsregion_reserve_slow(struct lsregion *lsregion, size_t size)
 {
 	struct lslab *slab = NULL;
 	size_t slab_size = lsregion->arena->slab_size;
@@ -77,14 +77,6 @@ lsregion_alloc_slow(struct lsregion *lsregion, size_t size, int64_t id)
 		}
 	}
 	assert(slab != NULL);
-	assert(slab->max_id <= id);
 	assert(size <= lslab_unused(slab));
-	void *res = lslab_pos(slab);
-	slab->slab_used += size;
-
-	/* Update the memory block meta info. */
-	assert(slab->max_id <= id);
-	slab->max_id = id;
-	lsregion->slabs.stats.used += size;
-	return res;
+	return lslab_pos(slab);
 }
diff --git a/small/lsregion.h b/small/lsregion.h
index 73d68dc..52431e7 100644
--- a/small/lsregion.h
+++ b/small/lsregion.h
@@ -153,22 +153,21 @@ lsregion_create(struct lsregion *lsregion, struct slab_arena *arena)
 	lsregion->cached = NULL;
 }
 
-/** @sa lsregion_alloc().  */
+/** @sa lsregion_reserve().  */
 void *
-lsregion_alloc_slow(struct lsregion *lsregion, size_t size, int64_t id);
+lsregion_reserve_slow(struct lsregion *lsregion, size_t size);
 
 /**
- * Allocate \p size bytes and assicoate the allocated block
- * with \p id.
+ * Make sure the next allocation of at most @a size bytes will
+ * not fail, and will return the same result as this call.
  * @param lsregion Allocator object.
  * @param size     Size to allocate.
- * @param id       Memory chunk identifier.
  *
  * @retval not NULL Success.
  * @retval NULL     Memory error.
  */
 static inline void *
-lsregion_alloc(struct lsregion *lsregion, size_t size, int64_t id)
+lsregion_reserve(struct lsregion *lsregion, size_t size)
 {
 	/* If there is an existing slab then try to use it. */
 	if (! rlist_empty(&lsregion->slabs.slabs)) {
@@ -176,16 +175,36 @@ lsregion_alloc(struct lsregion *lsregion, size_t size, int64_t id)
 		slab = rlist_last_entry(&lsregion->slabs.slabs, struct lslab,
 					next_in_list);
 		assert(slab != NULL);
-		assert(slab->max_id <= id);
-		if (size <= lslab_unused(slab)) {
-			void *res = lslab_pos(slab);
-			slab->slab_used += size;
-			slab->max_id = id;
-			lsregion->slabs.stats.used += size;
-			return res;
-		}
+		if (size <= lslab_unused(slab))
+			return lslab_pos(slab);
 	}
-	return lsregion_alloc_slow(lsregion, size, id);
+	return lsregion_reserve_slow(lsregion, size);
+}
+
+/**
+ * Allocate @a size bytes and associate the allocated block
+ * with @a id.
+ * @param lsregion Allocator object.
+ * @param size     Size to allocate.
+ * @param id       Memory chunk identifier.
+ *
+ * @retval not NULL Success.
+ * @retval NULL     Memory error.
+ */
+static inline void *
+lsregion_alloc(struct lsregion *lsregion, size_t size, int64_t id)
+{
+	void *res = lsregion_reserve(lsregion, size);
+	if (res == NULL)
+		return NULL;
+	struct lslab *slab = rlist_last_entry(&lsregion->slabs.slabs,
+					      struct lslab, next_in_list);
+	assert(size <= lslab_unused(slab));
+	assert(slab->max_id <= id);
+	slab->slab_used += size;
+	slab->max_id = id;
+	lsregion->slabs.stats.used += size;
+	return res;
 }
 
 /**
diff --git a/test/lsregion.c b/test/lsregion.c
index 90ad060..8a8ff56 100644
--- a/test/lsregion.c
+++ b/test/lsregion.c
@@ -358,15 +358,54 @@ test_big_data_small_slabs()
 	check_plan();
 }
 
+static void
+test_reserve(void)
+{
+	plan(10);
+	header();
+
+	struct quota quota;
+	struct slab_arena arena;
+	struct lsregion allocator;
+	quota_init(&quota, 16 * SLAB_MIN_SIZE);
+	is(slab_arena_create(&arena, &quota, 0, 0, MAP_PRIVATE), 0, "init");
+	lsregion_create(&allocator, &arena);
+
+	void *p1 = lsregion_reserve(&allocator, 100);
+	is(lsregion_used(&allocator), 0, "reserve does not occupy memory");
+	is(lsregion_total(&allocator), arena.slab_size, "reserve creates slabs");
+	void *p2 = lsregion_alloc(&allocator, 80, 1);
+	is(p1, p2, "alloc returns the same as reserve, even if size is less");
+	is(lsregion_used(&allocator), 80, "alloc updated 'used'");
+
+	p1 = lsregion_reserve(&allocator, arena.slab_size - lslab_sizeof());
+	is(lsregion_used(&allocator), 80, "next reserve didn't touch 'used'");
+	is(lsregion_total(&allocator), arena.slab_size * 2, "but changed "
+	   "'total' because second slab is allocated");
+	is(lsregion_slab_count(&allocator), 2, "slab count is 2 now");
+	lsregion_gc(&allocator, 1);
+
+	is(lsregion_used(&allocator), 0, "gc works fine with empty reserved "
+	   "slabs");
+	is(lsregion_slab_count(&allocator), 0, "all slabs are removed");
+
+	lsregion_destroy(&allocator);
+	slab_arena_destroy(&arena);
+
+	footer();
+	check_plan();
+}
+
 int
 main()
 {
-	plan(4);
+	plan(5);
 
 	test_basic();
 	test_many_allocs_one_slab();
 	test_many_allocs_many_slabs();
 	test_big_data_small_slabs();
+	test_reserve();
 
 	return check_plan();
 }
-- 
2.21.1 (Apple Git-122.3)


