Tarantool development patches archive
* [Tarantool-patches] [PATCH v2 small 1/1] lsregion: introduce aligned alloc
@ 2020-05-15 23:21 Vladislav Shpilevoy
  2020-05-19 16:16 ` Aleksandr Lyapunov
  2020-05-19 16:21 ` Timur Safin
  0 siblings, 2 replies; 5+ messages in thread
From: Vladislav Shpilevoy @ 2020-05-15 23:21 UTC (permalink / raw)
  To: tarantool-patches, tsafin, alyapunov, gorcunov

lsregion_alloc() can return an unaligned address, even an odd one.
This is a problem when the allocation is used for a type that
requires alignment, such as a number, or a struct containing
numbers.

Unaligned access to objects stored in the returned memory is
either slower than aligned access or can even crash on certain
architectures. It also drives clang's 'alignment' sanitizer
crazy.

The patch provides a function to make aligned allocations on
lsregion.
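
For illustration, a minimal usage sketch of the new API (it mirrors
the setup used in the tests below; 'value' is a hypothetical
variable, header paths are assumed, and error handling is trimmed):

	#include <stdint.h>
	#include <sys/mman.h>
	#include "small/lsregion.h"

	struct quota quota;
	struct slab_arena arena;
	struct lsregion allocator;

	quota_init(&quota, 16 * SLAB_MIN_SIZE);
	slab_arena_create(&arena, &quota, 0, 0, MAP_PRIVATE);
	lsregion_create(&allocator, &arena);

	/* 8-byte aligned allocation, tagged with id 1. */
	uint64_t *value = lsregion_aligned_alloc(&allocator,
						 sizeof(*value), 8, 1);
	if (value != NULL)
		*value = 42;

	/* Free everything allocated with id <= 1. */
	lsregion_gc(&allocator, 1);

	lsregion_destroy(&allocator);
	slab_arena_destroy(&arena);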

Part of https://github.com/tarantool/tarantool/issues/4609
---
Branch: http://github.com/tarantool/small/tree/gerold103/aligned-lsregion
Issue: https://github.com/tarantool/tarantool/issues/4609

Changes in v2:
- Made lsregion not reserve extra bytes when an aligned
  allocation is requested and the first available address is
  already aligned.
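
For reference, the aligned position is computed with small_align();
assuming the usual power-of-two round-up helper, it behaves roughly
like this sketch:

	/*
	 * Round addr up to the next multiple of alignment;
	 * alignment is assumed to be a power of two.
	 */
	static inline size_t
	small_align(size_t addr, size_t alignment)
	{
		return (addr + alignment - 1) & ~(alignment - 1);
	}

So when lslab_pos() already sits on a multiple of the requested
alignment, the rounding is a no-op and no extra bytes are consumed.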

 small/lsregion.c     |  93 +++++++++++++++++-----------------
 small/lsregion.h     | 116 +++++++++++++++++++++++++++++++++++++------
 test/lsregion.c      |  95 ++++++++++++++++++++++++++++++++++-
 test/lsregion.result |  32 +++++++++++-
 4 files changed, 274 insertions(+), 62 deletions(-)

diff --git a/small/lsregion.c b/small/lsregion.c
index 6bcc043..e6e347c 100644
--- a/small/lsregion.c
+++ b/small/lsregion.c
@@ -32,59 +32,62 @@
 #include "lsregion.h"
 
 void *
-lsregion_alloc_slow(struct lsregion *lsregion, size_t size, int64_t id)
+lsregion_aligned_reserve_slow(struct lsregion *lsregion, size_t size,
+			      size_t alignment, void **unaligned)
 {
-	struct lslab *slab = NULL;
-	size_t slab_size = lsregion->arena->slab_size;
+	void *pos;
+	struct lslab *slab;
+	struct slab_arena *arena = lsregion->arena;
+	size_t slab_size = arena->slab_size;
 
 	/* If there is an existing slab then try to use it. */
 	if (! rlist_empty(&lsregion->slabs.slabs)) {
 		slab = rlist_last_entry(&lsregion->slabs.slabs, struct lslab,
 					next_in_list);
 		assert(slab != NULL);
+		*unaligned = lslab_pos(slab);
+		pos = (void *)small_align((size_t)*unaligned, alignment);
+		if (pos + size <= lslab_end(slab))
+			return pos;
 	}
-	if ((slab != NULL && size > lslab_unused(slab)) ||
-	    slab == NULL) {
-		if (size + lslab_sizeof() > slab_size) {
-			/* Large allocation, use malloc() */
-			slab_size = size + lslab_sizeof();
-			struct quota *quota = lsregion->arena->quota;
-			if (quota_use(quota, slab_size) < 0)
-				return NULL;
-			slab = malloc(slab_size);
-			if (slab == NULL) {
-				quota_release(quota, slab_size);
-				return NULL;
-			}
-			lslab_create(slab, slab_size);
-			rlist_add_tail_entry(&lsregion->slabs.slabs, slab,
-					     next_in_list);
-			lsregion->slabs.stats.total += slab_size;
-		} else if (lsregion->cached != NULL) {
-			/* If there is the cached slab then use it. */
-			slab = lsregion->cached;
-			lsregion->cached = NULL;
-			rlist_add_tail_entry(&lsregion->slabs.slabs, slab,
-					     next_in_list);
-		} else {
-			slab = (struct lslab *) slab_map(lsregion->arena);
-			if (slab == NULL)
-				return NULL;
-			lslab_create(slab, slab_size);
-			rlist_add_tail_entry(&lsregion->slabs.slabs, slab,
-					     next_in_list);
-			lsregion->slabs.stats.total += slab_size;
+	/*
+	 * Over-reserve: the first free position of the new slab
+	 * may not satisfy the requested alignment, and aligning
+	 * it can leave too little space for the requested size.
+	 */
+	size_t aligned_size = size + alignment - 1;
+	if (aligned_size + lslab_sizeof() > slab_size) {
+		/* Large allocation, use malloc() */
+		slab_size = aligned_size + lslab_sizeof();
+		struct quota *quota = arena->quota;
+		if (quota_use(quota, slab_size) < 0)
+			return NULL;
+		slab = malloc(slab_size);
+		if (slab == NULL) {
+			quota_release(quota, slab_size);
+			return NULL;
 		}
+		lslab_create(slab, slab_size);
+		rlist_add_tail_entry(&lsregion->slabs.slabs, slab,
+				     next_in_list);
+		lsregion->slabs.stats.total += slab_size;
+	} else if (lsregion->cached != NULL) {
+		/* If there is the cached slab then use it. */
+		slab = lsregion->cached;
+		lsregion->cached = NULL;
+		rlist_add_tail_entry(&lsregion->slabs.slabs, slab,
+				     next_in_list);
+	} else {
+		slab = (struct lslab *) slab_map(arena);
+		if (slab == NULL)
+			return NULL;
+		lslab_create(slab, slab_size);
+		rlist_add_tail_entry(&lsregion->slabs.slabs, slab,
+				     next_in_list);
+		lsregion->slabs.stats.total += slab_size;
 	}
-	assert(slab != NULL);
-	assert(slab->max_id <= id);
-	assert(size <= lslab_unused(slab));
-	void *res = lslab_pos(slab);
-	slab->slab_used += size;
-
-	/* Update the memory block meta info. */
-	assert(slab->max_id <= id);
-	slab->max_id = id;
-	lsregion->slabs.stats.used += size;
-	return res;
+	*unaligned = lslab_pos(slab);
+	pos = (void *)small_align((size_t)*unaligned, alignment);
+	assert(pos + size <= lslab_end(slab));
+	return pos;
 }
diff --git a/small/lsregion.h b/small/lsregion.h
index 73d68dc..2fc0888 100644
--- a/small/lsregion.h
+++ b/small/lsregion.h
@@ -127,6 +127,18 @@ lslab_unused(const struct lslab *slab)
 	return slab->slab_size - slab->slab_used;
 }
 
+/**
+ * Update slab statistics and meta according to new allocation.
+ */
+static inline void
+lslab_use(struct lslab *slab, size_t size, int64_t id)
+{
+	assert(size <= lslab_unused(slab));
+	assert(slab->max_id <= id);
+	slab->slab_used += size;
+	slab->max_id = id;
+}
+
 /**
  * Pointer to the end of the used part of the slab.
  * @param slab Slab container.
@@ -138,6 +150,13 @@ lslab_pos(struct lslab *slab)
 	return (char *) slab + slab->slab_used;
 }
 
+/** Pointer to the end of the slab memory. */
+static inline void *
+lslab_end(struct lslab *slab)
+{
+	return (char *)slab + slab->slab_size;
+}
+
 /**
  * Initialize log structured allocator.
  * @param lsregion Allocator object.
@@ -153,22 +172,30 @@ lsregion_create(struct lsregion *lsregion, struct slab_arena *arena)
 	lsregion->cached = NULL;
 }
 
-/** @sa lsregion_alloc().  */
+/** @sa lsregion_aligned_reserve(). */
 void *
-lsregion_alloc_slow(struct lsregion *lsregion, size_t size, int64_t id);
+lsregion_aligned_reserve_slow(struct lsregion *lsregion, size_t size,
+			      size_t alignment, void **unaligned);
 
 /**
- * Allocate \p size bytes and assicoate the allocated block
- * with \p id.
+ * Make sure a next allocation of at least @a size bytes will not
+ * fail, and will return the same result as this call, aligned by
+ * @a alignment.
  * @param lsregion Allocator object.
  * @param size     Size to allocate.
- * @param id       Memory chunk identifier.
+ * @param alignment Byte alignment required for the result
+ *        address.
+ * @param[out] unaligned When the function succeeds, it returns
+ *        aligned address, and stores base unaligned address to
+ *        this variable. That helps to find how many bytes were
+ *        wasted to make the alignment.
  *
  * @retval not NULL Success.
  * @retval NULL     Memory error.
  */
 static inline void *
-lsregion_alloc(struct lsregion *lsregion, size_t size, int64_t id)
+lsregion_aligned_reserve(struct lsregion *lsregion, size_t size,
+			 size_t alignment, void **unaligned)
 {
 	/* If there is an existing slab then try to use it. */
 	if (! rlist_empty(&lsregion->slabs.slabs)) {
@@ -176,16 +203,75 @@ lsregion_alloc(struct lsregion *lsregion, size_t size, int64_t id)
 		slab = rlist_last_entry(&lsregion->slabs.slabs, struct lslab,
 					next_in_list);
 		assert(slab != NULL);
-		assert(slab->max_id <= id);
-		if (size <= lslab_unused(slab)) {
-			void *res = lslab_pos(slab);
-			slab->slab_used += size;
-			slab->max_id = id;
-			lsregion->slabs.stats.used += size;
-			return res;
-		}
+		*unaligned = lslab_pos(slab);
+		void *pos = (void *)small_align((size_t)*unaligned, alignment);
+		if (pos + size <= lslab_end(slab))
+			return pos;
 	}
-	return lsregion_alloc_slow(lsregion, size, id);
+	return lsregion_aligned_reserve_slow(lsregion, size, alignment,
+					     unaligned);
+}
+
+/**
+ * Make sure a next allocation of at least @a size bytes will not
+ * fail, and will return the same result as this call.
+ * @param lsregion Allocator object.
+ * @param size Size to allocate.
+ *
+ * @retval not-NULL Success.
+ * @retval NULL Memory error.
+ */
+static inline void *
+lsregion_reserve(struct lsregion *lsregion, size_t size)
+{
+	void *unaligned = NULL;
+	void *res = lsregion_aligned_reserve(lsregion, size, 1, &unaligned);
+	assert(res == NULL || res == unaligned);
+	return res;
+}
+
+/**
+ * Allocate @a size bytes and associate the allocated block
+ * with @a id.
+ * @param lsregion Allocator object.
+ * @param size Size to allocate.
+ * @param id Memory chunk identifier.
+ *
+ * @retval not-NULL Success.
+ * @retval NULL Memory error.
+ */
+static inline void *
+lsregion_alloc(struct lsregion *lsregion, size_t size, int64_t id)
+{
+	void *res = lsregion_reserve(lsregion, size);
+	if (res == NULL)
+		return NULL;
+	struct lslab *slab = rlist_last_entry(&lsregion->slabs.slabs,
+					      struct lslab, next_in_list);
+	lslab_use(slab, size, id);
+	lsregion->slabs.stats.used += size;
+	return res;
+}
+
+/**
+ * The same as normal alloc, but the resulting pointer is aligned
+ * by @a alignment.
+ */
+static inline void *
+lsregion_aligned_alloc(struct lsregion *lsregion, size_t size, size_t alignment,
+		       int64_t id)
+{
+	void *unaligned;
+	void *res = lsregion_aligned_reserve(lsregion, size, alignment,
+					     &unaligned);
+	if (res == NULL)
+		return NULL;
+	struct lslab *slab = rlist_last_entry(&lsregion->slabs.slabs,
+					      struct lslab, next_in_list);
+	size += res - unaligned;
+	lslab_use(slab, size, id);
+	lsregion->slabs.stats.used += size;
+	return res;
 }
 
 /**
diff --git a/test/lsregion.c b/test/lsregion.c
index 90ad060..416c6fe 100644
--- a/test/lsregion.c
+++ b/test/lsregion.c
@@ -358,15 +358,108 @@ test_big_data_small_slabs()
 	check_plan();
 }
 
+static void
+test_reserve(void)
+{
+	plan(10);
+	header();
+
+	struct quota quota;
+	struct slab_arena arena;
+	struct lsregion allocator;
+	quota_init(&quota, 16 * SLAB_MIN_SIZE);
+	is(slab_arena_create(&arena, &quota, 0, 0, MAP_PRIVATE), 0, "init");
+	lsregion_create(&allocator, &arena);
+
+	void *p1 = lsregion_reserve(&allocator, 100);
+	is(lsregion_used(&allocator), 0, "reserve does not occupy memory");
+	is(lsregion_total(&allocator), arena.slab_size, "reserve creates slabs");
+	void *p2 = lsregion_alloc(&allocator, 80, 1);
+	is(p1, p2, "alloc returns the same as reserve, even if size is less");
+	is(lsregion_used(&allocator), 80, "alloc updated 'used'");
+
+	p1 = lsregion_reserve(&allocator, arena.slab_size - lslab_sizeof());
+	is(lsregion_used(&allocator), 80, "next reserve didn't touch 'used'");
+	is(lsregion_total(&allocator), arena.slab_size * 2, "but changed "
+	   "'total' because second slab is allocated");
+	is(lsregion_slab_count(&allocator), 2, "slab count is 2 now");
+	lsregion_gc(&allocator, 1);
+
+	is(lsregion_used(&allocator), 0, "gc works fine with empty reserved "
+	   "slabs");
+	is(lsregion_slab_count(&allocator), 0, "all slabs are removed");
+
+	lsregion_destroy(&allocator);
+	slab_arena_destroy(&arena);
+
+	footer();
+	check_plan();
+}
+
+static void
+test_aligned(void)
+{
+	plan(12);
+	header();
+
+	struct quota quota;
+	struct slab_arena arena;
+	struct lsregion allocator;
+	quota_init(&quota, 16 * SLAB_MIN_SIZE);
+	is(slab_arena_create(&arena, &quota, 0, 0, MAP_PRIVATE), 0, "init");
+	lsregion_create(&allocator, &arena);
+	int id = 0;
+
+	++id;
+	void *p1 = lsregion_aligned_alloc(&allocator, 8, 8, id);
+	ok((unsigned long)p1 % 8 == 0, "trivial aligned");
+	is(lsregion_used(&allocator), 8, "'used'");
+
+	++id;
+	void *p2 = lsregion_aligned_alloc(&allocator, 1, 16, id);
+	ok((unsigned long)p2 % 16 == 0, "16 byte alignment for 1 byte");
+	void *p3 = lsregion_aligned_alloc(&allocator, 1, 16, id);
+	ok(p3 == (char *)p2 + 16, "second 16 aligned alloc of 1 byte is far "
+	   "from first");
+	ok((unsigned long)p3 % 16 == 0, "aligned by 16 too");
+
+	is(lsregion_used(&allocator), (size_t)(p3 + 1 - p1), "'used'");
+
+	++id;
+	void *p4 = lsregion_aligned_alloc(&allocator, 3, 4, id);
+	ok(p4 == (char *)p3 + 4, "align next by 4 bytes, should be closer to "
+	   "previous");
+	ok((unsigned long)p4 % 4 == 0, "aligned by 4");
+	is(lsregion_used(&allocator), (size_t)(p4 + 3 - p1), "'used'");
+
+	lsregion_gc(&allocator, id);
+
+	++id;
+	p1 = lsregion_aligned_alloc(&allocator,
+				    arena.slab_size - lslab_sizeof(), 32, id);
+	ok((unsigned long)p1 % 32 == 0, "32 byte aligned alloc of slab size");
+
+	lsregion_gc(&allocator, id);
+	is(lsregion_used(&allocator), 0, "gc deleted all");
+
+	lsregion_destroy(&allocator);
+	slab_arena_destroy(&arena);
+
+	footer();
+	check_plan();
+}
+
 int
 main()
 {
-	plan(4);
+	plan(6);
 
 	test_basic();
 	test_many_allocs_one_slab();
 	test_many_allocs_many_slabs();
 	test_big_data_small_slabs();
+	test_reserve();
+	test_aligned();
 
 	return check_plan();
 }
diff --git a/test/lsregion.result b/test/lsregion.result
index 3157283..399c2a6 100644
--- a/test/lsregion.result
+++ b/test/lsregion.result
@@ -1,4 +1,4 @@
-1..4
+1..6
 # basic
     1..42
     ok 1 - init
@@ -76,3 +76,33 @@ ok 3 - subtests
     ok 6 - slab count after gc (id / 2)
     ok 7 - arena used after gc(id / 2)
 ok 4 - subtests
+    1..10
+	*** test_reserve ***
+    ok 1 - init
+    ok 2 - reserve does not occupy memory
+    ok 3 - reserve creates slabs
+    ok 4 - alloc returns the same as reserve, even if size is less
+    ok 5 - alloc updated 'used'
+    ok 6 - next reserve didn't touch 'used'
+    ok 7 - but changed 'total' because second slab is allocated
+    ok 8 - slab count is 2 now
+    ok 9 - gc works fine with empty reserved slabs
+    ok 10 - all slabs are removed
+	*** test_reserve: done ***
+ok 5 - subtests
+    1..12
+	*** test_aligned ***
+    ok 1 - init
+    ok 2 - trivial aligned
+    ok 3 - 'used'
+    ok 4 - 16 byte alignment for 1 byte
+    ok 5 - second 16 aligned alloc of 1 byte is far from first
+    ok 6 - aligned by 16 too
+    ok 7 - 'used'
+    ok 8 - align next by 4 bytes, should be closer to previous
+    ok 9 - aligned by 4
+    ok 10 - 'used'
+    ok 11 - 32 byte aligned alloc of slab size
+    ok 12 - gc deleted all
+	*** test_aligned: done ***
+ok 6 - subtests
-- 
2.21.1 (Apple Git-122.3)


* Re: [Tarantool-patches] [PATCH v2 small 1/1] lsregion: introduce aligned alloc
  2020-05-15 23:21 [Tarantool-patches] [PATCH v2 small 1/1] lsregion: introduce aligned alloc Vladislav Shpilevoy
@ 2020-05-19 16:16 ` Aleksandr Lyapunov
  2020-05-19 16:21 ` Timur Safin
  1 sibling, 0 replies; 5+ messages in thread
From: Aleksandr Lyapunov @ 2020-05-19 16:16 UTC (permalink / raw)
  To: Vladislav Shpilevoy, tarantool-patches, tsafin, gorcunov

Thanks for the changes! LGTM.

On 5/16/20 2:21 AM, Vladislav Shpilevoy wrote:
> lsregion_alloc() can return an unaligned address, even an odd one.
> This is a problem when the allocation is used for a type that
> requires alignment, such as a number, or a struct containing
> numbers.
>
> Unaligned access to objects stored in the returned memory is
> either slower than aligned access or can even crash on certain
> architectures. It also drives clang's 'alignment' sanitizer
> crazy.
>
> The patch provides a function to make aligned allocations on
> lsregion.
>
> Part of https://github.com/tarantool/tarantool/issues/4609
> ---
> Branch: http://github.com/tarantool/small/tree/gerold103/aligned-lsregion
> Issue: https://github.com/tarantool/tarantool/issues/4609
>
> Changes in v2:
> - Made lsregion not reserve extra bytes when an aligned
>    allocation is requested and the first available address is
>    already aligned.


* Re: [Tarantool-patches] [PATCH v2 small 1/1] lsregion: introduce aligned alloc
  2020-05-15 23:21 [Tarantool-patches] [PATCH v2 small 1/1] lsregion: introduce aligned alloc Vladislav Shpilevoy
  2020-05-19 16:16 ` Aleksandr Lyapunov
@ 2020-05-19 16:21 ` Timur Safin
  2020-05-19 21:29   ` Vladislav Shpilevoy
  1 sibling, 1 reply; 5+ messages in thread
From: Timur Safin @ 2020-05-19 16:21 UTC (permalink / raw)
  To: 'Vladislav Shpilevoy', tarantool-patches, alyapunov, gorcunov

Other than the small note below, it's looking great, but please keep it in mind...

: From: Vladislav Shpilevoy <v.shpilevoy@tarantool.org>
: Subject: [PATCH v2 small 1/1] lsregion: introduce aligned alloc
: 


: @@ -32,59 +32,62 @@
:  #include "lsregion.h"
: 
:  void *
: -lsregion_alloc_slow(struct lsregion *lsregion, size_t size, int64_t id)
: +lsregion_aligned_reserve_slow(struct lsregion *lsregion, size_t size,
: +			      size_t alignment, void **unaligned)
:  {
: -	struct lslab *slab = NULL;
: -	size_t slab_size = lsregion->arena->slab_size;
: +	void *pos;
: +	struct lslab *slab;
: +	struct slab_arena *arena = lsregion->arena;
: +	size_t slab_size = arena->slab_size;
: 
:  	/* If there is an existing slab then try to use it. */
:  	if (! rlist_empty(&lsregion->slabs.slabs)) {
:  		slab = rlist_last_entry(&lsregion->slabs.slabs, struct lslab,
:  					next_in_list);
:  		assert(slab != NULL);
: +		*unaligned = lslab_pos(slab);
: +		pos = (void *)small_align((size_t)*unaligned, alignment);
: +		if (pos + size <= lslab_end(slab))
: +			return pos;

void * pointer arithmetic is non-standard (a GCC extension), and I'd
avoid using it in the long run. There are still compilers which do
not support it.
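
For example (an illustrative snippet, not taken from the patch):

	void *pos = lslab_pos(slab);
	/* GNU extension: arithmetic on void * pretends sizeof(void) == 1. */
	void *end_gnu = pos + size;
	/* Portable ISO C: cast to char * before doing the arithmetic. */
	void *end_iso = (char *)pos + size;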

Timur


* Re: [Tarantool-patches] [PATCH v2 small 1/1] lsregion: introduce aligned alloc
  2020-05-19 16:21 ` Timur Safin
@ 2020-05-19 21:29   ` Vladislav Shpilevoy
  2020-05-19 21:48     ` Timur Safin
  0 siblings, 1 reply; 5+ messages in thread
From: Vladislav Shpilevoy @ 2020-05-19 21:29 UTC (permalink / raw)
  To: Timur Safin, tarantool-patches, alyapunov, gorcunov

Hi! Thanks for the review!

On 19/05/2020 18:21, Timur Safin wrote:
> Other than the small note below, it's looking great, but please keep it in mind...
> 
> : From: Vladislav Shpilevoy <v.shpilevoy@tarantool.org>
> : Subject: [PATCH v2 small 1/1] lsregion: introduce aligned alloc
> : 
> 
> 
> : @@ -32,59 +32,62 @@
> :  #include "lsregion.h"
> : 
> :  void *
> : -lsregion_alloc_slow(struct lsregion *lsregion, size_t size, int64_t id)
> : +lsregion_aligned_reserve_slow(struct lsregion *lsregion, size_t size,
> : +			      size_t alignment, void **unaligned)
> :  {
> : -	struct lslab *slab = NULL;
> : -	size_t slab_size = lsregion->arena->slab_size;
> : +	void *pos;
> : +	struct lslab *slab;
> : +	struct slab_arena *arena = lsregion->arena;
> : +	size_t slab_size = arena->slab_size;
> : 
> :  	/* If there is an existing slab then try to use it. */
> :  	if (! rlist_empty(&lsregion->slabs.slabs)) {
> :  		slab = rlist_last_entry(&lsregion->slabs.slabs, struct lslab,
> :  					next_in_list);
> :  		assert(slab != NULL);
> : +		*unaligned = lslab_pos(slab);
> : +		pos = (void *)small_align((size_t)*unaligned, alignment);
> : +		if (pos + size <= lslab_end(slab))
> : +			return pos;
> 
> void * pointer arithmetic is non-standard (a GCC extension), and I'd
> avoid using it in the long run. There are still compilers which do
> not support it.
> 
> Timur

Yeah, I missed this. Thanks for noticing.

====================
diff --git a/small/lsregion.c b/small/lsregion.c
index e6e347c..c01885d 100644
--- a/small/lsregion.c
+++ b/small/lsregion.c
@@ -47,7 +47,7 @@ lsregion_aligned_reserve_slow(struct lsregion *lsregion, size_t size,
 		assert(slab != NULL);
 		*unaligned = lslab_pos(slab);
 		pos = (void *)small_align((size_t)*unaligned, alignment);
-		if (pos + size <= lslab_end(slab))
+		if ((char *)pos + size <= (char *)lslab_end(slab))
 			return pos;
 	}
 	/*
@@ -88,6 +88,6 @@ lsregion_aligned_reserve_slow(struct lsregion *lsregion, size_t size,
 	}
 	*unaligned = lslab_pos(slab);
 	pos = (void *)small_align((size_t)*unaligned, alignment);
-	assert(pos + size <= lslab_end(slab));
+	assert((char *)pos + size <= (char *)lslab_end(slab));
 	return pos;
 }
diff --git a/small/lsregion.h b/small/lsregion.h
index 2fc0888..3b9a8d9 100644
--- a/small/lsregion.h
+++ b/small/lsregion.h
@@ -205,7 +205,7 @@ lsregion_aligned_reserve(struct lsregion *lsregion, size_t size,
 		assert(slab != NULL);
 		*unaligned = lslab_pos(slab);
 		void *pos = (void *)small_align((size_t)*unaligned, alignment);
-		if (pos + size <= lslab_end(slab))
+		if ((char *)pos + size <= (char *)lslab_end(slab))
 			return pos;
 	}
 	return lsregion_aligned_reserve_slow(lsregion, size, alignment,


* Re: [Tarantool-patches] [PATCH v2 small 1/1] lsregion: introduce aligned alloc
  2020-05-19 21:29   ` Vladislav Shpilevoy
@ 2020-05-19 21:48     ` Timur Safin
  0 siblings, 0 replies; 5+ messages in thread
From: Timur Safin @ 2020-05-19 21:48 UTC (permalink / raw)
  To: 'Vladislav Shpilevoy', tarantool-patches, alyapunov, gorcunov

Now, I'm happy - LGTM

: -----Original Message-----
: From: Vladislav Shpilevoy <v.shpilevoy@tarantool.org>
: Sent: Wednesday, May 20, 2020 12:30 AM
: To: Timur Safin <tsafin@tarantool.org>; tarantool-
: patches@dev.tarantool.org; alyapunov@tarantool.org; gorcunov@gmail.com
: Subject: Re: [PATCH v2 small 1/1] lsregion: introduce aligned alloc
: 
: Hi! Thanks for the review!
: 
: On 19/05/2020 18:21, Timur Safin wrote:
: > Other than the small note below, it's looking great, but please keep
: > it in mind...
: >
: > : From: Vladislav Shpilevoy <v.shpilevoy@tarantool.org>
: > : Subject: [PATCH v2 small 1/1] lsregion: introduce aligned alloc
: > :
: >
: >
: > : @@ -32,59 +32,62 @@
: > :  #include "lsregion.h"
: > :
: > :  void *
: > : -lsregion_alloc_slow(struct lsregion *lsregion, size_t size, int64_t
: id)
: > : +lsregion_aligned_reserve_slow(struct lsregion *lsregion, size_t size,
: > : +			      size_t alignment, void **unaligned)
: > :  {
: > : -	struct lslab *slab = NULL;
: > : -	size_t slab_size = lsregion->arena->slab_size;
: > : +	void *pos;
: > : +	struct lslab *slab;
: > : +	struct slab_arena *arena = lsregion->arena;
: > : +	size_t slab_size = arena->slab_size;
: > :
: > :  	/* If there is an existing slab then try to use it. */
: > :  	if (! rlist_empty(&lsregion->slabs.slabs)) {
: > :  		slab = rlist_last_entry(&lsregion->slabs.slabs, struct lslab,
: > :  					next_in_list);
: > :  		assert(slab != NULL);
: > : +		*unaligned = lslab_pos(slab);
: > : +		pos = (void *)small_align((size_t)*unaligned, alignment);
: > : +		if (pos + size <= lslab_end(slab))
: > : +			return pos;
: >
: > void * pointer arithmetic is non-standard (a GCC extension), and I'd
: > avoid using it in the long run. There are still compilers which do
: > not support it.
: >
: > Timur
: 
: Yeah, I missed this. Thanks for noticing.
: 
: ====================
: diff --git a/small/lsregion.c b/small/lsregion.c
: index e6e347c..c01885d 100644
: --- a/small/lsregion.c
: +++ b/small/lsregion.c
: @@ -47,7 +47,7 @@ lsregion_aligned_reserve_slow(struct lsregion *lsregion,
: size_t size,
:  		assert(slab != NULL);
:  		*unaligned = lslab_pos(slab);
:  		pos = (void *)small_align((size_t)*unaligned, alignment);
: -		if (pos + size <= lslab_end(slab))
: +		if ((char *)pos + size <= (char *)lslab_end(slab))
:  			return pos;
:  	}
:  	/*
: @@ -88,6 +88,6 @@ lsregion_aligned_reserve_slow(struct lsregion *lsregion,
: size_t size,
:  	}
:  	*unaligned = lslab_pos(slab);
:  	pos = (void *)small_align((size_t)*unaligned, alignment);
: -	assert(pos + size <= lslab_end(slab));
: +	assert((char *)pos + size <= (char *)lslab_end(slab));
:  	return pos;
:  }
: diff --git a/small/lsregion.h b/small/lsregion.h
: index 2fc0888..3b9a8d9 100644
: --- a/small/lsregion.h
: +++ b/small/lsregion.h
: @@ -205,7 +205,7 @@ lsregion_aligned_reserve(struct lsregion *lsregion,
: size_t size,
:  		assert(slab != NULL);
:  		*unaligned = lslab_pos(slab);
:  		void *pos = (void *)small_align((size_t)*unaligned,
: alignment);
: -		if (pos + size <= lslab_end(slab))
: +		if ((char *)pos + size <= (char *)lslab_end(slab))
:  			return pos;
:  	}
:  	return lsregion_aligned_reserve_slow(lsregion, size, alignment,

