From: Vladislav Shpilevoy <v.shpilevoy@tarantool.org> To: tarantool-patches@dev.tarantool.org, korablev@tarantool.org, tsafin@tarantool.org, gorcunov@gmail.com Subject: [Tarantool-patches] [PATCH small 1/2] lsregion: introduce lsregion_reserve() Date: Fri, 15 May 2020 01:31:03 +0200 [thread overview] Message-ID: <769a21bcedae86c9b736e8ac04dc669b14bc64ef.1589498880.git.v.shpilevoy@tarantool.org> (raw) In-Reply-To: <cover.1589498880.git.v.shpilevoy@tarantool.org> So far lsregion provided only lsregion_alloc(), even though all the other allocators provide both alloc() and reserve(). Reservation is useful when one needs to reserve more and allocate less. In that way the unused tail has a chance of becoming a part of a next allocation. This will be the case for the upcoming lsregion_aligned_alloc(), which will reserve more than requested and probably use only part of it depending on the result address alignment. Needed for https://github.com/tarantool/tarantool/issues/4609 --- small/lsregion.c | 12 ++---------- small/lsregion.h | 49 +++++++++++++++++++++++++++++++++--------------- test/lsregion.c | 41 +++++++++++++++++++++++++++++++++++++++- 3 files changed, 76 insertions(+), 26 deletions(-) diff --git a/small/lsregion.c b/small/lsregion.c index 6bcc043..ff4a7aa 100644 --- a/small/lsregion.c +++ b/small/lsregion.c @@ -32,7 +32,7 @@ #include "lsregion.h" void * -lsregion_alloc_slow(struct lsregion *lsregion, size_t size, int64_t id) +lsregion_reserve_slow(struct lsregion *lsregion, size_t size) { struct lslab *slab = NULL; size_t slab_size = lsregion->arena->slab_size; @@ -77,14 +77,6 @@ lsregion_alloc_slow(struct lsregion *lsregion, size_t size, int64_t id) } } assert(slab != NULL); - assert(slab->max_id <= id); assert(size <= lslab_unused(slab)); - void *res = lslab_pos(slab); - slab->slab_used += size; - - /* Update the memory block meta info. 
*/ - assert(slab->max_id <= id); - slab->max_id = id; - lsregion->slabs.stats.used += size; - return res; + return lslab_pos(slab); } diff --git a/small/lsregion.h b/small/lsregion.h index 73d68dc..52431e7 100644 --- a/small/lsregion.h +++ b/small/lsregion.h @@ -153,22 +153,21 @@ lsregion_create(struct lsregion *lsregion, struct slab_arena *arena) lsregion->cached = NULL; } -/** @sa lsregion_alloc(). */ +/** @sa lsregion_reserve(). */ void * -lsregion_alloc_slow(struct lsregion *lsregion, size_t size, int64_t id); +lsregion_reserve_slow(struct lsregion *lsregion, size_t size); /** - * Allocate \p size bytes and assicoate the allocated block - * with \p id. + * Make sure a next allocation of at least @a size bytes will not + * fail, and will return the same result as this call. * @param lsregion Allocator object. * @param size Size to allocate. - * @param id Memory chunk identifier. * * @retval not NULL Success. * @retval NULL Memory error. */ static inline void * -lsregion_alloc(struct lsregion *lsregion, size_t size, int64_t id) +lsregion_reserve(struct lsregion *lsregion, size_t size) { /* If there is an existing slab then try to use it. */ if (! rlist_empty(&lsregion->slabs.slabs)) { @@ -176,16 +175,36 @@ lsregion_alloc(struct lsregion *lsregion, size_t size, int64_t id) slab = rlist_last_entry(&lsregion->slabs.slabs, struct lslab, next_in_list); assert(slab != NULL); - assert(slab->max_id <= id); - if (size <= lslab_unused(slab)) { - void *res = lslab_pos(slab); - slab->slab_used += size; - slab->max_id = id; - lsregion->slabs.stats.used += size; - return res; - } + if (size <= lslab_unused(slab)) + return lslab_pos(slab); } - return lsregion_alloc_slow(lsregion, size, id); + return lsregion_reserve_slow(lsregion, size); +} + +/** + * Allocate @a size bytes and associate the allocated block + * with @a id. + * @param lsregion Allocator object. + * @param size Size to allocate. + * @param id Memory chunk identifier. + * + * @retval not NULL Success. 
+ * @retval NULL Memory error. + */ +static inline void * +lsregion_alloc(struct lsregion *lsregion, size_t size, int64_t id) +{ + void *res = lsregion_reserve(lsregion, size); + if (res == NULL) + return NULL; + struct lslab *slab = rlist_last_entry(&lsregion->slabs.slabs, + struct lslab, next_in_list); + assert(size <= lslab_unused(slab)); + assert(slab->max_id <= id); + slab->slab_used += size; + slab->max_id = id; + lsregion->slabs.stats.used += size; + return res; } /** diff --git a/test/lsregion.c b/test/lsregion.c index 90ad060..8a8ff56 100644 --- a/test/lsregion.c +++ b/test/lsregion.c @@ -358,15 +358,54 @@ test_big_data_small_slabs() check_plan(); } +static void +test_reserve(void) +{ + plan(10); + header(); + + struct quota quota; + struct slab_arena arena; + struct lsregion allocator; + quota_init(&quota, 16 * SLAB_MIN_SIZE); + is(slab_arena_create(&arena, &quota, 0, 0, MAP_PRIVATE), 0, "init"); + lsregion_create(&allocator, &arena); + + void *p1 = lsregion_reserve(&allocator, 100); + is(lsregion_used(&allocator), 0, "reserve does not occupy memory"); + is(lsregion_total(&allocator), arena.slab_size, "reserve creates slabs"); + void *p2 = lsregion_alloc(&allocator, 80, 1); + is(p1, p2, "alloc returns the same as reserve, even if size is less"); + is(lsregion_used(&allocator), 80, "alloc updated 'used'"); + + p1 = lsregion_reserve(&allocator, arena.slab_size - lslab_sizeof()); + is(lsregion_used(&allocator), 80, "next reserve didn't touch 'used'"); + is(lsregion_total(&allocator), arena.slab_size * 2, "but changed " + "'total' because second slab is allocated"); + is(lsregion_slab_count(&allocator), 2, "slab count is 2 now"); + lsregion_gc(&allocator, 1); + + is(lsregion_used(&allocator), 0, "gc works fine with empty reserved " + "slabs"); + is(lsregion_slab_count(&allocator), 0, "all slabs are removed"); + + lsregion_destroy(&allocator); + slab_arena_destroy(&arena); + + footer(); + check_plan(); +} + int main() { - plan(4); + plan(5); test_basic(); 
test_many_allocs_one_slab(); test_many_allocs_many_slabs(); test_big_data_small_slabs(); + test_reserve(); return check_plan(); } -- 2.21.1 (Apple Git-122.3)
next prev parent reply other threads:[~2020-05-14 23:31 UTC|newest] Thread overview: 10+ messages / expand[flat|nested] mbox.gz Atom feed top 2020-05-14 23:31 [Tarantool-patches] [PATCH small 0/2] Aligned lsregion Vladislav Shpilevoy 2020-05-14 23:31 ` Vladislav Shpilevoy [this message] 2020-05-15 12:35 ` [Tarantool-patches] [PATCH small 1/2] lsregion: introduce lsregion_reserve() Aleksandr Lyapunov 2020-05-14 23:31 ` [Tarantool-patches] [PATCH small 2/2] lsregion: provide aligned version of alloc Vladislav Shpilevoy 2020-05-15 13:03 ` Aleksandr Lyapunov 2020-05-15 23:24 ` Vladislav Shpilevoy 2020-05-15 12:26 ` [Tarantool-patches] [PATCH small 0/2] Aligned lsregion Aleksandr Lyapunov 2020-05-15 23:22 ` Vladislav Shpilevoy 2020-05-16 19:09 ` Aleksandr Lyapunov 2020-05-17 13:56 ` Vladislav Shpilevoy
Reply instructions: You may reply publicly to this message via plain-text email using any one of the following methods: * Save the following mbox file, import it into your mail client, and reply-to-all from there: mbox Avoid top-posting and favor interleaved quoting: https://en.wikipedia.org/wiki/Posting_style#Interleaved_style * Reply using the --to, --cc, and --in-reply-to switches of git-send-email(1): git send-email \ --in-reply-to=769a21bcedae86c9b736e8ac04dc669b14bc64ef.1589498880.git.v.shpilevoy@tarantool.org \ --to=v.shpilevoy@tarantool.org \ --cc=gorcunov@gmail.com \ --cc=korablev@tarantool.org \ --cc=tarantool-patches@dev.tarantool.org \ --cc=tsafin@tarantool.org \ --subject='Re: [Tarantool-patches] [PATCH small 1/2] lsregion: introduce lsregion_reserve()' \ /path/to/YOUR_REPLY https://kernel.org/pub/software/scm/git/docs/git-send-email.html * If your mail client supports setting the In-Reply-To header via mailto: links, try the mailto: link
This is a public inbox, see mirroring instructions for how to clone and mirror all data and code used for this inbox