From mboxrd@z Thu Jan 1 00:00:00 1970
Return-Path:
From: Kirill Shcherbatov
Subject: [PATCH v3 1/7] memtx: introduce universal iterator_pool
Date: Fri, 22 Feb 2019 18:42:26 +0300
Message-Id: <236d59ddf2ed9bb9c9e112763ca2dbd27424482a.1550849496.git.kshcherbatov@tarantool.org>
In-Reply-To:
References:
MIME-Version: 1.0
Content-Transfer-Encoding: 8bit
To: tarantool-patches@freelists.org, vdavydov.dev@gmail.com
Cc: Kirill Shcherbatov
List-ID:

Memtx uses a separate mempool for each iterator type. Since a
series of upcoming changes will introduce more iterators of
different sizes, switch to a single pool and always allocate a
block of the largest iterator size.

Needed for #3961
---
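A minimal standalone sketch of the pattern this patch applies, for
reviewers: one pool whose block size is a compile-time upper bound,
with a static_assert guard next to every iterator type. The demo_*
structs, DEMO_ITERATOR_SIZE, and the malloc-backed demo_pool are
hypothetical stand-ins rather than the real memtx iterators or the
small/mempool allocator; only the sizing scheme mirrors the actual
change.

/* sketch.c -- standalone illustration, not Tarantool code; cc -std=c11 sketch.c */
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical iterators of different sizes (stand-ins for the hash/tree/rtree ones). */
struct demo_hash_iterator  { void *vtab; unsigned bucket; };
struct demo_tree_iterator  { void *vtab; char key_buf[64]; };
struct demo_rtree_iterator { void *vtab; double rect[8]; };

/* Upper bound on any iterator size; every pool block has this size. */
#define DEMO_ITERATOR_SIZE 128

/* The same compile-time guard the patch adds next to each iterator definition. */
static_assert(sizeof(struct demo_hash_iterator) <= DEMO_ITERATOR_SIZE,
              "demo_hash_iterator must fit into DEMO_ITERATOR_SIZE");
static_assert(sizeof(struct demo_tree_iterator) <= DEMO_ITERATOR_SIZE,
              "demo_tree_iterator must fit into DEMO_ITERATOR_SIZE");
static_assert(sizeof(struct demo_rtree_iterator) <= DEMO_ITERATOR_SIZE,
              "demo_rtree_iterator must fit into DEMO_ITERATOR_SIZE");

/* Trivial malloc-backed stand-in for struct mempool: fixed-size blocks only. */
struct demo_pool { size_t objsize; };

static void *
demo_pool_alloc(struct demo_pool *pool)
{
        return malloc(pool->objsize);
}

static void
demo_pool_free(struct demo_pool *pool, void *ptr)
{
        (void)pool;
        free(ptr);
}

int
main(void)
{
        /* One pool serves every iterator type, as iterator_pool does in the patch. */
        struct demo_pool iterator_pool = { .objsize = DEMO_ITERATOR_SIZE };

        struct demo_tree_iterator *it = demo_pool_alloc(&iterator_pool);
        if (it == NULL)
                return 1;
        printf("allocated a %d-byte block for a %zu-byte iterator\n",
               DEMO_ITERATOR_SIZE, sizeof(*it));
        demo_pool_free(&iterator_pool, it);
        return 0;
}

The trade-off is deliberate: the single pool is created lazily by
whichever index is built first and destroyed once in
memtx_engine_shutdown(), while every iterator allocation now takes a
full MEMTX_ITERATOR_SIZE block even when the concrete iterator is
smaller.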
 src/box/memtx_bitset.c | 16 ++++++++++------
 src/box/memtx_engine.c | 10 ++--------
 src/box/memtx_engine.h | 17 +++++++++--------
 src/box/memtx_hash.c   | 15 +++++++++------
 src/box/memtx_rtree.c  | 14 +++++++++-----
 src/box/memtx_tree.c   | 13 ++++++++-----
 6 files changed, 47 insertions(+), 38 deletions(-)

diff --git a/src/box/memtx_bitset.c b/src/box/memtx_bitset.c
index cd7362ee1..9dbc4141d 100644
--- a/src/box/memtx_bitset.c
+++ b/src/box/memtx_bitset.c
@@ -162,6 +162,10 @@ struct bitset_index_iterator {
 	struct mempool *pool;
 };
 
+static_assert(sizeof(struct bitset_index_iterator) <= MEMTX_ITERATOR_SIZE,
+	      "bitset_index_iterator must be less or equal than "
+	      "MEMTX_ITERATOR_SIZE");
+
 static struct bitset_index_iterator *
 bitset_index_iterator(struct iterator *it)
 {
@@ -318,7 +322,7 @@ memtx_bitset_index_create_iterator(struct index *base, enum iterator_type type,
 	(void) part_count;
 
 	struct bitset_index_iterator *it;
-	it = mempool_alloc(&memtx->bitset_iterator_pool);
+	it = mempool_alloc(&memtx->iterator_pool);
 	if (!it) {
 		diag_set(OutOfMemory, sizeof(*it),
 			 "memtx_bitset_index", "iterator");
@@ -326,7 +330,7 @@ memtx_bitset_index_create_iterator(struct index *base, enum iterator_type type,
 	}
 
 	iterator_create(&it->base, base);
-	it->pool = &memtx->bitset_iterator_pool;
+	it->pool = &memtx->iterator_pool;
 	it->base.next = bitset_index_iterator_next;
 	it->base.free = bitset_index_iterator_free;
 
@@ -389,7 +393,7 @@ memtx_bitset_index_create_iterator(struct index *base, enum iterator_type type,
 	return (struct iterator *)it;
 fail:
 	tt_bitset_expr_destroy(&expr);
-	mempool_free(&memtx->bitset_iterator_pool, it);
+	mempool_free(&memtx->iterator_pool, it);
 	return NULL;
 }
 
@@ -493,9 +497,9 @@ memtx_bitset_index_new(struct memtx_engine *memtx, struct index_def *def)
 	assert(def->iid > 0);
 	assert(!def->opts.is_unique);
 
-	if (!mempool_is_initialized(&memtx->bitset_iterator_pool)) {
-		mempool_create(&memtx->bitset_iterator_pool, cord_slab_cache(),
-			       sizeof(struct bitset_index_iterator));
+	if (!mempool_is_initialized(&memtx->iterator_pool)) {
+		mempool_create(&memtx->iterator_pool, cord_slab_cache(),
+			       MEMTX_ITERATOR_SIZE);
 	}
 
 	struct memtx_bitset_index *index =
diff --git a/src/box/memtx_engine.c b/src/box/memtx_engine.c
index 64f43456e..646001cf8 100644
--- a/src/box/memtx_engine.c
+++ b/src/box/memtx_engine.c
@@ -176,14 +176,8 @@ static void
 memtx_engine_shutdown(struct engine *engine)
 {
 	struct memtx_engine *memtx = (struct memtx_engine *)engine;
-	if (mempool_is_initialized(&memtx->tree_iterator_pool))
-		mempool_destroy(&memtx->tree_iterator_pool);
-	if (mempool_is_initialized(&memtx->rtree_iterator_pool))
-		mempool_destroy(&memtx->rtree_iterator_pool);
-	if (mempool_is_initialized(&memtx->hash_iterator_pool))
-		mempool_destroy(&memtx->hash_iterator_pool);
-	if (mempool_is_initialized(&memtx->bitset_iterator_pool))
-		mempool_destroy(&memtx->bitset_iterator_pool);
+	if (mempool_is_initialized(&memtx->iterator_pool))
+		mempool_destroy(&memtx->iterator_pool);
 	mempool_destroy(&memtx->index_extent_pool);
 	slab_cache_destroy(&memtx->index_slab_cache);
 	small_alloc_destroy(&memtx->alloc);
diff --git a/src/box/memtx_engine.h b/src/box/memtx_engine.h
index 0f8e92ee4..2cd4ba771 100644
--- a/src/box/memtx_engine.h
+++ b/src/box/memtx_engine.h
@@ -87,6 +87,13 @@ enum memtx_recovery_state {
 /** Memtx extents pool, available to statistics. */
 extern struct mempool memtx_index_extent_pool;
 
+/**
+ * The size of the biggest memtx iterator. Used with
+ * mempool_create. This is the size of the block that will be
+ * allocated for each iterator.
+ */
+#define MEMTX_ITERATOR_SIZE (696)
+
 struct memtx_engine {
 	struct engine base;
 	/** Engine recovery state. */
@@ -129,14 +136,8 @@ struct memtx_engine {
 	size_t max_tuple_size;
 	/** Incremented with each next snapshot. */
 	uint32_t snapshot_version;
-	/** Memory pool for tree index iterator. */
-	struct mempool tree_iterator_pool;
-	/** Memory pool for rtree index iterator. */
-	struct mempool rtree_iterator_pool;
-	/** Memory pool for hash index iterator. */
-	struct mempool hash_iterator_pool;
-	/** Memory pool for bitset index iterator. */
-	struct mempool bitset_iterator_pool;
+	/** Memory pool for index iterator. */
+	struct mempool iterator_pool;
 	/**
 	 * Garbage collection fiber. Used for asynchronous
 	 * destruction of dropped indexes.
diff --git a/src/box/memtx_hash.c b/src/box/memtx_hash.c
index 511d0e515..b35a52528 100644
--- a/src/box/memtx_hash.c
+++ b/src/box/memtx_hash.c
@@ -49,6 +49,9 @@ struct hash_iterator {
 	struct mempool *pool;
 };
 
+static_assert(sizeof(struct hash_iterator) <= MEMTX_ITERATOR_SIZE,
+	      "hash_iterator must be less or equal than MEMTX_ITERATOR_SIZE");
+
 static void
 hash_iterator_free(struct iterator *iterator)
 {
@@ -311,14 +314,14 @@ memtx_hash_index_create_iterator(struct index *base, enum iterator_type type,
 
 	assert(part_count == 0 || key != NULL);
 
-	struct hash_iterator *it = mempool_alloc(&memtx->hash_iterator_pool);
+	struct hash_iterator *it = mempool_alloc(&memtx->iterator_pool);
 	if (it == NULL) {
 		diag_set(OutOfMemory, sizeof(struct hash_iterator),
 			 "memtx_hash_index", "iterator");
 		return NULL;
 	}
 	iterator_create(&it->base, base);
-	it->pool = &memtx->hash_iterator_pool;
+	it->pool = &memtx->iterator_pool;
 	it->base.free = hash_iterator_free;
 	it->hash_table = &index->hash_table;
 	light_index_iterator_begin(it->hash_table, &it->iterator);
@@ -347,7 +350,7 @@ memtx_hash_index_create_iterator(struct index *base, enum iterator_type type,
 	default:
 		diag_set(UnsupportedIndexFeature, base->def,
 			 "requested iterator type");
-		mempool_free(&memtx->hash_iterator_pool, it);
+		mempool_free(&memtx->iterator_pool, it);
 		return NULL;
 	}
 	return (struct iterator *)it;
@@ -450,9 +453,9 @@ static const struct index_vtab memtx_hash_index_vtab = {
 struct memtx_hash_index *
 memtx_hash_index_new(struct memtx_engine *memtx, struct index_def *def)
 {
-	if (!mempool_is_initialized(&memtx->hash_iterator_pool)) {
-		mempool_create(&memtx->hash_iterator_pool, cord_slab_cache(),
-			       sizeof(struct hash_iterator));
+	if (!mempool_is_initialized(&memtx->iterator_pool)) {
+		mempool_create(&memtx->iterator_pool, cord_slab_cache(),
+			       MEMTX_ITERATOR_SIZE);
 	}
 
 	struct memtx_hash_index *index =
diff --git a/src/box/memtx_rtree.c b/src/box/memtx_rtree.c
index 0f5e0ac53..9cb93f150 100644
--- a/src/box/memtx_rtree.c
+++ b/src/box/memtx_rtree.c
@@ -127,6 +127,10 @@ struct index_rtree_iterator {
 	struct mempool *pool;
 };
 
+static_assert(sizeof(struct index_rtree_iterator) <= MEMTX_ITERATOR_SIZE,
+	      "index_rtree_iterator must be less or equal than "
+	      "MEMTX_ITERATOR_SIZE");
+
 static void
 index_rtree_iterator_free(struct iterator *i)
 {
@@ -284,14 +288,14 @@ memtx_rtree_index_create_iterator(struct index *base, enum iterator_type type,
 		return NULL;
 	}
 
-	struct index_rtree_iterator *it = mempool_alloc(&memtx->rtree_iterator_pool);
+	struct index_rtree_iterator *it = mempool_alloc(&memtx->iterator_pool);
 	if (it == NULL) {
 		diag_set(OutOfMemory, sizeof(struct index_rtree_iterator),
 			 "memtx_rtree_index", "iterator");
 		return NULL;
 	}
 	iterator_create(&it->base, base);
-	it->pool = &memtx->rtree_iterator_pool;
+	it->pool = &memtx->iterator_pool;
 	it->base.next = index_rtree_iterator_next;
 	it->base.free = index_rtree_iterator_free;
 	rtree_iterator_init(&it->impl);
@@ -351,9 +355,9 @@ memtx_rtree_index_new(struct memtx_engine *memtx, struct index_def *def)
 	enum rtree_distance_type distance_type =
 		(enum rtree_distance_type)def->opts.distance;
 
-	if (!mempool_is_initialized(&memtx->rtree_iterator_pool)) {
-		mempool_create(&memtx->rtree_iterator_pool, cord_slab_cache(),
-			       sizeof(struct index_rtree_iterator));
+	if (!mempool_is_initialized(&memtx->iterator_pool)) {
+		mempool_create(&memtx->iterator_pool, cord_slab_cache(),
+			       MEMTX_ITERATOR_SIZE);
 	}
 
 	struct memtx_rtree_index *index =
diff --git a/src/box/memtx_tree.c b/src/box/memtx_tree.c
index f851fb869..fe66427fc 100644
--- a/src/box/memtx_tree.c
+++ b/src/box/memtx_tree.c
@@ -61,6 +61,9 @@ struct tree_iterator {
 	struct mempool *pool;
 };
 
+static_assert(sizeof(struct tree_iterator) <= MEMTX_ITERATOR_SIZE,
+	      "tree_iterator must be less or equal than MEMTX_ITERATOR_SIZE");
+
 static void
 tree_iterator_free(struct iterator *iterator);
 
@@ -502,14 +505,14 @@ memtx_tree_index_create_iterator(struct index *base, enum iterator_type type,
 		key = NULL;
 	}
 
-	struct tree_iterator *it = mempool_alloc(&memtx->tree_iterator_pool);
+	struct tree_iterator *it = mempool_alloc(&memtx->iterator_pool);
 	if (it == NULL) {
 		diag_set(OutOfMemory, sizeof(struct tree_iterator),
 			 "memtx_tree_index", "iterator");
 		return NULL;
 	}
 	iterator_create(&it->base, base);
-	it->pool = &memtx->tree_iterator_pool;
+	it->pool = &memtx->iterator_pool;
 	it->base.next = tree_iterator_start;
 	it->base.free = tree_iterator_free;
 	it->type = type;
@@ -686,9 +689,9 @@ static const struct index_vtab memtx_tree_index_vtab = {
 struct memtx_tree_index *
 memtx_tree_index_new(struct memtx_engine *memtx, struct index_def *def)
 {
-	if (!mempool_is_initialized(&memtx->tree_iterator_pool)) {
-		mempool_create(&memtx->tree_iterator_pool, cord_slab_cache(),
-			       sizeof(struct tree_iterator));
+	if (!mempool_is_initialized(&memtx->iterator_pool)) {
+		mempool_create(&memtx->iterator_pool, cord_slab_cache(),
+			       MEMTX_ITERATOR_SIZE);
 	}
 
 	struct memtx_tree_index *index =
-- 
2.20.1