From: Vladimir Davydov <vdavydov.dev@gmail.com>
To: kostja@tarantool.org
Cc: tarantool-patches@freelists.org
Subject: [PATCH 3/8] memtx: pass engine to memory allocation functions
Date: Tue, 22 May 2018 14:46:11 +0300
Message-ID: <9723814bc47225881dabb75f4ebb72aab59f1724.1526987033.git.vdavydov.dev@gmail.com>
In-Reply-To: <cover.1526987033.git.vdavydov.dev@gmail.com>

We need this so that we can force garbage collection when we are short
on memory. There are two such functions: one is used for allocating
index extents, the other for allocating tuples. The index extent
allocator already takes an opaque context, so we simply reuse it to
pass the memtx engine. To pass the memtx engine to the tuple allocator,
we add an opaque engine-specific pointer to tuple_format (engine_data)
and set it to memtx_engine for memtx spaces.
---
 src/box/memtx_bitset.c |  2 +-
 src/box/memtx_engine.c | 13 ++++++++++---
 src/box/memtx_engine.h |  4 +++-
 src/box/memtx_hash.c   |  2 +-
 src/box/memtx_rtree.c  |  2 +-
 src/box/memtx_space.c  |  4 +++-
 src/box/memtx_tree.c   |  5 ++---
 src/box/tuple_format.c |  1 +
 src/box/tuple_format.h |  2 ++
 9 files changed, 24 insertions(+), 11 deletions(-)

diff --git a/src/box/memtx_bitset.c b/src/box/memtx_bitset.c
index 93b4a00a..e2252169 100644
--- a/src/box/memtx_bitset.c
+++ b/src/box/memtx_bitset.c
@@ -514,7 +514,7 @@ memtx_bitset_index_new(struct memtx_engine *memtx, struct index_def *def)
 	if (index->id_to_tuple == NULL)
 		panic("failed to allocate memtx bitset index");
 	matras_create(index->id_to_tuple, MEMTX_EXTENT_SIZE, sizeof(struct tuple *),
-		      memtx_index_extent_alloc, memtx_index_extent_free, NULL);
+		      memtx_index_extent_alloc, memtx_index_extent_free, memtx);
 
 	index->tuple_to_id = mh_bitset_index_new();
 	if (index->tuple_to_id == NULL)
diff --git a/src/box/memtx_engine.c b/src/box/memtx_engine.c
index 0c5136cf..70478d75 100644
--- a/src/box/memtx_engine.c
+++ b/src/box/memtx_engine.c
@@ -1059,6 +1059,8 @@ memtx_engine_set_max_tuple_size(struct memtx_engine *memtx, size_t max_size)
 struct tuple *
 memtx_tuple_new(struct tuple_format *format, const char *data, const char *end)
 {
+	struct memtx_engine *memtx = (struct memtx_engine *)format->engine_data;
+	(void)memtx;
 	assert(mp_typeof(*data) == MP_ARRAY);
 	size_t tuple_len = end - data;
 	size_t meta_size = tuple_format_meta_size(format);
@@ -1106,6 +1108,8 @@ memtx_tuple_new(struct tuple_format *format, const char *data, const char *end)
 void
 memtx_tuple_delete(struct tuple_format *format, struct tuple *tuple)
 {
+	struct memtx_engine *memtx = (struct memtx_engine *)format->engine_data;
+	(void)memtx;
 	say_debug("%s(%p)", __func__, tuple);
 	assert(tuple->refs == 0);
 #ifndef NDEBUG
@@ -1140,7 +1144,8 @@ struct tuple_format_vtab memtx_tuple_format_vtab = {
 void *
 memtx_index_extent_alloc(void *ctx)
 {
-	(void)ctx;
+	struct memtx_engine *memtx = (struct memtx_engine *)ctx;
+	(void)memtx;
 	if (memtx_index_reserved_extents) {
 		assert(memtx_index_num_reserved_extents > 0);
 		memtx_index_num_reserved_extents--;
@@ -1168,7 +1173,8 @@ memtx_index_extent_alloc(void *ctx)
 void
 memtx_index_extent_free(void *ctx, void *extent)
 {
-	(void)ctx;
+	struct memtx_engine *memtx = (struct memtx_engine *)ctx;
+	(void)memtx;
 	return mempool_free(&memtx_index_extent_pool, extent);
 }
 
@@ -1177,8 +1183,9 @@ memtx_index_extent_free(void *ctx, void *extent)
  * Ensure that next num extent_alloc will succeed w/o an error
  */
 int
-memtx_index_extent_reserve(int num)
+memtx_index_extent_reserve(struct memtx_engine *memtx, int num)
 {
+	(void)memtx;
 	ERROR_INJECT(ERRINJ_INDEX_ALLOC, {
 		/* same error as in mempool_alloc */
 		diag_set(OutOfMemory, MEMTX_EXTENT_SIZE,
diff --git a/src/box/memtx_engine.h b/src/box/memtx_engine.h
index 9f28c268..389314ba 100644
--- a/src/box/memtx_engine.h
+++ b/src/box/memtx_engine.h
@@ -168,12 +168,14 @@ enum {
 
 /**
  * Allocate a block of size MEMTX_EXTENT_SIZE for memtx index
+ * @ctx must point to memtx engine
  */
 void *
 memtx_index_extent_alloc(void *ctx);
 
 /**
  * Free a block previously allocated by memtx_index_extent_alloc
+ * @ctx must point to memtx engine
  */
 void
 memtx_index_extent_free(void *ctx, void *extent);
@@ -183,7 +185,7 @@ memtx_index_extent_free(void *ctx, void *extent);
  * Ensure that next num extent_alloc will succeed w/o an error
  */
 int
-memtx_index_extent_reserve(int num);
+memtx_index_extent_reserve(struct memtx_engine *memtx, int num);
 
 /**
  * Generic implementation of index_vtab::def_change_requires_rebuild,
diff --git a/src/box/memtx_hash.c b/src/box/memtx_hash.c
index 08ba84ad..d672cafe 100644
--- a/src/box/memtx_hash.c
+++ b/src/box/memtx_hash.c
@@ -490,7 +490,7 @@ memtx_hash_index_new(struct memtx_engine *memtx, struct index_def *def)
 
 	light_index_create(hash_table, HASH_INDEX_EXTENT_SIZE,
 			   memtx_index_extent_alloc, memtx_index_extent_free,
-			   NULL, index->base.def->key_def);
+			   memtx, index->base.def->key_def);
 	index->hash_table = hash_table;
 
 	if (def->iid == 0)
diff --git a/src/box/memtx_rtree.c b/src/box/memtx_rtree.c
index baeffa26..d751a9d8 100644
--- a/src/box/memtx_rtree.c
+++ b/src/box/memtx_rtree.c
@@ -370,7 +370,7 @@ memtx_rtree_index_new(struct memtx_engine *memtx, struct index_def *def)
 	index->dimension = def->opts.dimension;
 	rtree_init(&index->tree, index->dimension, MEMTX_EXTENT_SIZE,
-		   memtx_index_extent_alloc, memtx_index_extent_free, NULL,
+		   memtx_index_extent_alloc, memtx_index_extent_free, memtx,
 		   distance_type);
 
 	return index;
 }
diff --git a/src/box/memtx_space.c b/src/box/memtx_space.c
index 7c795b99..3dd97ddf 100644
--- a/src/box/memtx_space.c
+++ b/src/box/memtx_space.c
@@ -229,13 +229,14 @@ int
 memtx_space_replace_all_keys(struct space *space, struct txn_stmt *stmt,
 			     enum dup_replace_mode mode)
 {
+	struct memtx_engine *memtx = (struct memtx_engine *)space->engine;
 	struct tuple *old_tuple = stmt->old_tuple;
 	struct tuple *new_tuple = stmt->new_tuple;
 	/*
 	 * Ensure we have enough slack memory to guarantee
 	 * successful statement-level rollback.
 	 */
-	if (memtx_index_extent_reserve(new_tuple ?
+	if (memtx_index_extent_reserve(memtx, new_tuple != NULL ?
 			RESERVE_EXTENTS_BEFORE_REPLACE :
 			RESERVE_EXTENTS_BEFORE_DELETE) != 0)
 		return -1;
@@ -894,6 +895,7 @@ memtx_space_new(struct memtx_engine *memtx,
 		free(memtx_space);
 		return NULL;
 	}
+	format->engine_data = memtx;
 	format->exact_field_count = def->exact_field_count;
 	tuple_format_ref(format);
 
diff --git a/src/box/memtx_tree.c b/src/box/memtx_tree.c
index 0911af52..8814b13b 100644
--- a/src/box/memtx_tree.c
+++ b/src/box/memtx_tree.c
@@ -688,9 +688,8 @@ memtx_tree_index_new(struct memtx_engine *memtx, struct index_def *def)
 	}
 
 	struct key_def *cmp_def = memtx_tree_index_cmp_def(index);
-	memtx_tree_create(&index->tree, cmp_def,
-			  memtx_index_extent_alloc,
-			  memtx_index_extent_free, NULL);
+	memtx_tree_create(&index->tree, cmp_def, memtx_index_extent_alloc,
+			  memtx_index_extent_free, memtx);
 
 	if (def->iid == 0)
 		index->gc_task.func = memtx_tree_index_destroy_f;
diff --git a/src/box/tuple_format.c b/src/box/tuple_format.c
index d21477f8..bee71e4f 100644
--- a/src/box/tuple_format.c
+++ b/src/box/tuple_format.c
@@ -268,6 +268,7 @@ tuple_format_new(struct tuple_format_vtab *vtab, struct key_def * const *keys,
 	if (format == NULL)
 		return NULL;
 	format->vtab = *vtab;
+	format->engine_data = NULL;
 	format->extra_size = extra_size;
 	if (tuple_format_register(format) < 0) {
 		tuple_format_destroy(format);
diff --git a/src/box/tuple_format.h b/src/box/tuple_format.h
index 08c91a1a..3775b484 100644
--- a/src/box/tuple_format.h
+++ b/src/box/tuple_format.h
@@ -117,6 +117,8 @@ struct tuple_field {
 struct tuple_format {
 	/** Virtual function table */
 	struct tuple_format_vtab vtab;
+	/** Pointer to engine-specific data. */
+	void *engine_data;
 	/** Identifier */
 	uint16_t id;
 	/** Reference counter */
-- 
2.11.0
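For readers following the series: the point of handing the engine to the
allocators is that a later patch can trigger garbage collection straight
from the allocation path when memory runs out. Below is a minimal sketch of
what such an allocator could look like. It assumes the surrounding
memtx_engine.c context (memtx_index_extent_pool, MEMTX_EXTENT_SIZE, diag_set),
and the memtx_engine_collect_garbage() helper and its retry loop are purely
illustrative assumptions, not something introduced by this patch.

	void *
	memtx_index_extent_alloc(void *ctx)
	{
		struct memtx_engine *memtx = (struct memtx_engine *)ctx;
		void *extent;
		/* Keep retrying as long as there is something to reclaim. */
		while ((extent = mempool_alloc(&memtx_index_extent_pool)) == NULL) {
			/* Hypothetical hook: free tuples of dropped indexes. */
			if (!memtx_engine_collect_garbage(memtx))
				break;
		}
		if (extent == NULL)
			diag_set(OutOfMemory, MEMTX_EXTENT_SIZE,
				 "mempool", "new slab");
		return extent;
	}

The only thing this patch itself guarantees is that the engine pointer is
available at the call site; the actual reclamation policy is left to the
later patches in the series.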
Thread overview: 21+ messages in thread

2018-05-22 11:46 [PATCH 0/8] Follow-up on async memtx index cleanup Vladimir Davydov
2018-05-22 11:46 ` [PATCH 1/8] memtx: init index extent allocator in engine constructor Vladimir Davydov
2018-05-22 13:43   ` Konstantin Osipov
2018-05-22 11:46 ` [PATCH 2/8] memtx: fold memtx_tuple.cc into memtx_engine.c Vladimir Davydov
2018-05-22 13:45   ` Konstantin Osipov
2018-05-22 11:46 ` Vladimir Davydov [this message]
2018-05-22 13:47   ` [PATCH 3/8] memtx: pass engine to memory allocation functions Konstantin Osipov
2018-05-22 14:39     ` Vladimir Davydov
2018-05-22 11:46 ` [PATCH 4/8] memtx: move all global variables to engine Vladimir Davydov
2018-05-22 13:48   ` Konstantin Osipov
2018-05-22 11:46 ` [PATCH 5/8] memtx: destroy slab arena on engine shutdown Vladimir Davydov
2018-05-22 13:50   ` Konstantin Osipov
2018-05-22 16:26     ` Vladimir Davydov
2018-05-22 11:46 ` [PATCH 6/8] memtx: embed light hash into memtx_hash_index Vladimir Davydov
2018-05-22 13:51   ` Konstantin Osipov
2018-05-22 11:46 ` [PATCH 7/8] memtx: rework background garbage collection procedure Vladimir Davydov
2018-05-22 13:56   ` Konstantin Osipov
2018-05-22 14:49     ` Vladimir Davydov
2018-05-22 16:42       ` Konstantin Osipov
2018-05-22 11:46 ` [PATCH 8/8] memtx: run garbage collection on demand Vladimir Davydov
2018-05-22 14:00   ` Konstantin Osipov