* [PATCH v3 1/7] memtx: introduce universal iterator_pool
2019-02-22 15:42 [PATCH v3 0/7] box: introduce hint option for memtx tree index Kirill Shcherbatov
@ 2019-02-22 15:42 ` Kirill Shcherbatov
2019-02-22 18:37 ` [tarantool-patches] " Konstantin Osipov
2019-02-22 15:42 ` [PATCH v3 2/7] lib: fix undef _api_name in bps_tree header Kirill Shcherbatov
` (5 subsequent siblings)
6 siblings, 1 reply; 27+ messages in thread
From: Kirill Shcherbatov @ 2019-02-22 15:42 UTC (permalink / raw)
To: tarantool-patches, vdavydov.dev; +Cc: Kirill Shcherbatov
Memtx uses separate mempools for iterators of different types.
Due to the fact that there will be more iterators of different
sizes in a series of upcoming changes, let's always allocate the
iterator of the largest size.
Needed for #3961
---
src/box/memtx_bitset.c | 16 ++++++++++------
src/box/memtx_engine.c | 10 ++--------
src/box/memtx_engine.h | 17 +++++++++--------
src/box/memtx_hash.c | 15 +++++++++------
src/box/memtx_rtree.c | 14 +++++++++-----
src/box/memtx_tree.c | 13 ++++++++-----
6 files changed, 47 insertions(+), 38 deletions(-)
diff --git a/src/box/memtx_bitset.c b/src/box/memtx_bitset.c
index cd7362ee1..9dbc4141d 100644
--- a/src/box/memtx_bitset.c
+++ b/src/box/memtx_bitset.c
@@ -162,6 +162,10 @@ struct bitset_index_iterator {
struct mempool *pool;
};
+static_assert(sizeof(struct bitset_index_iterator) <= MEMTX_ITERATOR_SIZE,
+ "bitset_index_iterator must be less or equal than "
+ "MEMTX_ITERATOR_SIZE");
+
static struct bitset_index_iterator *
bitset_index_iterator(struct iterator *it)
{
@@ -318,7 +322,7 @@ memtx_bitset_index_create_iterator(struct index *base, enum iterator_type type,
(void) part_count;
struct bitset_index_iterator *it;
- it = mempool_alloc(&memtx->bitset_iterator_pool);
+ it = mempool_alloc(&memtx->iterator_pool);
if (!it) {
diag_set(OutOfMemory, sizeof(*it),
"memtx_bitset_index", "iterator");
@@ -326,7 +330,7 @@ memtx_bitset_index_create_iterator(struct index *base, enum iterator_type type,
}
iterator_create(&it->base, base);
- it->pool = &memtx->bitset_iterator_pool;
+ it->pool = &memtx->iterator_pool;
it->base.next = bitset_index_iterator_next;
it->base.free = bitset_index_iterator_free;
@@ -389,7 +393,7 @@ memtx_bitset_index_create_iterator(struct index *base, enum iterator_type type,
return (struct iterator *)it;
fail:
tt_bitset_expr_destroy(&expr);
- mempool_free(&memtx->bitset_iterator_pool, it);
+ mempool_free(&memtx->iterator_pool, it);
return NULL;
}
@@ -493,9 +497,9 @@ memtx_bitset_index_new(struct memtx_engine *memtx, struct index_def *def)
assert(def->iid > 0);
assert(!def->opts.is_unique);
- if (!mempool_is_initialized(&memtx->bitset_iterator_pool)) {
- mempool_create(&memtx->bitset_iterator_pool, cord_slab_cache(),
- sizeof(struct bitset_index_iterator));
+ if (!mempool_is_initialized(&memtx->iterator_pool)) {
+ mempool_create(&memtx->iterator_pool, cord_slab_cache(),
+ MEMTX_ITERATOR_SIZE);
}
struct memtx_bitset_index *index =
diff --git a/src/box/memtx_engine.c b/src/box/memtx_engine.c
index 64f43456e..646001cf8 100644
--- a/src/box/memtx_engine.c
+++ b/src/box/memtx_engine.c
@@ -176,14 +176,8 @@ static void
memtx_engine_shutdown(struct engine *engine)
{
struct memtx_engine *memtx = (struct memtx_engine *)engine;
- if (mempool_is_initialized(&memtx->tree_iterator_pool))
- mempool_destroy(&memtx->tree_iterator_pool);
- if (mempool_is_initialized(&memtx->rtree_iterator_pool))
- mempool_destroy(&memtx->rtree_iterator_pool);
- if (mempool_is_initialized(&memtx->hash_iterator_pool))
- mempool_destroy(&memtx->hash_iterator_pool);
- if (mempool_is_initialized(&memtx->bitset_iterator_pool))
- mempool_destroy(&memtx->bitset_iterator_pool);
+ if (mempool_is_initialized(&memtx->iterator_pool))
+ mempool_destroy(&memtx->iterator_pool);
mempool_destroy(&memtx->index_extent_pool);
slab_cache_destroy(&memtx->index_slab_cache);
small_alloc_destroy(&memtx->alloc);
diff --git a/src/box/memtx_engine.h b/src/box/memtx_engine.h
index 0f8e92ee4..2cd4ba771 100644
--- a/src/box/memtx_engine.h
+++ b/src/box/memtx_engine.h
@@ -87,6 +87,13 @@ enum memtx_recovery_state {
/** Memtx extents pool, available to statistics. */
extern struct mempool memtx_index_extent_pool;
+/**
+ * The size of the biggest memtx iterator. Used with
+ * mempool_create. This is the size of the block that will be
+ * allocated for each iterator.
+ */
+#define MEMTX_ITERATOR_SIZE (696)
+
struct memtx_engine {
struct engine base;
/** Engine recovery state. */
@@ -129,14 +136,8 @@ struct memtx_engine {
size_t max_tuple_size;
/** Incremented with each next snapshot. */
uint32_t snapshot_version;
- /** Memory pool for tree index iterator. */
- struct mempool tree_iterator_pool;
- /** Memory pool for rtree index iterator. */
- struct mempool rtree_iterator_pool;
- /** Memory pool for hash index iterator. */
- struct mempool hash_iterator_pool;
- /** Memory pool for bitset index iterator. */
- struct mempool bitset_iterator_pool;
+ /** Memory pool for index iterator. */
+ struct mempool iterator_pool;
/**
* Garbage collection fiber. Used for asynchronous
* destruction of dropped indexes.
diff --git a/src/box/memtx_hash.c b/src/box/memtx_hash.c
index 511d0e515..b35a52528 100644
--- a/src/box/memtx_hash.c
+++ b/src/box/memtx_hash.c
@@ -49,6 +49,9 @@ struct hash_iterator {
struct mempool *pool;
};
+static_assert(sizeof(struct hash_iterator) <= MEMTX_ITERATOR_SIZE,
+ "hash_iterator must be less or equal than MEMTX_ITERATOR_SIZE");
+
static void
hash_iterator_free(struct iterator *iterator)
{
@@ -311,14 +314,14 @@ memtx_hash_index_create_iterator(struct index *base, enum iterator_type type,
assert(part_count == 0 || key != NULL);
- struct hash_iterator *it = mempool_alloc(&memtx->hash_iterator_pool);
+ struct hash_iterator *it = mempool_alloc(&memtx->iterator_pool);
if (it == NULL) {
diag_set(OutOfMemory, sizeof(struct hash_iterator),
"memtx_hash_index", "iterator");
return NULL;
}
iterator_create(&it->base, base);
- it->pool = &memtx->hash_iterator_pool;
+ it->pool = &memtx->iterator_pool;
it->base.free = hash_iterator_free;
it->hash_table = &index->hash_table;
light_index_iterator_begin(it->hash_table, &it->iterator);
@@ -347,7 +350,7 @@ memtx_hash_index_create_iterator(struct index *base, enum iterator_type type,
default:
diag_set(UnsupportedIndexFeature, base->def,
"requested iterator type");
- mempool_free(&memtx->hash_iterator_pool, it);
+ mempool_free(&memtx->iterator_pool, it);
return NULL;
}
return (struct iterator *)it;
@@ -450,9 +453,9 @@ static const struct index_vtab memtx_hash_index_vtab = {
struct memtx_hash_index *
memtx_hash_index_new(struct memtx_engine *memtx, struct index_def *def)
{
- if (!mempool_is_initialized(&memtx->hash_iterator_pool)) {
- mempool_create(&memtx->hash_iterator_pool, cord_slab_cache(),
- sizeof(struct hash_iterator));
+ if (!mempool_is_initialized(&memtx->iterator_pool)) {
+ mempool_create(&memtx->iterator_pool, cord_slab_cache(),
+ MEMTX_ITERATOR_SIZE);
}
struct memtx_hash_index *index =
diff --git a/src/box/memtx_rtree.c b/src/box/memtx_rtree.c
index 0f5e0ac53..9cb93f150 100644
--- a/src/box/memtx_rtree.c
+++ b/src/box/memtx_rtree.c
@@ -127,6 +127,10 @@ struct index_rtree_iterator {
struct mempool *pool;
};
+static_assert(sizeof(struct index_rtree_iterator) <= MEMTX_ITERATOR_SIZE,
+ "index_rtree_iterator must be less or equal than "
+ "MEMTX_ITERATOR_SIZE");
+
static void
index_rtree_iterator_free(struct iterator *i)
{
@@ -284,14 +288,14 @@ memtx_rtree_index_create_iterator(struct index *base, enum iterator_type type,
return NULL;
}
- struct index_rtree_iterator *it = mempool_alloc(&memtx->rtree_iterator_pool);
+ struct index_rtree_iterator *it = mempool_alloc(&memtx->iterator_pool);
if (it == NULL) {
diag_set(OutOfMemory, sizeof(struct index_rtree_iterator),
"memtx_rtree_index", "iterator");
return NULL;
}
iterator_create(&it->base, base);
- it->pool = &memtx->rtree_iterator_pool;
+ it->pool = &memtx->iterator_pool;
it->base.next = index_rtree_iterator_next;
it->base.free = index_rtree_iterator_free;
rtree_iterator_init(&it->impl);
@@ -351,9 +355,9 @@ memtx_rtree_index_new(struct memtx_engine *memtx, struct index_def *def)
enum rtree_distance_type distance_type =
(enum rtree_distance_type)def->opts.distance;
- if (!mempool_is_initialized(&memtx->rtree_iterator_pool)) {
- mempool_create(&memtx->rtree_iterator_pool, cord_slab_cache(),
- sizeof(struct index_rtree_iterator));
+ if (!mempool_is_initialized(&memtx->iterator_pool)) {
+ mempool_create(&memtx->iterator_pool, cord_slab_cache(),
+ MEMTX_ITERATOR_SIZE);
}
struct memtx_rtree_index *index =
diff --git a/src/box/memtx_tree.c b/src/box/memtx_tree.c
index f851fb869..fe66427fc 100644
--- a/src/box/memtx_tree.c
+++ b/src/box/memtx_tree.c
@@ -61,6 +61,9 @@ struct tree_iterator {
struct mempool *pool;
};
+static_assert(sizeof(struct tree_iterator) <= MEMTX_ITERATOR_SIZE,
+ "tree_iterator must be less or equal than MEMTX_ITERATOR_SIZE");
+
static void
tree_iterator_free(struct iterator *iterator);
@@ -502,14 +505,14 @@ memtx_tree_index_create_iterator(struct index *base, enum iterator_type type,
key = NULL;
}
- struct tree_iterator *it = mempool_alloc(&memtx->tree_iterator_pool);
+ struct tree_iterator *it = mempool_alloc(&memtx->iterator_pool);
if (it == NULL) {
diag_set(OutOfMemory, sizeof(struct tree_iterator),
"memtx_tree_index", "iterator");
return NULL;
}
iterator_create(&it->base, base);
- it->pool = &memtx->tree_iterator_pool;
+ it->pool = &memtx->iterator_pool;
it->base.next = tree_iterator_start;
it->base.free = tree_iterator_free;
it->type = type;
@@ -686,9 +689,9 @@ static const struct index_vtab memtx_tree_index_vtab = {
struct memtx_tree_index *
memtx_tree_index_new(struct memtx_engine *memtx, struct index_def *def)
{
- if (!mempool_is_initialized(&memtx->tree_iterator_pool)) {
- mempool_create(&memtx->tree_iterator_pool, cord_slab_cache(),
- sizeof(struct tree_iterator));
+ if (!mempool_is_initialized(&memtx->iterator_pool)) {
+ mempool_create(&memtx->iterator_pool, cord_slab_cache(),
+ MEMTX_ITERATOR_SIZE);
}
struct memtx_tree_index *index =
--
2.20.1
^ permalink raw reply [flat|nested] 27+ messages in thread
* Re: [tarantool-patches] [PATCH v3 1/7] memtx: introduce universal iterator_pool
2019-02-22 15:42 ` [PATCH v3 1/7] memtx: introduce universal iterator_pool Kirill Shcherbatov
@ 2019-02-22 18:37 ` Konstantin Osipov
2019-02-23 12:03 ` Kirill Shcherbatov
2019-02-24 6:56 ` [tarantool-patches] " Vladimir Davydov
0 siblings, 2 replies; 27+ messages in thread
From: Konstantin Osipov @ 2019-02-22 18:37 UTC (permalink / raw)
To: tarantool-patches; +Cc: vdavydov.dev, Kirill Shcherbatov
* Kirill Shcherbatov <kshcherbatov@tarantool.org> [19/02/22 19:29]:
> Memtx uses separate mempools for iterators of different types.
> Due to the fact that there will be more iterators of different
> sizes in a series of upcoming changes, let's always allocate the
> iterator of the largest size.
If the rtree iterator is the largest one, let's use a
separate pool for it.
In general mempools are rather cheap. Each mempool takes a slab
for ~100 objects and uses no slabs if there are no objects (e.g.
if rtree index is not used, there is no mempool memory for it).
--
Konstantin Osipov, Moscow, Russia, +7 903 626 22 32
http://tarantool.io - www.twitter.com/kostja_osipov
^ permalink raw reply [flat|nested] 27+ messages in thread
* Re: [tarantool-patches] [PATCH v3 1/7] memtx: introduce universal iterator_pool
2019-02-22 18:37 ` [tarantool-patches] " Konstantin Osipov
@ 2019-02-23 12:03 ` Kirill Shcherbatov
2019-02-25 16:14 ` Vladimir Davydov
2019-02-24 6:56 ` [tarantool-patches] " Vladimir Davydov
1 sibling, 1 reply; 27+ messages in thread
From: Kirill Shcherbatov @ 2019-02-23 12:03 UTC (permalink / raw)
To: Konstantin Osipov, tarantool-patches; +Cc: vdavydov.dev
> If rtree iterator is the one which is largest, let's use a
> separate pool for it.
Memtx uses separate mempools for iterators of different types.
Due to the fact that there will be more iterators of different
sizes in a series of upcoming changes, let's always allocate the
iterator of the largest size.
No changes have been made to the rtree iterator pool because the
size of its iterator structure is significantly larger.
Needed for #3961
---
src/box/memtx_bitset.c | 16 ++++++++++------
src/box/memtx_engine.c | 8 ++------
src/box/memtx_engine.h | 16 ++++++++++------
src/box/memtx_hash.c | 15 +++++++++------
src/box/memtx_tree.c | 13 ++++++++-----
5 files changed, 39 insertions(+), 29 deletions(-)
diff --git a/src/box/memtx_bitset.c b/src/box/memtx_bitset.c
index cd7362ee1..9dbc4141d 100644
--- a/src/box/memtx_bitset.c
+++ b/src/box/memtx_bitset.c
@@ -162,6 +162,10 @@ struct bitset_index_iterator {
struct mempool *pool;
};
+static_assert(sizeof(struct bitset_index_iterator) <= MEMTX_ITERATOR_SIZE,
+ "bitset_index_iterator must be less or equal than "
+ "MEMTX_ITERATOR_SIZE");
+
static struct bitset_index_iterator *
bitset_index_iterator(struct iterator *it)
{
@@ -318,7 +322,7 @@ memtx_bitset_index_create_iterator(struct index *base, enum iterator_type type,
(void) part_count;
struct bitset_index_iterator *it;
- it = mempool_alloc(&memtx->bitset_iterator_pool);
+ it = mempool_alloc(&memtx->iterator_pool);
if (!it) {
diag_set(OutOfMemory, sizeof(*it),
"memtx_bitset_index", "iterator");
@@ -326,7 +330,7 @@ memtx_bitset_index_create_iterator(struct index *base, enum iterator_type type,
}
iterator_create(&it->base, base);
- it->pool = &memtx->bitset_iterator_pool;
+ it->pool = &memtx->iterator_pool;
it->base.next = bitset_index_iterator_next;
it->base.free = bitset_index_iterator_free;
@@ -389,7 +393,7 @@ memtx_bitset_index_create_iterator(struct index *base, enum iterator_type type,
return (struct iterator *)it;
fail:
tt_bitset_expr_destroy(&expr);
- mempool_free(&memtx->bitset_iterator_pool, it);
+ mempool_free(&memtx->iterator_pool, it);
return NULL;
}
@@ -493,9 +497,9 @@ memtx_bitset_index_new(struct memtx_engine *memtx, struct index_def *def)
assert(def->iid > 0);
assert(!def->opts.is_unique);
- if (!mempool_is_initialized(&memtx->bitset_iterator_pool)) {
- mempool_create(&memtx->bitset_iterator_pool, cord_slab_cache(),
- sizeof(struct bitset_index_iterator));
+ if (!mempool_is_initialized(&memtx->iterator_pool)) {
+ mempool_create(&memtx->iterator_pool, cord_slab_cache(),
+ MEMTX_ITERATOR_SIZE);
}
struct memtx_bitset_index *index =
diff --git a/src/box/memtx_engine.c b/src/box/memtx_engine.c
index 64f43456e..fd82b1189 100644
--- a/src/box/memtx_engine.c
+++ b/src/box/memtx_engine.c
@@ -176,14 +176,10 @@ static void
memtx_engine_shutdown(struct engine *engine)
{
struct memtx_engine *memtx = (struct memtx_engine *)engine;
- if (mempool_is_initialized(&memtx->tree_iterator_pool))
- mempool_destroy(&memtx->tree_iterator_pool);
+ if (mempool_is_initialized(&memtx->iterator_pool))
+ mempool_destroy(&memtx->iterator_pool);
if (mempool_is_initialized(&memtx->rtree_iterator_pool))
mempool_destroy(&memtx->rtree_iterator_pool);
- if (mempool_is_initialized(&memtx->hash_iterator_pool))
- mempool_destroy(&memtx->hash_iterator_pool);
- if (mempool_is_initialized(&memtx->bitset_iterator_pool))
- mempool_destroy(&memtx->bitset_iterator_pool);
mempool_destroy(&memtx->index_extent_pool);
slab_cache_destroy(&memtx->index_slab_cache);
small_alloc_destroy(&memtx->alloc);
diff --git a/src/box/memtx_engine.h b/src/box/memtx_engine.h
index 0f8e92ee4..5859390d8 100644
--- a/src/box/memtx_engine.h
+++ b/src/box/memtx_engine.h
@@ -87,6 +87,14 @@ enum memtx_recovery_state {
/** Memtx extents pool, available to statistics. */
extern struct mempool memtx_index_extent_pool;
+/**
+ * The size of the biggest memtx iterator. Used with
+ * mempool_create. This is the size of the block that will be
+ * allocated for each iterator (except rtree index iterator that
+ * is significantly bigger so has own pool).
+ */
+#define MEMTX_ITERATOR_SIZE (696)
+
struct memtx_engine {
struct engine base;
/** Engine recovery state. */
@@ -129,14 +137,10 @@ struct memtx_engine {
size_t max_tuple_size;
/** Incremented with each next snapshot. */
uint32_t snapshot_version;
- /** Memory pool for tree index iterator. */
- struct mempool tree_iterator_pool;
/** Memory pool for rtree index iterator. */
struct mempool rtree_iterator_pool;
- /** Memory pool for hash index iterator. */
- struct mempool hash_iterator_pool;
- /** Memory pool for bitset index iterator. */
- struct mempool bitset_iterator_pool;
+ /** Memory pool for index iterator. */
+ struct mempool iterator_pool;
/**
* Garbage collection fiber. Used for asynchronous
* destruction of dropped indexes.
diff --git a/src/box/memtx_hash.c b/src/box/memtx_hash.c
index 511d0e515..b35a52528 100644
--- a/src/box/memtx_hash.c
+++ b/src/box/memtx_hash.c
@@ -49,6 +49,9 @@ struct hash_iterator {
struct mempool *pool;
};
+static_assert(sizeof(struct hash_iterator) <= MEMTX_ITERATOR_SIZE,
+ "hash_iterator must be less or equal than MEMTX_ITERATOR_SIZE");
+
static void
hash_iterator_free(struct iterator *iterator)
{
@@ -311,14 +314,14 @@ memtx_hash_index_create_iterator(struct index *base, enum iterator_type type,
assert(part_count == 0 || key != NULL);
- struct hash_iterator *it = mempool_alloc(&memtx->hash_iterator_pool);
+ struct hash_iterator *it = mempool_alloc(&memtx->iterator_pool);
if (it == NULL) {
diag_set(OutOfMemory, sizeof(struct hash_iterator),
"memtx_hash_index", "iterator");
return NULL;
}
iterator_create(&it->base, base);
- it->pool = &memtx->hash_iterator_pool;
+ it->pool = &memtx->iterator_pool;
it->base.free = hash_iterator_free;
it->hash_table = &index->hash_table;
light_index_iterator_begin(it->hash_table, &it->iterator);
@@ -347,7 +350,7 @@ memtx_hash_index_create_iterator(struct index *base, enum iterator_type type,
default:
diag_set(UnsupportedIndexFeature, base->def,
"requested iterator type");
- mempool_free(&memtx->hash_iterator_pool, it);
+ mempool_free(&memtx->iterator_pool, it);
return NULL;
}
return (struct iterator *)it;
@@ -450,9 +453,9 @@ static const struct index_vtab memtx_hash_index_vtab = {
struct memtx_hash_index *
memtx_hash_index_new(struct memtx_engine *memtx, struct index_def *def)
{
- if (!mempool_is_initialized(&memtx->hash_iterator_pool)) {
- mempool_create(&memtx->hash_iterator_pool, cord_slab_cache(),
- sizeof(struct hash_iterator));
+ if (!mempool_is_initialized(&memtx->iterator_pool)) {
+ mempool_create(&memtx->iterator_pool, cord_slab_cache(),
+ MEMTX_ITERATOR_SIZE);
}
struct memtx_hash_index *index =
diff --git a/src/box/memtx_tree.c b/src/box/memtx_tree.c
index f851fb869..fe66427fc 100644
--- a/src/box/memtx_tree.c
+++ b/src/box/memtx_tree.c
@@ -61,6 +61,9 @@ struct tree_iterator {
struct mempool *pool;
};
+static_assert(sizeof(struct tree_iterator) <= MEMTX_ITERATOR_SIZE,
+ "tree_iterator must be less or equal than MEMTX_ITERATOR_SIZE");
+
static void
tree_iterator_free(struct iterator *iterator);
@@ -502,14 +505,14 @@ memtx_tree_index_create_iterator(struct index *base, enum iterator_type type,
key = NULL;
}
- struct tree_iterator *it = mempool_alloc(&memtx->tree_iterator_pool);
+ struct tree_iterator *it = mempool_alloc(&memtx->iterator_pool);
if (it == NULL) {
diag_set(OutOfMemory, sizeof(struct tree_iterator),
"memtx_tree_index", "iterator");
return NULL;
}
iterator_create(&it->base, base);
- it->pool = &memtx->tree_iterator_pool;
+ it->pool = &memtx->iterator_pool;
it->base.next = tree_iterator_start;
it->base.free = tree_iterator_free;
it->type = type;
@@ -686,9 +689,9 @@ static const struct index_vtab memtx_tree_index_vtab = {
struct memtx_tree_index *
memtx_tree_index_new(struct memtx_engine *memtx, struct index_def *def)
{
- if (!mempool_is_initialized(&memtx->tree_iterator_pool)) {
- mempool_create(&memtx->tree_iterator_pool, cord_slab_cache(),
- sizeof(struct tree_iterator));
+ if (!mempool_is_initialized(&memtx->iterator_pool)) {
+ mempool_create(&memtx->iterator_pool, cord_slab_cache(),
+ MEMTX_ITERATOR_SIZE);
}
struct memtx_tree_index *index =
--
2.20.1
^ permalink raw reply [flat|nested] 27+ messages in thread
* Re: [tarantool-patches] [PATCH v3 1/7] memtx: introduce universal iterator_pool
2019-02-23 12:03 ` Kirill Shcherbatov
@ 2019-02-25 16:14 ` Vladimir Davydov
2019-02-25 16:39 ` [tarantool-patches] " Kirill Shcherbatov
0 siblings, 1 reply; 27+ messages in thread
From: Vladimir Davydov @ 2019-02-25 16:14 UTC (permalink / raw)
To: Kirill Shcherbatov; +Cc: Konstantin Osipov, tarantool-patches
On Sat, Feb 23, 2019 at 03:03:02PM +0300, Kirill Shcherbatov wrote:
> diff --git a/src/box/memtx_bitset.c b/src/box/memtx_bitset.c
> index cd7362ee1..9dbc4141d 100644
> --- a/src/box/memtx_bitset.c
> +++ b/src/box/memtx_bitset.c
> @@ -162,6 +162,10 @@ struct bitset_index_iterator {
> struct mempool *pool;
> };
>
> +static_assert(sizeof(struct bitset_index_iterator) <= MEMTX_ITERATOR_SIZE,
> + "bitset_index_iterator must be less or equal than "
"sizeof(struct bitset_index_iterator) must be less than or equal to MEMTX_ITERATOR_SIZE"
Ditto for other static assertions.
> + "MEMTX_ITERATOR_SIZE");
> +
> static struct bitset_index_iterator *
> bitset_index_iterator(struct iterator *it)
> {
> @@ -493,9 +497,9 @@ memtx_bitset_index_new(struct memtx_engine *memtx, struct index_def *def)
> assert(def->iid > 0);
> assert(!def->opts.is_unique);
>
> - if (!mempool_is_initialized(&memtx->bitset_iterator_pool)) {
> - mempool_create(&memtx->bitset_iterator_pool, cord_slab_cache(),
> - sizeof(struct bitset_index_iterator));
> + if (!mempool_is_initialized(&memtx->iterator_pool)) {
> + mempool_create(&memtx->iterator_pool, cord_slab_cache(),
> + MEMTX_ITERATOR_SIZE);
Since there's one pool for bitset, hash, and tree indexes, size of which
is known in advance, we can initialize it in the engine constructor and
destroy it unconditionally in the engine destructor.
> diff --git a/src/box/memtx_engine.h b/src/box/memtx_engine.h
> index 0f8e92ee4..5859390d8 100644
> --- a/src/box/memtx_engine.h
> +++ b/src/box/memtx_engine.h
> @@ -87,6 +87,14 @@ enum memtx_recovery_state {
> /** Memtx extents pool, available to statistics. */
> extern struct mempool memtx_index_extent_pool;
>
> +/**
> + * The size of the biggest memtx iterator. Used with
> + * mempool_create. This is the size of the block that will be
> + * allocated for each iterator (except rtree index iterator that
> + * is significantly bigger so has own pool).
> + */
> +#define MEMTX_ITERATOR_SIZE (696)
Should be about 150 bytes without rtree, no?
> +
> struct memtx_engine {
> struct engine base;
> /** Engine recovery state. */
> @@ -129,14 +137,10 @@ struct memtx_engine {
> size_t max_tuple_size;
> /** Incremented with each next snapshot. */
> uint32_t snapshot_version;
> - /** Memory pool for tree index iterator. */
> - struct mempool tree_iterator_pool;
> /** Memory pool for rtree index iterator. */
> struct mempool rtree_iterator_pool;
> - /** Memory pool for hash index iterator. */
> - struct mempool hash_iterator_pool;
> - /** Memory pool for bitset index iterator. */
> - struct mempool bitset_iterator_pool;
> + /** Memory pool for index iterator. */
Memory pool for all index iterators except rtree.
The latter is significantly larger so it has its
own memory pool.
> + struct mempool iterator_pool;
^ permalink raw reply [flat|nested] 27+ messages in thread
* Re: [tarantool-patches] Re: [PATCH v3 1/7] memtx: introduce universal iterator_pool
2019-02-25 16:14 ` Vladimir Davydov
@ 2019-02-25 16:39 ` Kirill Shcherbatov
2019-02-25 17:14 ` Vladimir Davydov
0 siblings, 1 reply; 27+ messages in thread
From: Kirill Shcherbatov @ 2019-02-25 16:39 UTC (permalink / raw)
To: tarantool-patches, Vladimir Davydov; +Cc: Konstantin Osipov
Memtx uses separate mempools for iterators of different types.
Due to the fact that there will be more iterators of different
sizes in a series of upcoming changes, let's always allocate the
iterator of the largest size.
No changes have been made to the rtree iterator pool because the
size of its iterator structure is significantly larger.
Needed for #3961
---
src/box/memtx_bitset.c | 15 +++++++--------
src/box/memtx_engine.c | 9 +++------
src/box/memtx_engine.h | 20 ++++++++++++++------
src/box/memtx_hash.c | 15 +++++++--------
src/box/memtx_tree.c | 13 ++++++-------
5 files changed, 37 insertions(+), 35 deletions(-)
diff --git a/src/box/memtx_bitset.c b/src/box/memtx_bitset.c
index 12124c3f3..b6f8f7ac6 100644
--- a/src/box/memtx_bitset.c
+++ b/src/box/memtx_bitset.c
@@ -175,6 +175,10 @@ struct bitset_index_iterator {
struct mempool *pool;
};
+static_assert(sizeof(struct bitset_index_iterator) <= MEMTX_ITERATOR_SIZE,
+ "sizeof(struct bitset_index_iterator) must be less than or equal "
+ "to MEMTX_ITERATOR_SIZE");
+
static struct bitset_index_iterator *
bitset_index_iterator(struct iterator *it)
{
@@ -331,7 +335,7 @@ memtx_bitset_index_create_iterator(struct index *base, enum iterator_type type,
(void) part_count;
struct bitset_index_iterator *it;
- it = mempool_alloc(&memtx->bitset_iterator_pool);
+ it = mempool_alloc(&memtx->iterator_pool);
if (!it) {
diag_set(OutOfMemory, sizeof(*it),
"memtx_bitset_index", "iterator");
@@ -339,7 +343,7 @@ memtx_bitset_index_create_iterator(struct index *base, enum iterator_type type,
}
iterator_create(&it->base, base);
- it->pool = &memtx->bitset_iterator_pool;
+ it->pool = &memtx->iterator_pool;
it->base.next = bitset_index_iterator_next;
it->base.free = bitset_index_iterator_free;
@@ -402,7 +406,7 @@ memtx_bitset_index_create_iterator(struct index *base, enum iterator_type type,
return (struct iterator *)it;
fail:
tt_bitset_expr_destroy(&expr);
- mempool_free(&memtx->bitset_iterator_pool, it);
+ mempool_free(&memtx->iterator_pool, it);
return NULL;
}
@@ -506,11 +510,6 @@ memtx_bitset_index_new(struct memtx_engine *memtx, struct index_def *def)
assert(def->iid > 0);
assert(!def->opts.is_unique);
- if (!mempool_is_initialized(&memtx->bitset_iterator_pool)) {
- mempool_create(&memtx->bitset_iterator_pool, cord_slab_cache(),
- sizeof(struct bitset_index_iterator));
- }
-
struct memtx_bitset_index *index =
(struct memtx_bitset_index *)calloc(1, sizeof(*index));
if (index == NULL) {
diff --git a/src/box/memtx_engine.c b/src/box/memtx_engine.c
index 64f43456e..b8638680b 100644
--- a/src/box/memtx_engine.c
+++ b/src/box/memtx_engine.c
@@ -176,14 +176,9 @@ static void
memtx_engine_shutdown(struct engine *engine)
{
struct memtx_engine *memtx = (struct memtx_engine *)engine;
- if (mempool_is_initialized(&memtx->tree_iterator_pool))
- mempool_destroy(&memtx->tree_iterator_pool);
+ mempool_destroy(&memtx->iterator_pool);
if (mempool_is_initialized(&memtx->rtree_iterator_pool))
mempool_destroy(&memtx->rtree_iterator_pool);
- if (mempool_is_initialized(&memtx->hash_iterator_pool))
- mempool_destroy(&memtx->hash_iterator_pool);
- if (mempool_is_initialized(&memtx->bitset_iterator_pool))
- mempool_destroy(&memtx->bitset_iterator_pool);
mempool_destroy(&memtx->index_extent_pool);
slab_cache_destroy(&memtx->index_slab_cache);
small_alloc_destroy(&memtx->alloc);
@@ -1069,6 +1064,8 @@ memtx_engine_new(const char *snap_dirname, bool force_recovery,
slab_cache_create(&memtx->index_slab_cache, &memtx->arena);
mempool_create(&memtx->index_extent_pool, &memtx->index_slab_cache,
MEMTX_EXTENT_SIZE);
+ mempool_create(&memtx->iterator_pool, cord_slab_cache(),
+ MEMTX_ITERATOR_SIZE);
memtx->num_reserved_extents = 0;
memtx->reserved_extents = NULL;
diff --git a/src/box/memtx_engine.h b/src/box/memtx_engine.h
index 0f8e92ee4..8f4ce7cdd 100644
--- a/src/box/memtx_engine.h
+++ b/src/box/memtx_engine.h
@@ -87,6 +87,14 @@ enum memtx_recovery_state {
/** Memtx extents pool, available to statistics. */
extern struct mempool memtx_index_extent_pool;
+/**
+ * The size of the biggest memtx iterator. Used with
+ * mempool_create. This is the size of the block that will be
+ * allocated for each iterator (except rtree index iterator that
+ * is significantly bigger so has own pool).
+ */
+#define MEMTX_ITERATOR_SIZE (152)
+
struct memtx_engine {
struct engine base;
/** Engine recovery state. */
@@ -129,14 +137,14 @@ struct memtx_engine {
size_t max_tuple_size;
/** Incremented with each next snapshot. */
uint32_t snapshot_version;
- /** Memory pool for tree index iterator. */
- struct mempool tree_iterator_pool;
/** Memory pool for rtree index iterator. */
struct mempool rtree_iterator_pool;
- /** Memory pool for hash index iterator. */
- struct mempool hash_iterator_pool;
- /** Memory pool for bitset index iterator. */
- struct mempool bitset_iterator_pool;
+ /**
+ * Memory pool for all index iterators except rtree.
+ * The latter is significantly larger so it has its
+ * own memory pool.
+ */
+ struct mempool iterator_pool;
/**
* Garbage collection fiber. Used for asynchronous
* destruction of dropped indexes.
diff --git a/src/box/memtx_hash.c b/src/box/memtx_hash.c
index 63bdff208..ff478bdca 100644
--- a/src/box/memtx_hash.c
+++ b/src/box/memtx_hash.c
@@ -88,6 +88,10 @@ struct hash_iterator {
struct mempool *pool;
};
+static_assert(sizeof(struct hash_iterator) <= MEMTX_ITERATOR_SIZE,
+ "sizeof(struct hash_iterator) must be less than or equal "
+ "to MEMTX_ITERATOR_SIZE");
+
static void
hash_iterator_free(struct iterator *iterator)
{
@@ -350,14 +354,14 @@ memtx_hash_index_create_iterator(struct index *base, enum iterator_type type,
assert(part_count == 0 || key != NULL);
- struct hash_iterator *it = mempool_alloc(&memtx->hash_iterator_pool);
+ struct hash_iterator *it = mempool_alloc(&memtx->iterator_pool);
if (it == NULL) {
diag_set(OutOfMemory, sizeof(struct hash_iterator),
"memtx_hash_index", "iterator");
return NULL;
}
iterator_create(&it->base, base);
- it->pool = &memtx->hash_iterator_pool;
+ it->pool = &memtx->iterator_pool;
it->base.free = hash_iterator_free;
it->hash_table = &index->hash_table;
light_index_iterator_begin(it->hash_table, &it->iterator);
@@ -386,7 +390,7 @@ memtx_hash_index_create_iterator(struct index *base, enum iterator_type type,
default:
diag_set(UnsupportedIndexFeature, base->def,
"requested iterator type");
- mempool_free(&memtx->hash_iterator_pool, it);
+ mempool_free(&memtx->iterator_pool, it);
return NULL;
}
return (struct iterator *)it;
@@ -489,11 +493,6 @@ static const struct index_vtab memtx_hash_index_vtab = {
struct index *
memtx_hash_index_new(struct memtx_engine *memtx, struct index_def *def)
{
- if (!mempool_is_initialized(&memtx->hash_iterator_pool)) {
- mempool_create(&memtx->hash_iterator_pool, cord_slab_cache(),
- sizeof(struct hash_iterator));
- }
-
struct memtx_hash_index *index =
(struct memtx_hash_index *)calloc(1, sizeof(*index));
if (index == NULL) {
diff --git a/src/box/memtx_tree.c b/src/box/memtx_tree.c
index 2e8adb682..250df7f2d 100644
--- a/src/box/memtx_tree.c
+++ b/src/box/memtx_tree.c
@@ -119,6 +119,10 @@ struct tree_iterator {
struct mempool *pool;
};
+static_assert(sizeof(struct tree_iterator) <= MEMTX_ITERATOR_SIZE,
+ "sizeof(struct tree_iterator) must be less than or equal "
+ "to MEMTX_ITERATOR_SIZE");
+
static void
tree_iterator_free(struct iterator *iterator);
@@ -560,14 +564,14 @@ memtx_tree_index_create_iterator(struct index *base, enum iterator_type type,
key = NULL;
}
- struct tree_iterator *it = mempool_alloc(&memtx->tree_iterator_pool);
+ struct tree_iterator *it = mempool_alloc(&memtx->iterator_pool);
if (it == NULL) {
diag_set(OutOfMemory, sizeof(struct tree_iterator),
"memtx_tree_index", "iterator");
return NULL;
}
iterator_create(&it->base, base);
- it->pool = &memtx->tree_iterator_pool;
+ it->pool = &memtx->iterator_pool;
it->base.next = tree_iterator_start;
it->base.free = tree_iterator_free;
it->type = type;
@@ -744,11 +748,6 @@ static const struct index_vtab memtx_tree_index_vtab = {
struct index *
memtx_tree_index_new(struct memtx_engine *memtx, struct index_def *def)
{
- if (!mempool_is_initialized(&memtx->tree_iterator_pool)) {
- mempool_create(&memtx->tree_iterator_pool, cord_slab_cache(),
- sizeof(struct tree_iterator));
- }
-
struct memtx_tree_index *index =
(struct memtx_tree_index *)calloc(1, sizeof(*index));
if (index == NULL) {
--
2.20.1
^ permalink raw reply [flat|nested] 27+ messages in thread
* Re: [tarantool-patches] Re: [PATCH v3 1/7] memtx: introduce universal iterator_pool
2019-02-25 16:39 ` [tarantool-patches] " Kirill Shcherbatov
@ 2019-02-25 17:14 ` Vladimir Davydov
0 siblings, 0 replies; 27+ messages in thread
From: Vladimir Davydov @ 2019-02-25 17:14 UTC (permalink / raw)
To: Kirill Shcherbatov; +Cc: tarantool-patches, Konstantin Osipov
On Mon, Feb 25, 2019 at 07:39:19PM +0300, Kirill Shcherbatov wrote:
> Memtx uses separate mempools for iterators of different types.
> Due to the fact that there will be more iterators of different
> sizes in a series of upcoming changes, let's always allocate the
> iterator of the largest size.
> No changes have been made to the rtree iterators pool because the
> size of these structures is significantly larger.
>
> Needed for #3961
> ---
> src/box/memtx_bitset.c | 15 +++++++--------
> src/box/memtx_engine.c | 9 +++------
> src/box/memtx_engine.h | 20 ++++++++++++++------
> src/box/memtx_hash.c | 15 +++++++--------
> src/box/memtx_tree.c | 13 ++++++-------
> 5 files changed, 37 insertions(+), 35 deletions(-)
Pushed to 2.1.
^ permalink raw reply [flat|nested] 27+ messages in thread
* Re: [tarantool-patches] [PATCH v3 1/7] memtx: introduce universal iterator_pool
2019-02-22 18:37 ` [tarantool-patches] " Konstantin Osipov
2019-02-23 12:03 ` Kirill Shcherbatov
@ 2019-02-24 6:56 ` Vladimir Davydov
2019-02-24 17:15 ` Konstantin Osipov
1 sibling, 1 reply; 27+ messages in thread
From: Vladimir Davydov @ 2019-02-24 6:56 UTC (permalink / raw)
To: Konstantin Osipov; +Cc: tarantool-patches, Kirill Shcherbatov
On Fri, Feb 22, 2019 at 09:37:25PM +0300, Konstantin Osipov wrote:
> * Kirill Shcherbatov <kshcherbatov@tarantool.org> [19/02/22 19:29]:
> > Memtx uses separate mempools for iterators of different types.
> > Due to the fact that there will be more iterators of different
> > sizes in a series of upcoming changes, let's always allocate the
> > iterator of the largest size.
>
> If rtree iterator is the one which is largest, let's use a
> separate pool for it.
>
> In general mempools are rather cheap. Each mempool takes a slab
> for ~100 objects and uses no slabs if there are no objects (e.g.
> if rtree index is not used, there is no mempool memory for it).
But I'd rather prefer to use the same mempool for all kinds of iterator
objects to simplify the code. Take a look at how those mempools are
initialized on demand. IMO it looks ugly. Do we really want to save
those 500 bytes that much to put up with that complexity?
^ permalink raw reply [flat|nested] 27+ messages in thread
* Re: [tarantool-patches] [PATCH v3 1/7] memtx: introduce universal iterator_pool
2019-02-24 6:56 ` [tarantool-patches] " Vladimir Davydov
@ 2019-02-24 17:15 ` Konstantin Osipov
2019-02-24 18:22 ` Vladimir Davydov
0 siblings, 1 reply; 27+ messages in thread
From: Konstantin Osipov @ 2019-02-24 17:15 UTC (permalink / raw)
To: Vladimir Davydov; +Cc: tarantool-patches, Kirill Shcherbatov
* Vladimir Davydov <vdavydov.dev@gmail.com> [19/02/24 10:01]:
> On Fri, Feb 22, 2019 at 09:37:25PM +0300, Konstantin Osipov wrote:
> > * Kirill Shcherbatov <kshcherbatov@tarantool.org> [19/02/22 19:29]:
> > > Memtx uses separate mempools for iterators of different types.
> > > Due to the fact that there will be more iterators of different
> > > sizes in a series of upcoming changes, let's always allocate the
> > > iterator of the largest size.
> >
> > If rtree iterator is the one which is largest, let's use a
> > separate pool for it.
> >
> > In general mempools are rather cheap. Each mempool takes a slab
> > for ~100 objects and uses no slabs if there are no objects (e.g.
> > if rtree index is not used, there is no mempool memory for it).
>
> But I'd rather prefer to use the same mempool for all kinds of iterator
> objects to simplify the code. Take a look at how those mempools are
> initialized on demand. IMO it looks ugly. Do we really want to save
> those 500 of bytes that much to put up with that complexity?
Just like in the recent bps tree performance issue, you don't
pessimise the code since you never really know how it's going to
be used.
--
Konstantin Osipov, Moscow, Russia, +7 903 626 22 32
http://tarantool.io - www.twitter.com/kostja_osipov
^ permalink raw reply [flat|nested] 27+ messages in thread
* Re: [tarantool-patches] [PATCH v3 1/7] memtx: introduce universal iterator_pool
2019-02-24 17:15 ` Konstantin Osipov
@ 2019-02-24 18:22 ` Vladimir Davydov
2019-02-25 16:46 ` [tarantool-patches] " Konstantin Osipov
0 siblings, 1 reply; 27+ messages in thread
From: Vladimir Davydov @ 2019-02-24 18:22 UTC (permalink / raw)
To: Konstantin Osipov; +Cc: tarantool-patches, Kirill Shcherbatov
On Sun, Feb 24, 2019 at 08:15:04PM +0300, Konstantin Osipov wrote:
> * Vladimir Davydov <vdavydov.dev@gmail.com> [19/02/24 10:01]:
> > On Fri, Feb 22, 2019 at 09:37:25PM +0300, Konstantin Osipov wrote:
> > > * Kirill Shcherbatov <kshcherbatov@tarantool.org> [19/02/22 19:29]:
> > > > Memtx uses separate mempools for iterators of different types.
> > > > Due to the fact that there will be more iterators of different
> > > > sizes in a series of upcoming changes, let's always allocate the
> > > > iterator of the largest size.
> > >
> > > If rtree iterator is the one which is largest, let's use a
> > > separate pool for it.
> > >
> > > In general mempools are rather cheap. Each mempool takes a slab
> > > for ~100 objects and uses no slabs if there are no objects (e.g.
> > > if rtree index is not used, there is no mempool memory for it).
> >
> > But I'd rather prefer to use the same mempool for all kinds of iterator
> > objects to simplify the code. Take a look at how those mempools are
> > initialized on demand. IMO it looks ugly. Do we really want to save
> > those 500 of bytes that much to put up with that complexity?
>
> Just like in the recent bps tree performance issue, you don't
> pessimise the code since you never really know how it's going to
> be used.
Oh come on, what pessimization are you talking about in this particular
case? How many iterators can be out there simultaneously? A hundred, a
thousand? 500 bytes overhead per each doesn't seem much, especially
taking into account the fact that you're likely to have a fiber with
16KB stack for each iterator.
Regarding the bps tree performance issue. I see nothing wrong about it.
We've found an issue and we'll surely fix it. There was no point to
think about such a minor optimization until we actually faced the
problem. My point is we should strive to write simple and reliable code
first, and optimize it only if there's a demand, otherwise we risk
turning the code into an unmaintainable mess for no good reason.
^ permalink raw reply [flat|nested] 27+ messages in thread
* [tarantool-patches] Re: [PATCH v3 1/7] memtx: introduce universal iterator_pool
2019-02-24 18:22 ` Vladimir Davydov
@ 2019-02-25 16:46 ` Konstantin Osipov
2019-02-25 17:15 ` Vladimir Davydov
0 siblings, 1 reply; 27+ messages in thread
From: Konstantin Osipov @ 2019-02-25 16:46 UTC (permalink / raw)
To: tarantool-patches; +Cc: Kirill Shcherbatov
* Vladimir Davydov <vdavydov.dev@gmail.com> [19/02/24 21:24]:
> On Sun, Feb 24, 2019 at 08:15:04PM +0300, Konstantin Osipov wrote:
> > * Vladimir Davydov <vdavydov.dev@gmail.com> [19/02/24 10:01]:
> > > On Fri, Feb 22, 2019 at 09:37:25PM +0300, Konstantin Osipov wrote:
> > > > * Kirill Shcherbatov <kshcherbatov@tarantool.org> [19/02/22 19:29]:
> > > > > Memtx uses separate mempools for iterators of different types.
> > > > > Due to the fact that there will be more iterators of different
> > > > > sizes in a series of upcoming changes, let's always allocate the
> > > > > iterator of the largest size.
> > > >
> > > > If rtree iterator is the one which is largest, let's use a
> > > > separate pool for it.
> > > >
> > > > In general mempools are rather cheap. Each mempool takes a slab
> > > > for ~100 objects and uses no slabs if there are no objects (e.g.
> > > > if rtree index is not used, there is no mempool memory for it).
> > >
> > > But I'd rather prefer to use the same mempool for all kinds of iterator
> > > objects to simplify the code. Take a look at how those mempools are
> > > initialized on demand. IMO it looks ugly. Do we really want to save
> > > those 500 of bytes that much to put up with that complexity?
I don't know much, but a typical SAP R3, which I was working on making
work well with one open source database back in 2003, had ~100k
open client cursors. This could easily entail hundreds of
thousands of server side iterators.
> Regarding the bps tree performance issue. I see nothing wrong about it.
> We've found an issue and we'll surely fix it. There was no point to
> think about such a minor optimization until we actually faced the
> problem. My point is we should strive to write simple and reliable code
> first, and optimize it only if there's a demand, otherwise we risk
> turning the code into unmaintainable mess for no good reason.
I agree with your point, I just disagree that this optimization is
not practical.
--
Konstantin Osipov, Moscow, Russia, +7 903 626 22 32
http://tarantool.io - www.twitter.com/kostja_osipov
^ permalink raw reply [flat|nested] 27+ messages in thread
* Re: [tarantool-patches] Re: [PATCH v3 1/7] memtx: introduce universal iterator_pool
2019-02-25 16:46 ` [tarantool-patches] " Konstantin Osipov
@ 2019-02-25 17:15 ` Vladimir Davydov
0 siblings, 0 replies; 27+ messages in thread
From: Vladimir Davydov @ 2019-02-25 17:15 UTC (permalink / raw)
To: Konstantin Osipov; +Cc: tarantool-patches, Kirill Shcherbatov
On Mon, Feb 25, 2019 at 07:46:58PM +0300, Konstantin Osipov wrote:
> * Vladimir Davydov <vdavydov.dev@gmail.com> [19/02/24 21:24]:
> > On Sun, Feb 24, 2019 at 08:15:04PM +0300, Konstantin Osipov wrote:
> > > * Vladimir Davydov <vdavydov.dev@gmail.com> [19/02/24 10:01]:
> > > > On Fri, Feb 22, 2019 at 09:37:25PM +0300, Konstantin Osipov wrote:
> > > > > * Kirill Shcherbatov <kshcherbatov@tarantool.org> [19/02/22 19:29]:
> > > > > > Memtx uses separate mempools for iterators of different types.
> > > > > > Due to the fact that there will be more iterators of different
> > > > > > sizes in a series of upcoming changes, let's always allocate the
> > > > > > iterator of the largest size.
> > > > >
> > > > > If rtree iterator is the one which is largest, let's use a
> > > > > separate pool for it.
> > > > >
> > > > > In general mempools are rather cheap. Each mempool takes a slab
> > > > > for ~100 objects and uses no slabs if there are no objects (e.g.
> > > > > if rtree index is not used, there is no mempool memory for it).
> > > >
> > > > But I'd rather prefer to use the same mempool for all kinds of iterator
> > > > objects to simplify the code. Take a look at how those mempools are
> > > > initialized on demand. IMO it looks ugly. Do we really want to save
> > > > those 500 of bytes that much to put up with that complexity?
>
> I don't know much, but a typical SAP R3 which I working on making
> work well with one open source database back in 2003 had ~100k
> open client cursors. This could easily entail hundreds of
> thousands of server side iterators.
>
> > Regarding the bps tree performance issue. I see nothing wrong about it.
> > We've found an issue and we'll surely fix it. There was no point to
> > think about such a minor optimization until we actually faced the
> > problem. My point is we should strive to write simple and reliable code
> > first, and optimize it only if there's a demand, otherwise we risk
> > turning the code into unmaintainable mess for no good reason.
>
> I agree with your point, I just disagree that this optimization is
> not practical.
Okay, pushed the patch that keeps rtree_iterator_pool.
^ permalink raw reply [flat|nested] 27+ messages in thread
* [PATCH v3 2/7] lib: fix undef _api_name in bps_tree header
2019-02-22 15:42 [PATCH v3 0/7] box: introduce hint option for memtx tree index Kirill Shcherbatov
2019-02-22 15:42 ` [PATCH v3 1/7] memtx: introduce universal iterator_pool Kirill Shcherbatov
@ 2019-02-22 15:42 ` Kirill Shcherbatov
2019-02-22 18:37 ` [tarantool-patches] " Konstantin Osipov
2019-02-25 15:32 ` Vladimir Davydov
2019-02-22 15:42 ` [PATCH v3 3/7] lib: introduce BPS_TREE_IDENTICAL custom comparator Kirill Shcherbatov
` (4 subsequent siblings)
6 siblings, 2 replies; 27+ messages in thread
From: Kirill Shcherbatov @ 2019-02-22 15:42 UTC (permalink / raw)
To: tarantool-patches, vdavydov.dev; +Cc: Kirill Shcherbatov
The bps_tree.h header defines the macro _api_name, but does not
undefine it at the end. Fixed.
---
src/lib/salad/bps_tree.h | 1 +
1 file changed, 1 insertion(+)
diff --git a/src/lib/salad/bps_tree.h b/src/lib/salad/bps_tree.h
index 8d1a62108..09c3338f1 100644
--- a/src/lib/salad/bps_tree.h
+++ b/src/lib/salad/bps_tree.h
@@ -6004,6 +6004,7 @@ bps_tree_debug_check_internal_functions(bool assertme)
#undef BPS_TREE_BRANCH_TRACE
/* {{{ Macros for custom naming of structs and functions */
+#undef _api_name
#undef _bps
#undef _bps_tree
#undef _BPS
--
2.20.1
^ permalink raw reply [flat|nested] 27+ messages in thread
* Re: [tarantool-patches] [PATCH v3 2/7] lib: fix undef _api_name in bps_tree header
2019-02-22 15:42 ` [PATCH v3 2/7] lib: fix undef _api_name in bps_tree header Kirill Shcherbatov
@ 2019-02-22 18:37 ` Konstantin Osipov
2019-02-25 15:32 ` Vladimir Davydov
1 sibling, 0 replies; 27+ messages in thread
From: Konstantin Osipov @ 2019-02-22 18:37 UTC (permalink / raw)
To: tarantool-patches; +Cc: vdavydov.dev, Kirill Shcherbatov
* Kirill Shcherbatov <kshcherbatov@tarantool.org> [19/02/22 19:29]:
> The bps_tree.h header defines the macro _api_name, but does not
> undefine it at the end. Fixed.
OK to push, obviously.
--
Konstantin Osipov, Moscow, Russia, +7 903 626 22 32
http://tarantool.io - www.twitter.com/kostja_osipov
^ permalink raw reply [flat|nested] 27+ messages in thread
* Re: [PATCH v3 2/7] lib: fix undef _api_name in bps_tree header
2019-02-22 15:42 ` [PATCH v3 2/7] lib: fix undef _api_name in bps_tree header Kirill Shcherbatov
2019-02-22 18:37 ` [tarantool-patches] " Konstantin Osipov
@ 2019-02-25 15:32 ` Vladimir Davydov
1 sibling, 0 replies; 27+ messages in thread
From: Vladimir Davydov @ 2019-02-25 15:32 UTC (permalink / raw)
To: Kirill Shcherbatov; +Cc: tarantool-patches
On Fri, Feb 22, 2019 at 06:42:27PM +0300, Kirill Shcherbatov wrote:
> The bps_tree.h header defines the macro _api_name, but does not
> undefine it at the end. Fixed.
> ---
> src/lib/salad/bps_tree.h | 1 +
> 1 file changed, 1 insertion(+)
Pushed to 2.1.
^ permalink raw reply [flat|nested] 27+ messages in thread
* [PATCH v3 3/7] lib: introduce BPS_TREE_IDENTICAL custom comparator
2019-02-22 15:42 [PATCH v3 0/7] box: introduce hint option for memtx tree index Kirill Shcherbatov
2019-02-22 15:42 ` [PATCH v3 1/7] memtx: introduce universal iterator_pool Kirill Shcherbatov
2019-02-22 15:42 ` [PATCH v3 2/7] lib: fix undef _api_name in bps_tree header Kirill Shcherbatov
@ 2019-02-22 15:42 ` Kirill Shcherbatov
2019-02-25 15:33 ` Vladimir Davydov
2019-02-22 15:42 ` [PATCH v3 4/7] memtx: hide index implementation details from header Kirill Shcherbatov
` (3 subsequent siblings)
6 siblings, 1 reply; 27+ messages in thread
From: Kirill Shcherbatov @ 2019-02-22 15:42 UTC (permalink / raw)
To: tarantool-patches, vdavydov.dev; +Cc: Kirill Shcherbatov
Introduce a macro BPS_TREE_IDENTICAL for the BPS TREE class. This
makes it possible to define custom comparators for structure-based
leaves.
Previously, a C++ comparison operator "!=" override was used for
this purpose. Since we are not going to rework C-only components
of Tarantool like memtx_tree in C++, we needed a way to make
complex structure comparisons using the preprocessor.
Needed for #3961
---
src/lib/salad/bps_tree.h | 82 +++++++++++++++++++++++-----------
test/unit/bps_tree_iterator.cc | 16 ++++---
2 files changed, 65 insertions(+), 33 deletions(-)
diff --git a/src/lib/salad/bps_tree.h b/src/lib/salad/bps_tree.h
index 09c3338f1..adf3addfb 100644
--- a/src/lib/salad/bps_tree.h
+++ b/src/lib/salad/bps_tree.h
@@ -1035,6 +1035,14 @@ struct bps_leaf_path_elem {
bps_tree_pos_t max_elem_pos;
};
+/**
+ * Test if bps_tree_elem_t a and b represent exactly the
+ * same data.
+ */
+#ifndef BPS_TREE_IDENTICAL
+#define BPS_TREE_IDENTICAL(a, b) (a == b)
+#endif
+
/**
* @brief Tree construction. Fills struct bps_tree members.
* @param tree - pointer to a tree
@@ -4592,7 +4600,7 @@ bps_tree_debug_check_block(const struct bps_tree *tree, struct bps_block *block,
inner->child_ids[i]);
bps_tree_elem_t calc_max_elem =
bps_tree_debug_find_max_elem(tree, tmp_block);
- if (inner->elems[i] != calc_max_elem)
+ if (!BPS_TREE_IDENTICAL(inner->elems[i], calc_max_elem))
result |= 0x4000;
}
if (block->size > 1) {
@@ -4651,7 +4659,8 @@ bps_tree_debug_check(const struct bps_tree *tree)
return result;
}
struct bps_block *root = bps_tree_root(tree);
- if (tree->max_elem != bps_tree_debug_find_max_elem(tree, root))
+ bps_tree_elem_t elem = bps_tree_debug_find_max_elem(tree, root);
+ if (!BPS_TREE_IDENTICAL(tree->max_elem, elem))
result |= 0x8;
size_t calc_count = 0;
bps_tree_block_id_t expected_prev_id = (bps_tree_block_id_t)(-1);
@@ -5016,16 +5025,22 @@ bps_tree_debug_check_move_to_right_leaf(struct bps_tree *tree, bool assertme)
assert(!assertme);
}
- if (a.header.size)
- if (ma != a.elems[a.header.size - 1]) {
+ if (a.header.size) {
+ bps_tree_elem_t elem =
+ a.elems[a.header.size - 1];
+ if (!BPS_TREE_IDENTICAL(ma, elem)) {
result |= (1 << 5);
assert(!assertme);
}
- if (b.header.size)
- if (mb != b.elems[b.header.size - 1]) {
+ }
+ if (b.header.size) {
+ bps_tree_elem_t elem =
+ b.elems[b.header.size - 1];
+ if (!BPS_TREE_IDENTICAL(mb, elem)) {
result |= (1 << 5);
assert(!assertme);
}
+ }
c = 0;
for (unsigned int u = 0;
@@ -5113,16 +5128,22 @@ bps_tree_debug_check_move_to_left_leaf(struct bps_tree *tree, bool assertme)
assert(!assertme);
}
- if (a.header.size)
- if (ma != a.elems[a.header.size - 1]) {
+ if (a.header.size) {
+ bps_tree_elem_t elem =
+ a.elems[a.header.size - 1];
+ if (!BPS_TREE_IDENTICAL(ma, elem)) {
result |= (1 << 7);
assert(!assertme);
}
- if (b.header.size)
- if (mb != b.elems[b.header.size - 1]) {
+ }
+ if (b.header.size) {
+ bps_tree_elem_t elem =
+ b.elems[b.header.size - 1];
+ if (!BPS_TREE_IDENTICAL(mb, elem)) {
result |= (1 << 7);
assert(!assertme);
}
+ }
c = 0;
for (unsigned int u = 0;
@@ -5224,21 +5245,24 @@ bps_tree_debug_check_insert_and_move_to_right_leaf(struct bps_tree *tree,
assert(!assertme);
}
- if (i - u + 1)
- if (ma
- != a.elems[a.header.size
- - 1]) {
+ if (i - u + 1) {
+ bps_tree_elem_t elem =
+ a.elems[a.header.size - 1];
+ if (!BPS_TREE_IDENTICAL(ma,
+ elem)) {
result |= (1 << 9);
assert(!assertme);
}
- if (j + u)
- if (mb
- != b.elems[b.header.size
- - 1]) {
+ }
+ if (j + u) {
+ bps_tree_elem_t elem =
+ b.elems[b.header.size - 1];
+ if (!BPS_TREE_IDENTICAL(mb,
+ elem)) {
result |= (1 << 9);
assert(!assertme);
}
-
+ }
c = 0;
for (unsigned int v = 0;
v < (unsigned int) a.header.size;
@@ -5340,20 +5364,24 @@ bps_tree_debug_check_insert_and_move_to_left_leaf(struct bps_tree *tree,
assert(!assertme);
}
- if (i + u)
- if (ma
- != a.elems[a.header.size
- - 1]) {
+ if (i + u) {
+ bps_tree_elem_t elem =
+ a.elems[a.header.size - 1];
+ if (!BPS_TREE_IDENTICAL(ma,
+ elem)) {
result |= (1 << 11);
assert(!assertme);
}
- if (j - u + 1)
- if (mb
- != b.elems[b.header.size
- - 1]) {
+ }
+ if (j - u + 1) {
+ bps_tree_elem_t elem =
+ b.elems[b.header.size - 1];
+ if (!BPS_TREE_IDENTICAL(mb,
+ elem)) {
result |= (1 << 11);
assert(!assertme);
}
+ }
c = 0;
for (unsigned int v = 0;
diff --git a/test/unit/bps_tree_iterator.cc b/test/unit/bps_tree_iterator.cc
index 3c2d7f7c4..71dde7d78 100644
--- a/test/unit/bps_tree_iterator.cc
+++ b/test/unit/bps_tree_iterator.cc
@@ -10,18 +10,16 @@
struct elem_t {
long first;
long second;
- bool operator!= (const struct elem_t& another) const
- {
- return first != another.first || second != another.second;
- }
};
+static bool equal(const elem_t &a, const elem_t &b);
static int compare(const elem_t &a, const elem_t &b);
static int compare_key(const elem_t &a, long b);
#define BPS_TREE_NAME test
#define BPS_TREE_BLOCK_SIZE 128 /* value is to low specially for tests */
#define BPS_TREE_EXTENT_SIZE 1024 /* value is to low specially for tests */
+#define BPS_TREE_IDENTICAL(a, b) equal(a, b)
#define BPS_TREE_COMPARE(a, b, arg) compare(a, b)
#define BPS_TREE_COMPARE_KEY(a, b, arg) compare_key(a, b)
#define bps_tree_elem_t struct elem_t
@@ -29,6 +27,12 @@ static int compare_key(const elem_t &a, long b);
#define bps_tree_arg_t int
#include "salad/bps_tree.h"
+static bool
+equal(const elem_t &a, const elem_t &b)
+{
+ return a.first == b.first && a.second == b.second;
+}
+
static int compare(const elem_t &a, const elem_t &b)
{
return a.first < b.first ? -1 : a.first > b.first ? 1 :
@@ -501,7 +505,7 @@ iterator_freeze_check()
}
int tested_count = 0;
while ((e = test_iterator_get_elem(&tree, &iterator1))) {
- if (*e != comp_buf1[tested_count]) {
+ if (!equal(*e, comp_buf1[tested_count])) {
fail("version restore failed (1)", "true");
}
tested_count++;
@@ -523,7 +527,7 @@ iterator_freeze_check()
tested_count = 0;
while ((e = test_iterator_get_elem(&tree, &iterator2))) {
- if (*e != comp_buf1[tested_count]) {
+ if (!equal(*e, comp_buf1[tested_count])) {
fail("version restore failed (1)", "true");
}
tested_count++;
--
2.20.1
^ permalink raw reply [flat|nested] 27+ messages in thread
* Re: [PATCH v3 3/7] lib: introduce BPS_TREE_IDENTICAL custom comparator
2019-02-22 15:42 ` [PATCH v3 3/7] lib: introduce BPS_TREE_IDENTICAL custom comparator Kirill Shcherbatov
@ 2019-02-25 15:33 ` Vladimir Davydov
0 siblings, 0 replies; 27+ messages in thread
From: Vladimir Davydov @ 2019-02-25 15:33 UTC (permalink / raw)
To: Kirill Shcherbatov; +Cc: tarantool-patches
On Fri, Feb 22, 2019 at 06:42:28PM +0300, Kirill Shcherbatov wrote:
> Introduce a macro BPS_TREE_IDENTICAL for BPS TREE class. This
> makes possible to define custom comparators for stucture-based
> leafs.
> Previously, a C++ comparison operator "!=" override was used for
> this purpose. Due to the fact that we are not going to rework on
> C++ C-only components of Tarantool like memtx_tree, we needed a
> way to make complex structures comparisons using preprocessor.
>
> Needed for #3961
> ---
> src/lib/salad/bps_tree.h | 82 +++++++++++++++++++++++-----------
> test/unit/bps_tree_iterator.cc | 16 ++++---
> 2 files changed, 65 insertions(+), 33 deletions(-)
Pushed to 2.1.
^ permalink raw reply [flat|nested] 27+ messages in thread
* [PATCH v3 4/7] memtx: hide index implementation details from header
2019-02-22 15:42 [PATCH v3 0/7] box: introduce hint option for memtx tree index Kirill Shcherbatov
` (2 preceding siblings ...)
2019-02-22 15:42 ` [PATCH v3 3/7] lib: introduce BPS_TREE_IDENTICAL custom comparator Kirill Shcherbatov
@ 2019-02-22 15:42 ` Kirill Shcherbatov
2019-02-22 18:40 ` [tarantool-patches] " Konstantin Osipov
2019-02-25 15:33 ` Vladimir Davydov
2019-02-22 15:42 ` [PATCH v3 5/7] memtx: rework memtx_tree to store arbitrary nodes Kirill Shcherbatov
` (2 subsequent siblings)
6 siblings, 2 replies; 27+ messages in thread
From: Kirill Shcherbatov @ 2019-02-22 15:42 UTC (permalink / raw)
To: tarantool-patches, vdavydov.dev; +Cc: Kirill Shcherbatov
Refactored memtx_tree code so that memtx_tree.h, memtx_rtree.h,
memtx_bitset.h, memtx_hash.h contained only the signature of the
tree object constructor while all implementation details were in
corresponding *.c files.
Needed for #3961
---
src/box/memtx_bitset.c | 17 +++++++++--
src/box/memtx_bitset.h | 26 ++--------------
src/box/memtx_hash.c | 43 ++++++++++++++++++++++++--
src/box/memtx_hash.h | 44 +++------------------------
src/box/memtx_rtree.c | 12 ++++++--
src/box/memtx_rtree.h | 16 ++--------
src/box/memtx_space.c | 16 +++++-----
src/box/memtx_tree.c | 62 ++++++++++++++++++++++++++++++++++++--
src/box/memtx_tree.h | 68 ++----------------------------------------
9 files changed, 147 insertions(+), 157 deletions(-)
diff --git a/src/box/memtx_bitset.c b/src/box/memtx_bitset.c
index 9dbc4141d..64259cf44 100644
--- a/src/box/memtx_bitset.c
+++ b/src/box/memtx_bitset.c
@@ -36,12 +36,25 @@
#include "trivia/util.h"
+#include "bitset/index.h"
#include "fiber.h"
+#include "index.h"
#include "tuple.h"
#include "memtx_engine.h"
+struct memtx_bitset_index {
+ struct index base;
+ struct tt_bitset_index index;
+#ifndef OLD_GOOD_BITSET
+ struct matras *id_to_tuple;
+ struct mh_bitset_index_t *tuple_to_id;
+ uint32_t spare_id;
+#endif /*#ifndef OLD_GOOD_BITSET*/
+};
+
#ifndef OLD_GOOD_BITSET
#include "small/matras.h"
+struct mh_bitset_index_t;
struct bitset_hash_entry {
struct tuple *tuple;
@@ -491,7 +504,7 @@ static const struct index_vtab memtx_bitset_index_vtab = {
/* .end_build = */ generic_index_end_build,
};
-struct memtx_bitset_index *
+struct index *
memtx_bitset_index_new(struct memtx_engine *memtx, struct index_def *def)
{
assert(def->iid > 0);
@@ -529,5 +542,5 @@ memtx_bitset_index_new(struct memtx_engine *memtx, struct index_def *def)
#endif /* #ifndef OLD_GOOD_BITSET */
tt_bitset_index_create(&index->index, realloc);
- return index;
+ return &index->base;
}
diff --git a/src/box/memtx_bitset.h b/src/box/memtx_bitset.h
index 62971dd51..148d463d8 100644
--- a/src/box/memtx_bitset.h
+++ b/src/box/memtx_bitset.h
@@ -31,35 +31,15 @@
* SUCH DAMAGE.
*/
-/**
- * @brief Index API wrapper for bitset_index
- * @see bitset/index.h
- */
-#include "index.h"
-#include "bitset/index.h"
-
#if defined(__cplusplus)
extern "C" {
#endif /* defined(__cplusplus) */
+struct index;
+struct index_def;
struct memtx_engine;
-#ifndef OLD_GOOD_BITSET
-struct matras;
-struct mh_bitset_index_t;
-#endif /*#ifndef OLD_GOOD_BITSET*/
-
-struct memtx_bitset_index {
- struct index base;
- struct tt_bitset_index index;
-#ifndef OLD_GOOD_BITSET
- struct matras *id_to_tuple;
- struct mh_bitset_index_t *tuple_to_id;
- uint32_t spare_id;
-#endif /*#ifndef OLD_GOOD_BITSET*/
-};
-
-struct memtx_bitset_index *
+struct index *
memtx_bitset_index_new(struct memtx_engine *memtx, struct index_def *def);
#if defined(__cplusplus)
diff --git a/src/box/memtx_hash.c b/src/box/memtx_hash.c
index b35a52528..e7d64a9fb 100644
--- a/src/box/memtx_hash.c
+++ b/src/box/memtx_hash.c
@@ -31,6 +31,7 @@
#include "memtx_hash.h"
#include "say.h"
#include "fiber.h"
+#include "index.h"
#include "tuple.h"
#include "memtx_engine.h"
#include "space.h"
@@ -39,6 +40,44 @@
#include <small/mempool.h>
+static inline bool
+memtx_hash_equal(struct tuple *tuple_a, struct tuple *tuple_b,
+ struct key_def *key_def)
+{
+ return tuple_compare(tuple_a, tuple_b, key_def) == 0;
+}
+
+static inline bool
+memtx_hash_equal_key(struct tuple *tuple, const char *key,
+ struct key_def *key_def)
+{
+ return tuple_compare_with_key(tuple, key, key_def->part_count,
+ key_def) == 0;
+}
+
+#define LIGHT_NAME _index
+#define LIGHT_DATA_TYPE struct tuple *
+#define LIGHT_KEY_TYPE const char *
+#define LIGHT_CMP_ARG_TYPE struct key_def *
+#define LIGHT_EQUAL(a, b, c) memtx_hash_equal(a, b, c)
+#define LIGHT_EQUAL_KEY(a, b, c) memtx_hash_equal_key(a, b, c)
+
+#include "salad/light.h"
+
+#undef LIGHT_NAME
+#undef LIGHT_DATA_TYPE
+#undef LIGHT_KEY_TYPE
+#undef LIGHT_CMP_ARG_TYPE
+#undef LIGHT_EQUAL
+#undef LIGHT_EQUAL_KEY
+
+struct memtx_hash_index {
+ struct index base;
+ struct light_index_core hash_table;
+ struct memtx_gc_task gc_task;
+ struct light_index_iterator gc_iterator;
+};
+
/* {{{ MemtxHash Iterators ****************************************/
struct hash_iterator {
@@ -450,7 +489,7 @@ static const struct index_vtab memtx_hash_index_vtab = {
/* .end_build = */ generic_index_end_build,
};
-struct memtx_hash_index *
+struct index *
memtx_hash_index_new(struct memtx_engine *memtx, struct index_def *def)
{
if (!mempool_is_initialized(&memtx->iterator_pool)) {
@@ -474,7 +513,7 @@ memtx_hash_index_new(struct memtx_engine *memtx, struct index_def *def)
light_index_create(&index->hash_table, MEMTX_EXTENT_SIZE,
memtx_index_extent_alloc, memtx_index_extent_free,
memtx, index->base.def->key_def);
- return index;
+ return &index->base;
}
/* }}} */
diff --git a/src/box/memtx_hash.h b/src/box/memtx_hash.h
index 10663fcfc..2dba3f5c5 100644
--- a/src/box/memtx_hash.h
+++ b/src/box/memtx_hash.h
@@ -30,52 +30,16 @@
* THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
-#include "index.h"
-#include "memtx_engine.h"
#if defined(__cplusplus)
extern "C" {
#endif /* defined(__cplusplus) */
-static inline bool
-memtx_hash_equal(struct tuple *tuple_a, struct tuple *tuple_b,
- struct key_def *key_def)
-{
- return tuple_compare(tuple_a, tuple_b, key_def) == 0;
-}
+struct index;
+struct index_def;
+struct memtx_engine;
-static inline bool
-memtx_hash_equal_key(struct tuple *tuple, const char *key,
- struct key_def *key_def)
-{
- return tuple_compare_with_key(tuple, key, key_def->part_count,
- key_def) == 0;
-}
-
-#define LIGHT_NAME _index
-#define LIGHT_DATA_TYPE struct tuple *
-#define LIGHT_KEY_TYPE const char *
-#define LIGHT_CMP_ARG_TYPE struct key_def *
-#define LIGHT_EQUAL(a, b, c) memtx_hash_equal(a, b, c)
-#define LIGHT_EQUAL_KEY(a, b, c) memtx_hash_equal_key(a, b, c)
-
-#include "salad/light.h"
-
-#undef LIGHT_NAME
-#undef LIGHT_DATA_TYPE
-#undef LIGHT_KEY_TYPE
-#undef LIGHT_CMP_ARG_TYPE
-#undef LIGHT_EQUAL
-#undef LIGHT_EQUAL_KEY
-
-struct memtx_hash_index {
- struct index base;
- struct light_index_core hash_table;
- struct memtx_gc_task gc_task;
- struct light_index_iterator gc_iterator;
-};
-
-struct memtx_hash_index *
+struct index *
memtx_hash_index_new(struct memtx_engine *memtx, struct index_def *def);
#if defined(__cplusplus)
diff --git a/src/box/memtx_rtree.c b/src/box/memtx_rtree.c
index 9cb93f150..5d2901ad9 100644
--- a/src/box/memtx_rtree.c
+++ b/src/box/memtx_rtree.c
@@ -30,9 +30,11 @@
*/
#include "memtx_rtree.h"
+#include <salad/rtree.h>
#include <small/small.h>
#include <small/mempool.h>
+#include "index.h"
#include "errinj.h"
#include "fiber.h"
#include "trivia/util.h"
@@ -41,6 +43,12 @@
#include "space.h"
#include "memtx_engine.h"
+struct memtx_rtree_index {
+ struct index base;
+ unsigned dimension;
+ struct rtree tree;
+};
+
/* {{{ Utilities. *************************************************/
static inline int
@@ -333,7 +341,7 @@ static const struct index_vtab memtx_rtree_index_vtab = {
/* .end_build = */ generic_index_end_build,
};
-struct memtx_rtree_index *
+struct index *
memtx_rtree_index_new(struct memtx_engine *memtx, struct index_def *def)
{
assert(def->iid > 0);
@@ -377,5 +385,5 @@ memtx_rtree_index_new(struct memtx_engine *memtx, struct index_def *def)
rtree_init(&index->tree, index->dimension, MEMTX_EXTENT_SIZE,
memtx_index_extent_alloc, memtx_index_extent_free, memtx,
distance_type);
- return index;
+ return &index->base;
}
diff --git a/src/box/memtx_rtree.h b/src/box/memtx_rtree.h
index 359aa898e..2941b805a 100644
--- a/src/box/memtx_rtree.h
+++ b/src/box/memtx_rtree.h
@@ -30,26 +30,16 @@
* THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
-#include <stddef.h>
-#include <stdint.h>
-
-#include <salad/rtree.h>
-
-#include "index.h"
#if defined(__cplusplus)
extern "C" {
#endif /* defined(__cplusplus) */
+struct index;
+struct index_def;
struct memtx_engine;
-struct memtx_rtree_index {
- struct index base;
- unsigned dimension;
- struct rtree tree;
-};
-
-struct memtx_rtree_index *
+struct index *
memtx_rtree_index_new(struct memtx_engine *memtx, struct index_def *def);
#if defined(__cplusplus)
diff --git a/src/box/memtx_space.c b/src/box/memtx_space.c
index 5f8cc98fd..92ec0b300 100644
--- a/src/box/memtx_space.c
+++ b/src/box/memtx_space.c
@@ -716,21 +716,21 @@ sequence_data_index_create_snapshot_iterator(struct index *index)
static struct index *
sequence_data_index_new(struct memtx_engine *memtx, struct index_def *def)
{
- struct memtx_hash_index *index = memtx_hash_index_new(memtx, def);
+ struct index *index = memtx_hash_index_new(memtx, def);
if (index == NULL)
return NULL;
static struct index_vtab vtab;
static bool vtab_initialized;
if (!vtab_initialized) {
- vtab = *index->base.vtab;
+ vtab = *index->vtab;
vtab.create_snapshot_iterator =
sequence_data_index_create_snapshot_iterator;
vtab_initialized = true;
}
- index->base.vtab = &vtab;
- return &index->base;
+ index->vtab = &vtab;
+ return index;
}
static struct index *
@@ -751,13 +751,13 @@ memtx_space_create_index(struct space *space, struct index_def *index_def)
switch (index_def->type) {
case HASH:
- return (struct index *)memtx_hash_index_new(memtx, index_def);
+ return memtx_hash_index_new(memtx, index_def);
case TREE:
- return (struct index *)memtx_tree_index_new(memtx, index_def);
+ return memtx_tree_index_new(memtx, index_def);
case RTREE:
- return (struct index *)memtx_rtree_index_new(memtx, index_def);
+ return memtx_rtree_index_new(memtx, index_def);
case BITSET:
- return (struct index *)memtx_bitset_index_new(memtx, index_def);
+ return memtx_bitset_index_new(memtx, index_def);
default:
unreachable();
return NULL;
diff --git a/src/box/memtx_tree.c b/src/box/memtx_tree.c
index fe66427fc..07d20473f 100644
--- a/src/box/memtx_tree.c
+++ b/src/box/memtx_tree.c
@@ -39,6 +39,64 @@
#include <third_party/qsort_arg.h>
#include <small/mempool.h>
+/**
+ * Struct that is used as a key in BPS tree definition.
+ */
+struct memtx_tree_key_data {
+ /** Sequence of msgpacked search fields. */
+ const char *key;
+ /** Number of msgpacked search fields. */
+ uint32_t part_count;
+};
+
+/**
+ * BPS tree element vs key comparator.
+ * Defined in header in order to allow compiler to inline it.
+ * @param tuple - tuple to compare.
+ * @param key_data - key to compare with.
+ * @param def - key definition.
+ * @retval 0 if tuple == key in terms of def.
+ * @retval <0 if tuple < key in terms of def.
+ * @retval >0 if tuple > key in terms of def.
+ */
+static int
+memtx_tree_compare_key(const struct tuple *tuple,
+ const struct memtx_tree_key_data *key_data,
+ struct key_def *def)
+{
+ return tuple_compare_with_key(tuple, key_data->key,
+ key_data->part_count, def);
+}
+
+#define BPS_TREE_NAME memtx_tree
+#define BPS_TREE_BLOCK_SIZE (512)
+#define BPS_TREE_EXTENT_SIZE MEMTX_EXTENT_SIZE
+#define BPS_TREE_COMPARE(a, b, arg) tuple_compare(a, b, arg)
+#define BPS_TREE_COMPARE_KEY(a, b, arg) memtx_tree_compare_key(a, b, arg)
+#define bps_tree_elem_t struct tuple *
+#define bps_tree_key_t struct memtx_tree_key_data *
+#define bps_tree_arg_t struct key_def *
+
+#include "salad/bps_tree.h"
+
+#undef BPS_TREE_NAME
+#undef BPS_TREE_BLOCK_SIZE
+#undef BPS_TREE_EXTENT_SIZE
+#undef BPS_TREE_COMPARE
+#undef BPS_TREE_COMPARE_KEY
+#undef bps_tree_elem_t
+#undef bps_tree_key_t
+#undef bps_tree_arg_t
+
+struct memtx_tree_index {
+ struct index base;
+ struct memtx_tree tree;
+ struct tuple **build_array;
+ size_t build_array_size, build_array_alloc_size;
+ struct memtx_gc_task gc_task;
+ struct memtx_tree_iterator gc_iterator;
+};
+
/* {{{ Utilities. *************************************************/
static int
@@ -686,7 +744,7 @@ static const struct index_vtab memtx_tree_index_vtab = {
/* .end_build = */ memtx_tree_index_end_build,
};
-struct memtx_tree_index *
+struct index *
memtx_tree_index_new(struct memtx_engine *memtx, struct index_def *def)
{
if (!mempool_is_initialized(&memtx->iterator_pool)) {
@@ -710,5 +768,5 @@ memtx_tree_index_new(struct memtx_engine *memtx, struct index_def *def)
struct key_def *cmp_def = memtx_tree_index_cmp_def(index);
memtx_tree_create(&index->tree, cmp_def, memtx_index_extent_alloc,
memtx_index_extent_free, memtx);
- return index;
+ return &index->base;
}
diff --git a/src/box/memtx_tree.h b/src/box/memtx_tree.h
index bbc8b2419..edeaebab9 100644
--- a/src/box/memtx_tree.h
+++ b/src/box/memtx_tree.h
@@ -30,78 +30,16 @@
* THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
-#include <stddef.h>
-#include <stdint.h>
-
-#include "index.h"
-#include "memtx_engine.h"
#if defined(__cplusplus)
extern "C" {
#endif /* defined(__cplusplus) */
+struct index;
+struct index_def;
struct memtx_engine;
-/**
- * Struct that is used as a key in BPS tree definition.
- */
-struct memtx_tree_key_data
-{
- /** Sequence of msgpacked search fields */
- const char *key;
- /** Number of msgpacked search fields */
- uint32_t part_count;
-};
-
-/**
- * BPS tree element vs key comparator.
- * Defined in header in order to allow compiler to inline it.
- * @param tuple - tuple to compare.
- * @param key_data - key to compare with.
- * @param def - key definition.
- * @retval 0 if tuple == key in terms of def.
- * @retval <0 if tuple < key in terms of def.
- * @retval >0 if tuple > key in terms of def.
- */
-static inline int
-memtx_tree_compare_key(const struct tuple *tuple,
- const struct memtx_tree_key_data *key_data,
- struct key_def *def)
-{
- return tuple_compare_with_key(tuple, key_data->key,
- key_data->part_count, def);
-}
-
-#define BPS_TREE_NAME memtx_tree
-#define BPS_TREE_BLOCK_SIZE (512)
-#define BPS_TREE_EXTENT_SIZE MEMTX_EXTENT_SIZE
-#define BPS_TREE_COMPARE(a, b, arg) tuple_compare(a, b, arg)
-#define BPS_TREE_COMPARE_KEY(a, b, arg) memtx_tree_compare_key(a, b, arg)
-#define bps_tree_elem_t struct tuple *
-#define bps_tree_key_t struct memtx_tree_key_data *
-#define bps_tree_arg_t struct key_def *
-
-#include "salad/bps_tree.h"
-
-#undef BPS_TREE_NAME
-#undef BPS_TREE_BLOCK_SIZE
-#undef BPS_TREE_EXTENT_SIZE
-#undef BPS_TREE_COMPARE
-#undef BPS_TREE_COMPARE_KEY
-#undef bps_tree_elem_t
-#undef bps_tree_key_t
-#undef bps_tree_arg_t
-
-struct memtx_tree_index {
- struct index base;
- struct memtx_tree tree;
- struct tuple **build_array;
- size_t build_array_size, build_array_alloc_size;
- struct memtx_gc_task gc_task;
- struct memtx_tree_iterator gc_iterator;
-};
-
-struct memtx_tree_index *
+struct index *
memtx_tree_index_new(struct memtx_engine *memtx, struct index_def *def);
#if defined(__cplusplus)
--
2.20.1
^ permalink raw reply [flat|nested] 27+ messages in thread
* Re: [tarantool-patches] [PATCH v3 4/7] memtx: hide index implementation details from header
2019-02-22 15:42 ` [PATCH v3 4/7] memtx: hide index implementation details from header Kirill Shcherbatov
@ 2019-02-22 18:40 ` Konstantin Osipov
2019-02-25 15:33 ` Vladimir Davydov
1 sibling, 0 replies; 27+ messages in thread
From: Konstantin Osipov @ 2019-02-22 18:40 UTC (permalink / raw)
To: tarantool-patches; +Cc: vdavydov.dev, Kirill Shcherbatov
* Kirill Shcherbatov <kshcherbatov@tarantool.org> [19/02/22 19:29]:
> Refactored memtx_tree code so that memtx_tree.h, memtx_rtree.h,
> memtx_bitset.h, memtx_hash.h contained only the signature of the
> tree object constructor while all implementation details were in
> corresponding *.c files.
OK to push.
--
Konstantin Osipov, Moscow, Russia, +7 903 626 22 32
http://tarantool.io - www.twitter.com/kostja_osipov
^ permalink raw reply [flat|nested] 27+ messages in thread
* Re: [PATCH v3 4/7] memtx: hide index implementation details from header
2019-02-22 15:42 ` [PATCH v3 4/7] memtx: hide index implementation details from header Kirill Shcherbatov
2019-02-22 18:40 ` [tarantool-patches] " Konstantin Osipov
@ 2019-02-25 15:33 ` Vladimir Davydov
1 sibling, 0 replies; 27+ messages in thread
From: Vladimir Davydov @ 2019-02-25 15:33 UTC (permalink / raw)
To: Kirill Shcherbatov; +Cc: tarantool-patches
On Fri, Feb 22, 2019 at 06:42:29PM +0300, Kirill Shcherbatov wrote:
> Refactored memtx_tree code so that memtx_tree.h, memtx_rtree.h,
> memtx_bitset.h, memtx_hash.h contained only the signature of the
> tree object constructor while all implementation details were in
> corresponding *.c files.
>
> Needed for #3961
> ---
> src/box/memtx_bitset.c | 17 +++++++++--
> src/box/memtx_bitset.h | 26 ++--------------
> src/box/memtx_hash.c | 43 ++++++++++++++++++++++++--
> src/box/memtx_hash.h | 44 +++------------------------
> src/box/memtx_rtree.c | 12 ++++++--
> src/box/memtx_rtree.h | 16 ++--------
> src/box/memtx_space.c | 16 +++++-----
> src/box/memtx_tree.c | 62 ++++++++++++++++++++++++++++++++++++--
> src/box/memtx_tree.h | 68 ++----------------------------------------
> 9 files changed, 147 insertions(+), 157 deletions(-)
Pushed to 2.1.
^ permalink raw reply [flat|nested] 27+ messages in thread
* [PATCH v3 5/7] memtx: rework memtx_tree to store arbitrary nodes
2019-02-22 15:42 [PATCH v3 0/7] box: introduce hint option for memtx tree index Kirill Shcherbatov
` (3 preceding siblings ...)
2019-02-22 15:42 ` [PATCH v3 4/7] memtx: hide index implementation details from header Kirill Shcherbatov
@ 2019-02-22 15:42 ` Kirill Shcherbatov
2019-02-25 16:57 ` Vladimir Davydov
2019-02-22 15:42 ` [PATCH v3 6/7] memtx: rename memtx_tree.c to memtx_tree_impl.h Kirill Shcherbatov
2019-02-22 15:42 ` [PATCH v3 7/7] memtx: introduce tuple compare hint Kirill Shcherbatov
6 siblings, 1 reply; 27+ messages in thread
From: Kirill Shcherbatov @ 2019-02-22 15:42 UTC (permalink / raw)
To: tarantool-patches, vdavydov.dev; +Cc: Kirill Shcherbatov
Reworked the memtx_tree class to use arbitrary containers as tree
nodes. This makes it possible to implement tuple hints in subsequent
patches.
The memtx_tree.c file is currently used in a header-like manner. This
is to ensure that functional changes in the patch do not mix with
refactoring, which will be finished in the next commit.
Needed for #3961
---
src/box/CMakeLists.txt | 2 +-
src/box/memtx_tree.c | 522 ++++++++++++++++++++++++++++----------
src/box/memtx_tree_decl.c | 84 ++++++
3 files changed, 473 insertions(+), 135 deletions(-)
create mode 100644 src/box/memtx_tree_decl.c
diff --git a/src/box/CMakeLists.txt b/src/box/CMakeLists.txt
index 5521e489e..dc8cc2cd5 100644
--- a/src/box/CMakeLists.txt
+++ b/src/box/CMakeLists.txt
@@ -64,7 +64,7 @@ add_library(box STATIC
index_def.c
iterator_type.c
memtx_hash.c
- memtx_tree.c
+ memtx_tree_decl.c
memtx_rtree.c
memtx_bitset.c
engine.c
diff --git a/src/box/memtx_tree.c b/src/box/memtx_tree.c
index 07d20473f..c48f96bf6 100644
--- a/src/box/memtx_tree.c
+++ b/src/box/memtx_tree.c
@@ -40,58 +40,208 @@
#include <small/mempool.h>
/**
- * Struct that is used as a key in BPS tree definition.
+ * The name of this implementation of the memtx tree object.
*/
-struct memtx_tree_key_data {
- /** Sequence of msgpacked search fields. */
- const char *key;
- /** Number of msgpacked search fields. */
- uint32_t part_count;
-};
+#ifndef MEMTX_TREE_NAME
+#error "MEMTX_TREE_NAME must be defined"
+#endif
/**
- * BPS tree element vs key comparator.
- * Defined in header in order to allow compiler to inline it.
- * @param tuple - tuple to compare.
- * @param key_data - key to compare with.
- * @param def - key definition.
- * @retval 0 if tuple == key in terms of def.
- * @retval <0 if tuple < key in terms of def.
- * @retval >0 if tuple > key in terms of def.
+ * The type of data node stored in the tree. This object is
+ * expected to contain <struct tuple *tuple> payload. In a
+ * particular case, it can be directly struct tuple *.
*/
-static int
-memtx_tree_compare_key(const struct tuple *tuple,
- const struct memtx_tree_key_data *key_data,
- struct key_def *def)
-{
- return tuple_compare_with_key(tuple, key_data->key,
- key_data->part_count, def);
-}
+#ifndef memtx_tree_elem_t
+#error "memtx_tree_elem_t must be defined"
+#endif
+
+/**
+ * The type of key used in comparisons with data nodes stored
+ * in the tree. This object is expected to contain
+ * <const char *key, uint32_t part_count> payload.
+ */
+#ifndef memtx_tree_key_t
+#error "memtx_tree_key_t must be defined"
+#endif
+
+/**
+ * Perform a comparison of memtx_tree_elem_t nodes.
+ *
+ * int
+ * MEMTX_TREE_COMPARE(memtx_tree_elem_t *elem_a,
+ * memtx_tree_elem_t *elem_b,
+ * struct key_def *key_def);
+ */
+#ifndef MEMTX_TREE_COMPARE
+#error "MEMTX_TREE_COMPARE must be defined"
+#endif
+
+/**
+ * Perform a comparison of memtx_tree_elem_t with memtx_tree_key_t.
+ *
+ * int
+ * MEMTX_TREE_COMPARE_KEY(memtx_tree_elem_t *elem,
+ * memtx_tree_key_t *key,
+ * struct key_def *key_def);
+ */
+#ifndef MEMTX_TREE_COMPARE_KEY
+#error "MEMTX_TREE_COMPARE_KEY must be defined"
+#endif
+
+/**
+ * Perform memtx_tree_elem_t initialization.
+ *
+ * void
+ * MEMTX_TREE_ELEM_SET(memtx_tree_elem_t *elem,
+ * struct tuple *tuple,
+ * struct key_def *key_def);
+ */
+#ifndef MEMTX_TREE_ELEM_SET
+#error "MEMTX_TREE_ELEM_SET must be defined"
+#endif
+
+/**
+ * Perform memtx_tree_key_t initialization.
+ *
+ * void
+ * MEMTX_TREE_KEY_SET(memtx_tree_key_t *key, const char *key_val,
+ * uint32_t part_count_val,
+ * struct key_def *key_def);
+ */
+#ifndef MEMTX_TREE_KEY_SET
+#error "MEMTX_TREE_KEY_SET must be defined"
+#endif
+
+/**
+ * Extract memtx_tree_elem_t mandatory <struct tuple *tuple>
+ * payload.
+ *
+ * struct tuple *
+ * MEMTX_TREE_ELEM_GET(memtx_tree_elem_t *elem);
+ */
+#ifndef MEMTX_TREE_ELEM_GET
+#error "MEMTX_TREE_ELEM_GET must be defined"
+#endif
+
+/**
+ * Extract memtx_tree_key_t mandatory payload:
+ * <const char *key, uint32_t part_count>.
+ *
+ * const char *
+ * MEMTX_TREE_KEY_GET(memtx_tree_key_t *key, uint32_t *part_count);
+ */
+#ifndef MEMTX_TREE_KEY_GET
+#error "MEMTX_TREE_KEY_GET must be defined"
+#endif
-#define BPS_TREE_NAME memtx_tree
+/**
+ * Test if memtx_tree_elem_t a and b represent exactly the
+ * same data. While MEMTX_TREE_COMPARE performs binary data
+ * comparison, this helper checks if the elements are identical,
+ * i.e. initialized by MEMTX_TREE_ELEM_SET with the same arguments.
+ *
+ * bool
+ * MEMTX_TREE_IDENTICAL(memtx_tree_elem_t *elem_a,
+ * memtx_tree_elem_t *elem_b);
+ */
+#ifndef MEMTX_TREE_IDENTICAL
+#error "MEMTX_TREE_IDENTICAL must be defined"
+#endif
+
+#define bps_tree_elem_t memtx_tree_elem_t
+#define bps_tree_key_t memtx_tree_key_t *
+#define bps_tree_arg_t struct key_def *
+#define BPS_TREE_NAME CONCAT3(MEMTX_TREE_NAME, _, tree)
#define BPS_TREE_BLOCK_SIZE (512)
#define BPS_TREE_EXTENT_SIZE MEMTX_EXTENT_SIZE
-#define BPS_TREE_COMPARE(a, b, arg) tuple_compare(a, b, arg)
-#define BPS_TREE_COMPARE_KEY(a, b, arg) memtx_tree_compare_key(a, b, arg)
-#define bps_tree_elem_t struct tuple *
-#define bps_tree_key_t struct memtx_tree_key_data *
-#define bps_tree_arg_t struct key_def *
+#define BPS_TREE_COMPARE(a, b, arg) MEMTX_TREE_COMPARE(&a, &b, arg)
+#define BPS_TREE_COMPARE_KEY(a, b, arg) MEMTX_TREE_COMPARE_KEY(&a, b, arg)
+#define BPS_TREE_IDENTICAL(a, b) MEMTX_TREE_IDENTICAL(&a, &b)
#include "salad/bps_tree.h"
+#undef bps_tree_elem_t
+#undef bps_tree_key_t
+#undef bps_tree_arg_t
#undef BPS_TREE_NAME
#undef BPS_TREE_BLOCK_SIZE
#undef BPS_TREE_EXTENT_SIZE
#undef BPS_TREE_COMPARE
#undef BPS_TREE_COMPARE_KEY
-#undef bps_tree_elem_t
-#undef bps_tree_key_t
-#undef bps_tree_arg_t
+#undef BPS_TREE_IDENTICAL
+
+#define memtx_tree CONCAT3(MEMTX_TREE_NAME, _, tree)
+#define _tree_api_name(postfix) CONCAT5(MEMTX_TREE_NAME, _, tree, _, postfix)
+#define memtx_tree_iterator _tree_api_name(iterator)
+#define memtx_tree_iterator_is_invalid _tree_api_name(iterator_is_invalid)
+#define memtx_tree_iterator_first _tree_api_name(iterator_first)
+#define memtx_tree_iterator_last _tree_api_name(iterator_last)
+#define memtx_tree_iterator_get_elem _tree_api_name(iterator_get_elem)
+#define memtx_tree_iterator_next _tree_api_name(iterator_next)
+#define memtx_tree_iterator_prev _tree_api_name(iterator_prev)
+#define memtx_tree_iterator_freeze _tree_api_name(iterator_freeze)
+#define memtx_tree_iterator_destroy _tree_api_name(iterator_destroy)
+#define memtx_tree_create _tree_api_name(create)
+#define memtx_tree_build _tree_api_name(build)
+#define memtx_tree_destroy _tree_api_name(destroy)
+#define memtx_tree_find _tree_api_name(find)
+#define memtx_tree_insert _tree_api_name(insert)
+#define memtx_tree_delete _tree_api_name(delete)
+#define memtx_tree_size _tree_api_name(size)
+#define memtx_tree_mem_used _tree_api_name(mem_used)
+#define memtx_tree_random _tree_api_name(random)
+#define memtx_tree_invalid_iterator _tree_api_name(invalid_iterator)
+#define memtx_tree_lower_bound _tree_api_name(lower_bound)
+#define memtx_tree_upper_bound _tree_api_name(upper_bound)
+#define memtx_tree_lower_bound_elem _tree_api_name(lower_bound_elem)
+#define memtx_tree_upper_bound_elem _tree_api_name(upper_bound_elem)
+
+#define _api_name(postfix) CONCAT3(MEMTX_TREE_NAME, _, postfix)
+#define memtx_tree_index _api_name(index)
+#define memtx_tree_qcompare _api_name(qcompare)
+#define tree_iterator _api_name(iterator)
+#define tree_iterator_free _api_name(iterator_free)
+#define tree_iterator_cast _api_name(iterator_cast)
+#define tree_iterator_dummie _api_name(iterator_dummie)
+#define tree_iterator_next _api_name(iterator_next)
+#define tree_iterator_prev _api_name(iterator_prev)
+#define tree_iterator_next_equal _api_name(iterator_next_equal)
+#define tree_iterator_prev_equal _api_name(iterator_prev_equal)
+#define tree_iterator_set_next_method _api_name(iterator_set_next_method)
+#define tree_iterator_start _api_name(iterator_start)
+#define memtx_tree_index_cmp_def _api_name(index_cmp_def)
+#define memtx_tree_index_free _api_name(index_free)
+#define memtx_tree_index_gc_run _api_name(index_gc_run)
+#define memtx_tree_index_gc_free _api_name(index_gc_free)
+#define memtx_tree_index_gc_vtab _api_name(index_gc_vtab)
+#define memtx_tree_index_destroy _api_name(index_destroy)
+#define memtx_tree_index_update_def _api_name(index_update_def)
+#define memtx_tree_index_depends_on_pk _api_name(index_depends_on_pk)
+#define memtx_tree_index_size _api_name(index_size)
+#define memtx_tree_index_bsize _api_name(index_bsize)
+#define memtx_tree_index_random _api_name(index_random)
+#define memtx_tree_index_count _api_name(index_count)
+#define memtx_tree_index_get _api_name(index_get)
+#define memtx_tree_index_insert_tuple _api_name(index_insert_tuple)
+#define memtx_tree_index_delete_tuple _api_name(index_delete_tuple)
+#define memtx_tree_index_replace _api_name(index_replace)
+#define memtx_tree_index_create_iterator _api_name(index_create_iterator)
+#define memtx_tree_index_begin_build _api_name(index_begin_build)
+#define memtx_tree_index_build_next _api_name(index_build_next)
+#define memtx_tree_index_reserve _api_name(index_reserve)
+#define memtx_tree_index_end_build _api_name(index_end_build)
+#define tree_snapshot_iterator _api_name(snapshot_iterator)
+#define tree_snapshot_iterator_free _api_name(snapshot_iterator_free)
+#define tree_snapshot_iterator_next _api_name(snapshot_iterator_next)
+#define memtx_tree_index_create_snapshot_iterator\
+ _api_name(index_create_snapshot_iterator)
+#define memtx_tree_index_vtab _api_name(index_vtab)
+#define memtx_tree_index_new _api_name(index_new)
struct memtx_tree_index {
struct index base;
struct memtx_tree tree;
- struct tuple **build_array;
+ memtx_tree_elem_t *build_array;
size_t build_array_size, build_array_alloc_size;
struct memtx_gc_task gc_task;
struct memtx_tree_iterator gc_iterator;
@@ -102,8 +252,8 @@ struct memtx_tree_index {
static int
memtx_tree_qcompare(const void* a, const void *b, void *c)
{
- return tuple_compare(*(struct tuple **)a,
- *(struct tuple **)b, (struct key_def *)c);
+ return MEMTX_TREE_COMPARE((memtx_tree_elem_t *)a,
+ (memtx_tree_elem_t *)b, c);
}
/* {{{ MemtxTree Iterators ****************************************/
@@ -113,8 +263,8 @@ struct tree_iterator {
struct index_def *index_def;
struct memtx_tree_iterator tree_iterator;
enum iterator_type type;
- struct memtx_tree_key_data key_data;
- struct tuple *current_tuple;
+ memtx_tree_key_t key_data;
+ memtx_tree_elem_t current;
/** Memory pool the iterator was allocated from. */
struct mempool *pool;
};
@@ -126,7 +276,7 @@ static void
tree_iterator_free(struct iterator *iterator);
static inline struct tree_iterator *
-tree_iterator(struct iterator *it)
+tree_iterator_cast(struct iterator *it)
{
assert(it->free == tree_iterator_free);
return (struct tree_iterator *) it;
@@ -135,9 +285,10 @@ tree_iterator(struct iterator *it)
static void
tree_iterator_free(struct iterator *iterator)
{
- struct tree_iterator *it = tree_iterator(iterator);
- if (it->current_tuple != NULL)
- tuple_unref(it->current_tuple);
+ struct tree_iterator *it = tree_iterator_cast(iterator);
+ struct tuple *tuple = MEMTX_TREE_ELEM_GET(&it->current);
+ if (tuple != NULL)
+ tuple_unref(tuple);
mempool_free(it->pool, it);
}
@@ -152,25 +303,27 @@ tree_iterator_dummie(struct iterator *iterator, struct tuple **ret)
static int
tree_iterator_next(struct iterator *iterator, struct tuple **ret)
{
- struct tuple **res;
- struct tree_iterator *it = tree_iterator(iterator);
- assert(it->current_tuple != NULL);
- struct tuple **check = memtx_tree_iterator_get_elem(it->tree, &it->tree_iterator);
- if (check == NULL || *check != it->current_tuple)
+ struct tree_iterator *it = tree_iterator_cast(iterator);
+ assert(MEMTX_TREE_ELEM_GET(&it->current) != NULL);
+ memtx_tree_elem_t *check =
+ memtx_tree_iterator_get_elem(it->tree, &it->tree_iterator);
+ if (check == NULL || !MEMTX_TREE_IDENTICAL(check, &it->current)) {
it->tree_iterator =
- memtx_tree_upper_bound_elem(it->tree, it->current_tuple,
- NULL);
- else
+ memtx_tree_upper_bound_elem(it->tree, it->current, NULL);
+ } else {
memtx_tree_iterator_next(it->tree, &it->tree_iterator);
- tuple_unref(it->current_tuple);
- it->current_tuple = NULL;
- res = memtx_tree_iterator_get_elem(it->tree, &it->tree_iterator);
+ }
+ tuple_unref(MEMTX_TREE_ELEM_GET(&it->current));
+ memtx_tree_elem_t *res =
+ memtx_tree_iterator_get_elem(it->tree, &it->tree_iterator);
if (res == NULL) {
iterator->next = tree_iterator_dummie;
+ memset(&it->current, 0, sizeof(it->current));
*ret = NULL;
} else {
- *ret = it->current_tuple = *res;
- tuple_ref(it->current_tuple);
+ *ret = MEMTX_TREE_ELEM_GET(res);
+ tuple_ref(*ret);
+ it->current = *res;
}
return 0;
}
@@ -178,23 +331,26 @@ tree_iterator_next(struct iterator *iterator, struct tuple **ret)
static int
tree_iterator_prev(struct iterator *iterator, struct tuple **ret)
{
- struct tree_iterator *it = tree_iterator(iterator);
- assert(it->current_tuple != NULL);
- struct tuple **check = memtx_tree_iterator_get_elem(it->tree, &it->tree_iterator);
- if (check == NULL || *check != it->current_tuple)
+ struct tree_iterator *it = tree_iterator_cast(iterator);
+ assert(MEMTX_TREE_ELEM_GET(&it->current) != NULL);
+ memtx_tree_elem_t *check =
+ memtx_tree_iterator_get_elem(it->tree, &it->tree_iterator);
+ if (check == NULL || !MEMTX_TREE_IDENTICAL(check, &it->current)) {
it->tree_iterator =
- memtx_tree_lower_bound_elem(it->tree, it->current_tuple,
- NULL);
+ memtx_tree_lower_bound_elem(it->tree, it->current, NULL);
+ }
memtx_tree_iterator_prev(it->tree, &it->tree_iterator);
- tuple_unref(it->current_tuple);
- it->current_tuple = NULL;
- struct tuple **res = memtx_tree_iterator_get_elem(it->tree, &it->tree_iterator);
+ tuple_unref(MEMTX_TREE_ELEM_GET(&it->current));
+ memtx_tree_elem_t *res =
+ memtx_tree_iterator_get_elem(it->tree, &it->tree_iterator);
if (!res) {
iterator->next = tree_iterator_dummie;
+ memset(&it->current, 0, sizeof(it->current));
*ret = NULL;
} else {
- *ret = it->current_tuple = *res;
- tuple_ref(it->current_tuple);
+ *ret = MEMTX_TREE_ELEM_GET(res);
+ tuple_ref(*ret);
+ it->current = *res;
}
return 0;
}
@@ -202,28 +358,29 @@ tree_iterator_prev(struct iterator *iterator, struct tuple **ret)
static int
tree_iterator_next_equal(struct iterator *iterator, struct tuple **ret)
{
- struct tree_iterator *it = tree_iterator(iterator);
- assert(it->current_tuple != NULL);
- struct tuple **check = memtx_tree_iterator_get_elem(it->tree,
- &it->tree_iterator);
- if (check == NULL || *check != it->current_tuple)
+ struct tree_iterator *it = tree_iterator_cast(iterator);
+ assert(MEMTX_TREE_ELEM_GET(&it->current) != NULL);
+ memtx_tree_elem_t *check =
+ memtx_tree_iterator_get_elem(it->tree, &it->tree_iterator);
+ if (check == NULL || !MEMTX_TREE_IDENTICAL(check, &it->current)) {
it->tree_iterator =
- memtx_tree_upper_bound_elem(it->tree, it->current_tuple,
- NULL);
- else
+ memtx_tree_upper_bound_elem(it->tree, it->current, NULL);
+ } else {
memtx_tree_iterator_next(it->tree, &it->tree_iterator);
- tuple_unref(it->current_tuple);
- it->current_tuple = NULL;
- struct tuple **res = memtx_tree_iterator_get_elem(it->tree,
- &it->tree_iterator);
+ }
+ tuple_unref(MEMTX_TREE_ELEM_GET(&it->current));
+ memtx_tree_elem_t *res =
+ memtx_tree_iterator_get_elem(it->tree, &it->tree_iterator);
/* Use user key def to save a few loops. */
- if (!res || memtx_tree_compare_key(*res, &it->key_data,
+ if (!res || MEMTX_TREE_COMPARE_KEY(res, &it->key_data,
it->index_def->key_def) != 0) {
iterator->next = tree_iterator_dummie;
+ memset(&it->current, 0, sizeof(it->current));
*ret = NULL;
} else {
- *ret = it->current_tuple = *res;
- tuple_ref(it->current_tuple);
+ *ret = MEMTX_TREE_ELEM_GET(res);
+ tuple_ref(*ret);
+ it->current = *res;
}
return 0;
}
@@ -231,27 +388,29 @@ tree_iterator_next_equal(struct iterator *iterator, struct tuple **ret)
static int
tree_iterator_prev_equal(struct iterator *iterator, struct tuple **ret)
{
- struct tree_iterator *it = tree_iterator(iterator);
- assert(it->current_tuple != NULL);
- struct tuple **check = memtx_tree_iterator_get_elem(it->tree,
- &it->tree_iterator);
- if (check == NULL || *check != it->current_tuple)
+ struct tree_iterator *it = tree_iterator_cast(iterator);
+ assert(MEMTX_TREE_ELEM_GET(&it->current) != NULL);
+ memtx_tree_elem_t *check =
+ memtx_tree_iterator_get_elem(it->tree, &it->tree_iterator);
+ if (check == NULL || !MEMTX_TREE_IDENTICAL(check, &it->current)) {
it->tree_iterator =
- memtx_tree_lower_bound_elem(it->tree, it->current_tuple,
- NULL);
+ memtx_tree_lower_bound_elem(it->tree, it->current, NULL);
+ }
memtx_tree_iterator_prev(it->tree, &it->tree_iterator);
- tuple_unref(it->current_tuple);
- it->current_tuple = NULL;
- struct tuple **res = memtx_tree_iterator_get_elem(it->tree,
- &it->tree_iterator);
+ tuple_unref(MEMTX_TREE_ELEM_GET(&it->current));
+ memtx_tree_elem_t *res =
+ memtx_tree_iterator_get_elem(it->tree, &it->tree_iterator);
/* Use user key def to save a few loops. */
- if (!res || memtx_tree_compare_key(*res, &it->key_data,
- it->index_def->key_def) != 0) {
+ if (!res ||
+ MEMTX_TREE_COMPARE_KEY(res, &it->key_data,
+ it->index_def->key_def) != 0) {
iterator->next = tree_iterator_dummie;
+ memset(&it->current, 0, sizeof(it->current));
*ret = NULL;
} else {
- *ret = it->current_tuple = *res;
- tuple_ref(it->current_tuple);
+ *ret = MEMTX_TREE_ELEM_GET(res);
+ tuple_ref(*ret);
+ it->current = *res;
}
return 0;
}
@@ -259,7 +418,7 @@ tree_iterator_prev_equal(struct iterator *iterator, struct tuple **ret)
static void
tree_iterator_set_next_method(struct tree_iterator *it)
{
- assert(it->current_tuple != NULL);
+ assert(MEMTX_TREE_ELEM_GET(&it->current) != NULL);
switch (it->type) {
case ITER_EQ:
it->base.next = tree_iterator_next_equal;
@@ -288,13 +447,15 @@ static int
tree_iterator_start(struct iterator *iterator, struct tuple **ret)
{
*ret = NULL;
- struct tree_iterator *it = tree_iterator(iterator);
+ struct tree_iterator *it = tree_iterator_cast(iterator);
it->base.next = tree_iterator_dummie;
const struct memtx_tree *tree = it->tree;
enum iterator_type type = it->type;
bool exact = false;
- assert(it->current_tuple == NULL);
- if (it->key_data.key == 0) {
+ assert(MEMTX_TREE_ELEM_GET(&it->current) == NULL);
+ uint32_t part_count;
+ const char *key = MEMTX_TREE_KEY_GET(&it->key_data, &part_count);
+ if (key == NULL) {
if (iterator_type_is_reverse(it->type))
it->tree_iterator = memtx_tree_iterator_last(tree);
else
@@ -330,12 +491,13 @@ tree_iterator_start(struct iterator *iterator, struct tuple **ret)
}
}
- struct tuple **res = memtx_tree_iterator_get_elem(it->tree,
- &it->tree_iterator);
+ memtx_tree_elem_t *res =
+ memtx_tree_iterator_get_elem(it->tree, &it->tree_iterator);
if (!res)
return 0;
- *ret = it->current_tuple = *res;
- tuple_ref(it->current_tuple);
+ *ret = MEMTX_TREE_ELEM_GET(res);
+ tuple_ref(*ret);
+ it->current = *res;
tree_iterator_set_next_method(it);
return 0;
}
@@ -389,7 +551,8 @@ memtx_tree_index_gc_run(struct memtx_gc_task *task, bool *done)
unsigned int loops = 0;
while (!memtx_tree_iterator_is_invalid(itr)) {
- struct tuple *tuple = *memtx_tree_iterator_get_elem(tree, itr);
+ memtx_tree_elem_t *res = memtx_tree_iterator_get_elem(tree, itr);
+ struct tuple *tuple = MEMTX_TREE_ELEM_GET(res);
memtx_tree_iterator_next(tree, itr);
tuple_unref(tuple);
if (++loops >= YIELD_LOOPS) {
@@ -469,8 +632,8 @@ static int
memtx_tree_index_random(struct index *base, uint32_t rnd, struct tuple **result)
{
struct memtx_tree_index *index = (struct memtx_tree_index *)base;
- struct tuple **res = memtx_tree_random(&index->tree, rnd);
- *result = res != NULL ? *res : NULL;
+ memtx_tree_elem_t *res = memtx_tree_random(&index->tree, rnd);
+ *result = res != NULL ? MEMTX_TREE_ELEM_GET(res) : NULL;
return 0;
}
@@ -490,26 +653,50 @@ memtx_tree_index_get(struct index *base, const char *key,
assert(base->def->opts.is_unique &&
part_count == base->def->key_def->part_count);
struct memtx_tree_index *index = (struct memtx_tree_index *)base;
- struct memtx_tree_key_data key_data;
- key_data.key = key;
- key_data.part_count = part_count;
- struct tuple **res = memtx_tree_find(&index->tree, &key_data);
- *result = res != NULL ? *res : NULL;
+ memtx_tree_key_t key_data;
+ struct key_def *cmp_def = memtx_tree_index_cmp_def(index);
+ MEMTX_TREE_KEY_SET(&key_data, key, part_count, cmp_def);
+ memtx_tree_elem_t *res = memtx_tree_find(&index->tree, &key_data);
+ *result = res != NULL ? MEMTX_TREE_ELEM_GET(res) : NULL;
return 0;
}
+static int
+memtx_tree_index_insert_tuple(struct index *base, struct tuple *tuple,
+ struct tuple **replaced)
+{
+ struct memtx_tree_index *index = (struct memtx_tree_index *)base;
+ memtx_tree_elem_t data;
+ MEMTX_TREE_ELEM_SET(&data, tuple, index->tree.arg);
+ memtx_tree_elem_t data_replaced;
+ memset(&data_replaced, 0, sizeof(data_replaced));
+ int rc = memtx_tree_insert(&index->tree, data, &data_replaced);
+ if (replaced != NULL)
+ *replaced = MEMTX_TREE_ELEM_GET(&data_replaced);
+ return rc;
+}
+
+static void
+memtx_tree_index_delete_tuple(struct index *base, struct tuple *tuple)
+{
+ struct memtx_tree_index *index = (struct memtx_tree_index *)base;
+ memtx_tree_elem_t data;
+ MEMTX_TREE_ELEM_SET(&data, tuple, index->tree.arg);
+ memtx_tree_delete(&index->tree, data);
+}
+
static int
memtx_tree_index_replace(struct index *base, struct tuple *old_tuple,
struct tuple *new_tuple, enum dup_replace_mode mode,
struct tuple **result)
{
- struct memtx_tree_index *index = (struct memtx_tree_index *)base;
if (new_tuple) {
struct tuple *dup_tuple = NULL;
/* Try to optimistically replace the new_tuple. */
- int tree_res = memtx_tree_insert(&index->tree,
- new_tuple, &dup_tuple);
+ int tree_res =
+ memtx_tree_index_insert_tuple(base, new_tuple,
+ &dup_tuple);
if (tree_res) {
diag_set(OutOfMemory, MEMTX_EXTENT_SIZE,
"memtx_tree_index", "replace");
@@ -519,9 +706,9 @@ memtx_tree_index_replace(struct index *base, struct tuple *old_tuple,
uint32_t errcode = replace_check_dup(old_tuple,
dup_tuple, mode);
if (errcode) {
- memtx_tree_delete(&index->tree, new_tuple);
+ memtx_tree_index_delete_tuple(base, new_tuple);
if (dup_tuple)
- memtx_tree_insert(&index->tree, dup_tuple, 0);
+ memtx_tree_index_insert_tuple(base, dup_tuple, 0);
struct space *sp = space_cache_find(base->def->space_id);
if (sp != NULL)
diag_set(ClientError, errcode, base->def->name,
@@ -533,9 +720,8 @@ memtx_tree_index_replace(struct index *base, struct tuple *old_tuple,
return 0;
}
}
- if (old_tuple) {
- memtx_tree_delete(&index->tree, old_tuple);
- }
+ if (old_tuple)
+ memtx_tree_index_delete_tuple(base, old_tuple);
*result = old_tuple;
return 0;
}
@@ -574,12 +760,12 @@ memtx_tree_index_create_iterator(struct index *base, enum iterator_type type,
it->base.next = tree_iterator_start;
it->base.free = tree_iterator_free;
it->type = type;
- it->key_data.key = key;
- it->key_data.part_count = part_count;
+ struct key_def *cmp_def = memtx_tree_index_cmp_def(index);
+ MEMTX_TREE_KEY_SET(&it->key_data, key, part_count, cmp_def);
it->index_def = base->def;
it->tree = &index->tree;
it->tree_iterator = memtx_tree_invalid_iterator();
- it->current_tuple = NULL;
+ memset(&it->current, 0, sizeof(it->current));
return (struct iterator *)it;
}
@@ -597,8 +783,8 @@ memtx_tree_index_reserve(struct index *base, uint32_t size_hint)
struct memtx_tree_index *index = (struct memtx_tree_index *)base;
if (size_hint < index->build_array_alloc_size)
return 0;
- struct tuple **tmp = (struct tuple **)realloc(index->build_array,
- size_hint * sizeof(*tmp));
+ memtx_tree_elem_t *tmp =
+ realloc(index->build_array, size_hint * sizeof(*tmp));
if (tmp == NULL) {
diag_set(OutOfMemory, size_hint * sizeof(*tmp),
"memtx_tree_index", "reserve");
@@ -614,20 +800,20 @@ memtx_tree_index_build_next(struct index *base, struct tuple *tuple)
{
struct memtx_tree_index *index = (struct memtx_tree_index *)base;
if (index->build_array == NULL) {
- index->build_array = (struct tuple **)malloc(MEMTX_EXTENT_SIZE);
+ index->build_array = (memtx_tree_elem_t *)malloc(MEMTX_EXTENT_SIZE);
if (index->build_array == NULL) {
diag_set(OutOfMemory, MEMTX_EXTENT_SIZE,
"memtx_tree_index", "build_next");
return -1;
}
index->build_array_alloc_size =
- MEMTX_EXTENT_SIZE / sizeof(struct tuple*);
+ MEMTX_EXTENT_SIZE / sizeof(index->build_array[0]);
}
assert(index->build_array_size <= index->build_array_alloc_size);
if (index->build_array_size == index->build_array_alloc_size) {
index->build_array_alloc_size = index->build_array_alloc_size +
index->build_array_alloc_size / 2;
- struct tuple **tmp = (struct tuple **)
+ memtx_tree_elem_t *tmp =
realloc(index->build_array,
index->build_array_alloc_size * sizeof(*tmp));
if (tmp == NULL) {
@@ -637,7 +823,9 @@ memtx_tree_index_build_next(struct index *base, struct tuple *tuple)
}
index->build_array = tmp;
}
- index->build_array[index->build_array_size++] = tuple;
+ memtx_tree_elem_t *elem =
+ &index->build_array[index->build_array_size++];
+ MEMTX_TREE_ELEM_SET(elem, tuple, memtx_tree_index_cmp_def(index));
return 0;
}
@@ -647,10 +835,9 @@ memtx_tree_index_end_build(struct index *base)
struct memtx_tree_index *index = (struct memtx_tree_index *)base;
struct key_def *cmp_def = memtx_tree_index_cmp_def(index);
qsort_arg(index->build_array, index->build_array_size,
- sizeof(struct tuple *),
- memtx_tree_qcompare, cmp_def);
+ sizeof(index->build_array[0]), memtx_tree_qcompare, cmp_def);
memtx_tree_build(&index->tree, index->build_array,
- index->build_array_size);
+ index->build_array_size);
free(index->build_array);
index->build_array = NULL;
@@ -681,12 +868,12 @@ tree_snapshot_iterator_next(struct snapshot_iterator *iterator, uint32_t *size)
assert(iterator->free == tree_snapshot_iterator_free);
struct tree_snapshot_iterator *it =
(struct tree_snapshot_iterator *)iterator;
- struct tuple **res = memtx_tree_iterator_get_elem(it->tree,
- &it->tree_iterator);
+ memtx_tree_elem_t *res =
+ memtx_tree_iterator_get_elem(it->tree, &it->tree_iterator);
if (res == NULL)
return NULL;
memtx_tree_iterator_next(it->tree, &it->tree_iterator);
- return tuple_data_range(*res, size);
+ return tuple_data_range(MEMTX_TREE_ELEM_GET(res), size);
}
/**
@@ -770,3 +957,70 @@ memtx_tree_index_new(struct memtx_engine *memtx, struct index_def *def)
memtx_index_extent_free, memtx);
return &index->base;
}
+
+#undef _tree_api_name
+#undef memtx_tree
+#undef memtx_tree_iterator
+#undef memtx_tree_iterator_is_invalid
+#undef memtx_tree_iterator_first
+#undef memtx_tree_iterator_last
+#undef memtx_tree_iterator_get_elem
+#undef memtx_tree_iterator_next
+#undef memtx_tree_iterator_prev
+#undef memtx_tree_iterator_freeze
+#undef memtx_tree_iterator_destroy
+#undef memtx_tree_create
+#undef memtx_tree_build
+#undef memtx_tree_destroy
+#undef memtx_tree_find
+#undef memtx_tree_insert
+#undef memtx_tree_delete
+#undef memtx_tree_size
+#undef memtx_tree_mem_used
+#undef memtx_tree_random
+#undef memtx_tree_invalid_iterator
+#undef memtx_tree_lower_bound
+#undef memtx_tree_upper_bound
+#undef memtx_tree_lower_bound_elem
+#undef memtx_tree_upper_bound_elem
+
+#undef _api_name
+#undef memtx_tree_index
+#undef memtx_tree_qcompare
+#undef tree_iterator
+#undef tree_iterator_free
+#undef tree_iterator_cast
+#undef tree_iterator_dummie
+#undef tree_iterator_next
+#undef tree_iterator_prev
+#undef tree_iterator_next_equal
+#undef tree_iterator_prev_equal
+#undef tree_iterator_set_next_method
+#undef tree_iterator_start
+#undef memtx_tree_index_cmp_def
+#undef memtx_tree_index_free
+#undef memtx_tree_index_gc_run
+#undef memtx_tree_index_gc_free
+#undef memtx_tree_index_gc_vtab
+#undef memtx_tree_index_destroy
+#undef memtx_tree_index_update_def
+#undef memtx_tree_index_depends_on_pk
+#undef memtx_tree_index_size
+#undef memtx_tree_index_bsize
+#undef memtx_tree_index_random
+#undef memtx_tree_index_count
+#undef memtx_tree_index_get
+#undef memtx_tree_index_insert_tuple
+#undef memtx_tree_index_delete_tuple
+#undef memtx_tree_index_replace
+#undef memtx_tree_index_create_iterator
+#undef memtx_tree_index_begin_build
+#undef memtx_tree_index_build_next
+#undef memtx_tree_index_reserve
+#undef memtx_tree_index_end_build
+#undef tree_snapshot_iterator
+#undef tree_snapshot_iterator_free
+#undef tree_snapshot_iterator_next
+#undef tree_index_create_snapshot_iterator
+#undef memtx_tree_index_vtab
+#undef memtx_tree_index_new
diff --git a/src/box/memtx_tree_decl.c b/src/box/memtx_tree_decl.c
new file mode 100644
index 000000000..dd7684d8e
--- /dev/null
+++ b/src/box/memtx_tree_decl.c
@@ -0,0 +1,84 @@
+/*
+ * Copyright 2010-2019, Tarantool AUTHORS, please see AUTHORS file.
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the
+ * following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY <COPYRIGHT HOLDER> ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
+ * <COPYRIGHT HOLDER> OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
+ * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+#include <stdbool.h>
+#include <stdint.h>
+#include "tuple_compare.h"
+#include "memtx_tree.h"
+
+/* {{{ Memtx tree of tuples class. ******************************/
+
+/** Struct that is used as a key in BPS tree definition. */
+struct memtx_basic_tree_key_data {
+ /** Sequence of msgpacked search fields. */
+ const char *key;
+ /** Number of msgpacked search fields. */
+ uint32_t part_count;
+};
+
+#define MEMTX_TREE_NAME memtx_basic_tree
+#define memtx_tree_elem_t struct tuple *
+#define memtx_tree_key_t struct memtx_basic_tree_key_data
+#define MEMTX_TREE_COMPARE(elem_a_ptr, elem_b_ptr, key_def) \
+ tuple_compare(*elem_a_ptr, *elem_b_ptr, key_def)
+#define MEMTX_TREE_COMPARE_KEY(elem_ptr, key_ptr, key_def) \
+ tuple_compare_with_key(*elem_ptr, (key_ptr)->key, \
+ (key_ptr)->part_count, key_def)
+#define MEMTX_TREE_ELEM_SET(elem_ptr, tuple, key_def) \
+ ({(void)key_def; *elem_ptr = tuple;})
+#define MEMTX_TREE_KEY_SET(key_ptr, key_val, part_count_val, key_def) \
+ ({(void)key_def; (key_ptr)->key = key_val; \
+ (key_ptr)->part_count = part_count_val;})
+#define MEMTX_TREE_ELEM_GET(elem_ptr) (*(elem_ptr))
+#define MEMTX_TREE_KEY_GET(key_ptr, part_count_ptr) \
+ ({*part_count_ptr = (key_ptr)->part_count; (key_ptr)->key;})
+#define MEMTX_TREE_IDENTICAL(elem_a_ptr, elem_b_ptr) \
+ ({*elem_a_ptr == *elem_b_ptr;})
+
+#include "memtx_tree.c"
+
+#undef memtx_tree_key_t
+#undef memtx_tree_elem_t
+#undef MEMTX_TREE_KEY_GET
+#undef MEMTX_TREE_ELEM_GET
+#undef MEMTX_TREE_KEY_SET
+#undef MEMTX_TREE_ELEM_SET
+#undef MEMTX_TREE_COMPARE_KEY
+#undef MEMTX_TREE_COMPARE
+#undef MEMTX_TREE_NAME
+#undef MEMTX_TREE_IDENTICAL
+
+/* }}} */
+
+struct index *
+memtx_tree_index_new(struct memtx_engine *memtx, struct index_def *def)
+{
+ return memtx_basic_tree_index_new(memtx, def);
+}
--
2.20.1
^ permalink raw reply [flat|nested] 27+ messages in thread
* Re: [PATCH v3 5/7] memtx: rework memtx_tree to store arbitrary nodes
2019-02-22 15:42 ` [PATCH v3 5/7] memtx: rework memtx_tree to store arbitrary nodes Kirill Shcherbatov
@ 2019-02-25 16:57 ` Vladimir Davydov
2019-02-26 12:10 ` [tarantool-patches] " Kirill Shcherbatov
0 siblings, 1 reply; 27+ messages in thread
From: Vladimir Davydov @ 2019-02-25 16:57 UTC (permalink / raw)
To: Kirill Shcherbatov; +Cc: tarantool-patches
On Fri, Feb 22, 2019 at 06:42:30PM +0300, Kirill Shcherbatov wrote:
> diff --git a/src/box/CMakeLists.txt b/src/box/CMakeLists.txt
> index 5521e489e..dc8cc2cd5 100644
> --- a/src/box/CMakeLists.txt
> +++ b/src/box/CMakeLists.txt
> @@ -64,7 +64,7 @@ add_library(box STATIC
> index_def.c
> iterator_type.c
> memtx_hash.c
> - memtx_tree.c
> + memtx_tree_decl.c
> memtx_rtree.c
> memtx_bitset.c
> engine.c
> diff --git a/src/box/memtx_tree.c b/src/box/memtx_tree.c
> index 07d20473f..c48f96bf6 100644
> --- a/src/box/memtx_tree.c
> +++ b/src/box/memtx_tree.c
> @@ -152,25 +303,27 @@ tree_iterator_dummie(struct iterator *iterator, struct tuple **ret)
> static int
> tree_iterator_next(struct iterator *iterator, struct tuple **ret)
> {
> - struct tuple **res;
> - struct tree_iterator *it = tree_iterator(iterator);
> - assert(it->current_tuple != NULL);
> - struct tuple **check = memtx_tree_iterator_get_elem(it->tree, &it->tree_iterator);
> - if (check == NULL || *check != it->current_tuple)
> + struct tree_iterator *it = tree_iterator_cast(iterator);
> + assert(MEMTX_TREE_ELEM_GET(&it->current) != NULL);
> + memtx_tree_elem_t *check =
> + memtx_tree_iterator_get_elem(it->tree, &it->tree_iterator);
> + if (check == NULL || !MEMTX_TREE_IDENTICAL(check, &it->current)) {
> it->tree_iterator =
> - memtx_tree_upper_bound_elem(it->tree, it->current_tuple,
> - NULL);
> - else
> + memtx_tree_upper_bound_elem(it->tree, it->current, NULL);
> + } else {
> memtx_tree_iterator_next(it->tree, &it->tree_iterator);
> - tuple_unref(it->current_tuple);
> - it->current_tuple = NULL;
> - res = memtx_tree_iterator_get_elem(it->tree, &it->tree_iterator);
> + }
> + tuple_unref(MEMTX_TREE_ELEM_GET(&it->current));
> + memtx_tree_elem_t *res =
> + memtx_tree_iterator_get_elem(it->tree, &it->tree_iterator);
> if (res == NULL) {
> iterator->next = tree_iterator_dummie;
> + memset(&it->current, 0, sizeof(it->current));
This looks like encapsulation violation. Please either use
MEMTX_TREE_ELEM_SET for this or introduce MEMTX_TREE_ELEM_CLEAR
helper.
> @@ -490,26 +653,50 @@ memtx_tree_index_get(struct index *base, const char *key,
> assert(base->def->opts.is_unique &&
> part_count == base->def->key_def->part_count);
> struct memtx_tree_index *index = (struct memtx_tree_index *)base;
> - struct memtx_tree_key_data key_data;
> - key_data.key = key;
> - key_data.part_count = part_count;
> - struct tuple **res = memtx_tree_find(&index->tree, &key_data);
> - *result = res != NULL ? *res : NULL;
> + memtx_tree_key_t key_data;
> + struct key_def *cmp_def = memtx_tree_index_cmp_def(index);
> + MEMTX_TREE_KEY_SET(&key_data, key, part_count, cmp_def);
> + memtx_tree_elem_t *res = memtx_tree_find(&index->tree, &key_data);
> + *result = res != NULL ? MEMTX_TREE_ELEM_GET(res) : NULL;
> return 0;
> }
>
> +static int
> +memtx_tree_index_insert_tuple(struct index *base, struct tuple *tuple,
> + struct tuple **replaced)
> +{
> + struct memtx_tree_index *index = (struct memtx_tree_index *)base;
> + memtx_tree_elem_t data;
> + MEMTX_TREE_ELEM_SET(&data, tuple, index->tree.arg);
How is this going to work for multikey indexes?
> + memtx_tree_elem_t data_replaced;
> + memset(&data_replaced, 0, sizeof(data_replaced));
> + int rc = memtx_tree_insert(&index->tree, data, &data_replaced);
> + if (replaced != NULL)
> + *replaced = MEMTX_TREE_ELEM_GET(&data_replaced);
> + return rc;
> +}
> +
> +static void
> +memtx_tree_index_delete_tuple(struct index *base, struct tuple *tuple)
> +{
> + struct memtx_tree_index *index = (struct memtx_tree_index *)base;
> + memtx_tree_elem_t data;
> + MEMTX_TREE_ELEM_SET(&data, tuple, index->tree.arg);
> + memtx_tree_delete(&index->tree, data);
> +}
> +
> static int
> memtx_tree_index_replace(struct index *base, struct tuple *old_tuple,
> struct tuple *new_tuple, enum dup_replace_mode mode,
> struct tuple **result)
> {
> - struct memtx_tree_index *index = (struct memtx_tree_index *)base;
> if (new_tuple) {
> struct tuple *dup_tuple = NULL;
>
> /* Try to optimistically replace the new_tuple. */
> - int tree_res = memtx_tree_insert(&index->tree,
> - new_tuple, &dup_tuple);
> + int tree_res =
> + memtx_tree_index_insert_tuple(base, new_tuple,
> + &dup_tuple);
> if (tree_res) {
> diag_set(OutOfMemory, MEMTX_EXTENT_SIZE,
> "memtx_tree_index", "replace");
> @@ -519,9 +706,9 @@ memtx_tree_index_replace(struct index *base, struct tuple *old_tuple,
> uint32_t errcode = replace_check_dup(old_tuple,
> dup_tuple, mode);
> if (errcode) {
> - memtx_tree_delete(&index->tree, new_tuple);
> + memtx_tree_index_delete_tuple(base, new_tuple);
> if (dup_tuple)
> - memtx_tree_insert(&index->tree, dup_tuple, 0);
> + memtx_tree_index_insert_tuple(base, dup_tuple, 0);
> struct space *sp = space_cache_find(base->def->space_id);
> if (sp != NULL)
> diag_set(ClientError, errcode, base->def->name,
> @@ -533,9 +720,8 @@ memtx_tree_index_replace(struct index *base, struct tuple *old_tuple,
> return 0;
> }
> }
> - if (old_tuple) {
> - memtx_tree_delete(&index->tree, old_tuple);
> - }
> + if (old_tuple)
> + memtx_tree_index_delete_tuple(base, old_tuple);
> *result = old_tuple;
> return 0;
> }
^ permalink raw reply [flat|nested] 27+ messages in thread
* Re: [tarantool-patches] Re: [PATCH v3 5/7] memtx: rework memtx_tree to store arbitrary nodes
2019-02-25 16:57 ` Vladimir Davydov
@ 2019-02-26 12:10 ` Kirill Shcherbatov
0 siblings, 0 replies; 27+ messages in thread
From: Kirill Shcherbatov @ 2019-02-26 12:10 UTC (permalink / raw)
To: tarantool-patches, Vladimir Davydov
> This looks like encapsulation violation. Please either use
> MEMTX_TREE_ELEM_SET for this or introduce MEMTX_TREE_ELEM_CLEAR
> helper.
If we use MEMTX_TREE_ELEM_SET for this purpose, in case of hints we need this
additional check
- (elem_ptr)->hint = tuple_hint(info, key_def); \
+ (elem_ptr)->hint = info != NULL ? tuple_hint(info, key_def) : 0; \
So, let's introduce MEMTX_TREE_ELEM_CLEAR.
> How is this going to work for multikey indexes?
You may look at the kshch/gh-1257-multikey-index branch - there is a multikey
index prototype built on top of the patches we are discussing.
^ permalink raw reply [flat|nested] 27+ messages in thread
* [PATCH v3 6/7] memtx: rename memtx_tree.c to memtx_tree_impl.h
2019-02-22 15:42 [PATCH v3 0/7] box: introduce hint option for memtx tree index Kirill Shcherbatov
` (4 preceding siblings ...)
2019-02-22 15:42 ` [PATCH v3 5/7] memtx: rework memtx_tree to store arbitrary nodes Kirill Shcherbatov
@ 2019-02-22 15:42 ` Kirill Shcherbatov
2019-02-22 15:42 ` [PATCH v3 7/7] memtx: introduce tuple compare hint Kirill Shcherbatov
6 siblings, 0 replies; 27+ messages in thread
From: Kirill Shcherbatov @ 2019-02-22 15:42 UTC (permalink / raw)
To: tarantool-patches, vdavydov.dev; +Cc: Kirill Shcherbatov
File memtx_tree.c, which implements the template class memtx_tree,
is renamed to memtx_tree_impl.h, because including template code via
header files is the more common convention.
Needed for #3961
---
src/box/memtx_tree_decl.c | 2 +-
src/box/{memtx_tree.c => memtx_tree_impl.h} | 0
2 files changed, 1 insertion(+), 1 deletion(-)
rename src/box/{memtx_tree.c => memtx_tree_impl.h} (100%)
diff --git a/src/box/memtx_tree_decl.c b/src/box/memtx_tree_decl.c
index dd7684d8e..82209eaab 100644
--- a/src/box/memtx_tree_decl.c
+++ b/src/box/memtx_tree_decl.c
@@ -62,7 +62,7 @@ struct memtx_basic_tree_key_data {
#define MEMTX_TREE_IDENTICAL(elem_a_ptr, elem_b_ptr) \
({*elem_a_ptr == *elem_b_ptr;})
-#include "memtx_tree.c"
+#include "memtx_tree_impl.h"
#undef memtx_tree_key_t
#undef memtx_tree_elem_t
diff --git a/src/box/memtx_tree.c b/src/box/memtx_tree_impl.h
similarity index 100%
rename from src/box/memtx_tree.c
rename to src/box/memtx_tree_impl.h
--
2.20.1
^ permalink raw reply [flat|nested] 27+ messages in thread
* [PATCH v3 7/7] memtx: introduce tuple compare hint
2019-02-22 15:42 [PATCH v3 0/7] box: introduce hint option for memtx tree index Kirill Shcherbatov
` (5 preceding siblings ...)
2019-02-22 15:42 ` [PATCH v3 6/7] memtx: rename memtx_tree.c to memtx_tree_impl.h Kirill Shcherbatov
@ 2019-02-22 15:42 ` Kirill Shcherbatov
2019-02-25 17:44 ` Vladimir Davydov
6 siblings, 1 reply; 27+ messages in thread
From: Kirill Shcherbatov @ 2019-02-22 15:42 UTC (permalink / raw)
To: tarantool-patches, vdavydov.dev; +Cc: Alexandr Lyapunov
From: Alexandr Lyapunov <aleks@tarantool.org>
Implement functions for retrieving tuple hints for a particular
key_def. Hint is an integer that can be used for tuple comparison
optimization: if a hint of one tuple is less than a hint of another
then the first tuple is definitely less than the second; only if
hints are equal tuple_compare must be called for getting comparison
result.
Hints are calculated using only the first part of key_def.
Hints are only useful when:
* they are precalculated and stored along with the tuple;
calculation is not cheap (cheaper than tuple_compare btw) but
precalculated hints allow to compare tuples without even fetching
tuple data.
* first part of key_def is 'string'(for now)
* since hint is calculated using only the first part of key_def
(and only first several characters if it is a string) this part
must be different with high probability for every tuple pair.
Closes #3961
---
src/box/CMakeLists.txt | 1 +
src/box/key_def.c | 2 +
src/box/key_def.h | 44 ++++++++
src/box/memtx_tree_decl.c | 89 ++++++++++++++++
src/box/tuple_hint.cc | 210 ++++++++++++++++++++++++++++++++++++++
src/box/tuple_hint.h | 51 +++++++++
src/coll.c | 33 ++++++
src/coll.h | 4 +
8 files changed, 434 insertions(+)
create mode 100644 src/box/tuple_hint.cc
create mode 100644 src/box/tuple_hint.h
diff --git a/src/box/CMakeLists.txt b/src/box/CMakeLists.txt
index dc8cc2cd5..05a4f40f6 100644
--- a/src/box/CMakeLists.txt
+++ b/src/box/CMakeLists.txt
@@ -38,6 +38,7 @@ add_library(tuple STATIC
tuple_format.c
tuple_update.c
tuple_compare.cc
+ tuple_hint.cc
tuple_extract_key.cc
tuple_hash.cc
tuple_bloom.c
diff --git a/src/box/key_def.c b/src/box/key_def.c
index 432b72a97..ced5b0580 100644
--- a/src/box/key_def.c
+++ b/src/box/key_def.c
@@ -34,6 +34,7 @@
#include "tuple_compare.h"
#include "tuple_extract_key.h"
#include "tuple_hash.h"
+#include "tuple_hint.h"
#include "column_mask.h"
#include "schema_def.h"
#include "coll_id_cache.h"
@@ -135,6 +136,7 @@ key_def_set_func(struct key_def *def)
key_def_set_compare_func(def);
key_def_set_hash_func(def);
key_def_set_extract_func(def);
+ key_def_set_hint_func(def);
}
static void
diff --git a/src/box/key_def.h b/src/box/key_def.h
index dd62f6a35..00775368f 100644
--- a/src/box/key_def.h
+++ b/src/box/key_def.h
@@ -153,6 +153,13 @@ typedef uint32_t (*tuple_hash_t)(const struct tuple *tuple,
typedef uint32_t (*key_hash_t)(const char *key,
struct key_def *key_def);
+/** @copydoc tuple_hint() */
+typedef uint64_t (*tuple_hint_t)(const struct tuple *tuple,
+ struct key_def *key_def);
+
+/** @copydoc key_hint() */
+typedef uint64_t (*key_hint_t)(const char *key, struct key_def *key_def);
+
/* Definition of a multipart key. */
struct key_def {
/** @see tuple_compare() */
@@ -167,6 +174,10 @@ struct key_def {
tuple_hash_t tuple_hash;
/** @see key_hash() */
key_hash_t key_hash;
+ /** @see tuple_hint() */
+ tuple_hint_t tuple_hint;
+ /** @see key_hint() */
+ key_hint_t key_hint;
/**
* Minimal part count which always is unique. For example,
* if a secondary index is unique, then
@@ -624,6 +635,39 @@ key_hash(const char *key, struct key_def *key_def)
return key_def->key_hash(key, key_def);
}
+ /*
+ * Get a comparison hint of a tuple.
+ * Hint is such a function h(tuple) in terms of particular key_def that
+ * has follows rules:
+ * if h(t1) < h(t2) then t1 < t2;
+ * if h(t1) > h(t2) then t1 > t2;
+ * if t1 == t2 then h(t1) == h(t2);
+ * These rules means that instead of direct tuple vs tuple (or tuple vs key)
+ * comparison one may compare theirs hints first; and only if theirs hints
+ * are equal compare the tuples themselves.
+ * @param tuple - tuple to get hint of.
+ * @param key_def - key_def that defines which comparison is used.
+ * @return the hint.
+ */
+static inline uint64_t
+tuple_hint(const struct tuple *tuple, struct key_def *key_def)
+{
+ return key_def->tuple_hint(tuple, key_def);
+}
+
+/**
+ * Get a comparison hint of a key.
+ * @See tuple_hint for hint term definition.
+ * @param key - key to get hint of.
+ * @param key_def - key_def that defines which comparison is used.
+ * @return the hint.
+ */
+static inline uint64_t
+key_hint(const char *key, struct key_def *key_def)
+{
+ return key_def->key_hint(key, key_def);
+}
+
#if defined(__cplusplus)
} /* extern "C" */
#endif /* defined(__cplusplus) */
diff --git a/src/box/memtx_tree_decl.c b/src/box/memtx_tree_decl.c
index 82209eaab..a28fdd9fd 100644
--- a/src/box/memtx_tree_decl.c
+++ b/src/box/memtx_tree_decl.c
@@ -77,8 +77,97 @@ struct memtx_basic_tree_key_data {
/* }}} */
+/* {{{ Memtx hinted tree class. *********************************/
+
+/**
+ * Struct that is used as a key in BPS tree definition in
+ * memtx_hint_only_tree and memtx_hinted_tree.
+*/
+struct memtx_hinted_tree_key_data {
+ /** Sequence of msgpacked search fields. */
+ const char *key;
+ /** Number of msgpacked search fields. */
+ uint32_t part_count;
+ /**
+ * Comparison hint. Calculated automatically on 'set'
+ * operation with MEMTX_TREE_KEY_SET().
+ */
+ uint64_t hint;
+};
+
+/**
+ * Struct that is used as a key in BPS tree definition in
+ * memtx_hint_only_tree and memtx_hinted_tree.
+ */
+struct memtx_hinted_tree_data {
+ /** Tuple this node is represents. */
+ struct tuple *tuple;
+ /**
+ * Comparison hint. Calculated automatically on 'set'
+ * operation with MEMTX_TREE_ELEM_SET().
+ */
+ uint64_t hint;
+};
+
+#define MEMTX_TREE_NAME memtx_hinted_tree
+#define memtx_tree_elem_t struct memtx_hinted_tree_data
+#define memtx_tree_key_t struct memtx_hinted_tree_key_data
+#define MEMTX_TREE_COMPARE(elem_a_ptr, elem_b_ptr, key_def) ({ \
+ int rc; \
+ if ((elem_a_ptr)->hint != (elem_b_ptr)->hint) { \
+ rc = (elem_a_ptr)->hint < (elem_b_ptr)->hint ? -1 : 1; \
+ } else { \
+ rc = tuple_compare((elem_a_ptr)->tuple, (elem_b_ptr)->tuple, \
+ key_def); \
+ } \
+ rc; \
+})
+#define MEMTX_TREE_COMPARE_KEY(elem_ptr, key_ptr, key_def) ({ \
+ int rc; \
+ if ((elem_ptr)->hint != (key_ptr)->hint) { \
+ rc = (elem_ptr)->hint < (key_ptr)->hint ? -1 : 1; \
+ } else { \
+ rc = tuple_compare_with_key((elem_ptr)->tuple, (key_ptr)->key, \
+ (key_ptr)->part_count, key_def); \
+ } \
+ rc; \
+})
+#define MEMTX_TREE_ELEM_SET(elem_ptr, info, key_def) ({ \
+ (elem_ptr)->tuple = info; \
+ (elem_ptr)->hint = tuple_hint(info, key_def); \
+})
+#define MEMTX_TREE_KEY_SET(key_ptr, key_val, part_count_val, key_def) ({ \
+ (key_ptr)->key = key_val; \
+ (key_ptr)->part_count = part_count_val; \
+ (key_ptr)->hint = part_count_val > 0 ? key_hint(key_val, key_def) : 0; \
+})
+#define MEMTX_TREE_ELEM_GET(elem_ptr) ((elem_ptr)->tuple)
+#define MEMTX_TREE_KEY_GET(key_ptr, part_count_ptr) \
+ ({*part_count_ptr = (key_ptr)->part_count; (key_ptr)->key;})
+#define MEMTX_TREE_IDENTICAL(elem_a_ptr, elem_b_ptr) \
+ ({(elem_a_ptr)->tuple == (elem_b_ptr)->tuple;})
+
+#include "memtx_tree_impl.h"
+
+#undef memtx_tree_key_t
+#undef memtx_tree_elem_t
+#undef MEMTX_TREE_KEY_GET
+#undef MEMTX_TREE_ELEM_GET
+#undef MEMTX_TREE_KEY_SET
+#undef MEMTX_TREE_ELEM_SET
+#undef MEMTX_TREE_COMPARE_KEY
+#undef MEMTX_TREE_COMPARE
+#undef MEMTX_TREE_NAME
+#undef MEMTX_TREE_IDENTICAL
+
+/* }}} */
+
struct index *
memtx_tree_index_new(struct memtx_engine *memtx, struct index_def *def)
{
+ if (def->cmp_def->parts->type == FIELD_TYPE_STRING ||
+ def->cmp_def->parts->type == FIELD_TYPE_UNSIGNED ||
+ def->cmp_def->parts->type == FIELD_TYPE_INTEGER)
+ return memtx_hinted_tree_index_new(memtx, def);
return memtx_basic_tree_index_new(memtx, def);
}
diff --git a/src/box/tuple_hint.cc b/src/box/tuple_hint.cc
new file mode 100644
index 000000000..7b73403be
--- /dev/null
+++ b/src/box/tuple_hint.cc
@@ -0,0 +1,210 @@
+/*
+ * Copyright 2010-2019, Tarantool AUTHORS, please see AUTHORS file.
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the
+ * following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY <COPYRIGHT HOLDER> ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
+ * <COPYRIGHT HOLDER> OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
+ * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+#include "coll.h"
+#include "tuple.h"
+#include "tuple_hint.h"
+
+static uint64_t
+key_hint_default(const char *key, struct key_def *key_def)
+{
+ (void)key;
+ (void)key_def;
+ return 0;
+}
+
+static uint64_t
+tuple_hint_default(const struct tuple *tuple, struct key_def *key_def)
+{
+ (void)tuple;
+ (void)key_def;
+ return 0;
+}
+
+template<bool is_nullable>
+static uint64_t
+key_hint_uint(const char *key, struct key_def *key_def)
+{
+ (void)key_def;
+ assert(key_part_is_nullable(key_def->parts) == is_nullable);
+ assert(key_def->parts->type == FIELD_TYPE_UNSIGNED ||
+ key_def->parts->type == FIELD_TYPE_INTEGER);
+ if (is_nullable && mp_typeof(*key) == MP_NIL)
+ return 0;
+ assert(mp_typeof(*key) == MP_UINT);
+ uint64_t val = mp_decode_uint(&key);
+ if (val > INT64_MAX)
+ return INT64_MAX;
+ return val - (uint64_t)INT64_MIN;
+}
+
+template<bool is_nullable>
+static uint64_t
+tuple_hint_uint(const struct tuple *tuple, struct key_def *key_def)
+{
+ assert(key_part_is_nullable(key_def->parts) == is_nullable);
+ assert(key_def->parts->type == FIELD_TYPE_UNSIGNED);
+ const char *field = tuple_field_by_part(tuple, key_def->parts);
+ if (is_nullable && field == NULL)
+ return 0;
+ return key_hint_uint<is_nullable>(field, key_def);
+}
+
+template<bool is_nullable>
+static uint64_t
+key_hint_int(const char *key, struct key_def *key_def)
+{
+ assert(key_part_is_nullable(key_def->parts) == is_nullable);
+ assert(key_def->parts->type == FIELD_TYPE_INTEGER);
+ if (is_nullable && mp_typeof(*key) == MP_NIL)
+ return 0;
+ if (mp_typeof(*key) == MP_UINT) {
+ return key_hint_uint<is_nullable>(key, key_def);
+ } else {
+ assert(mp_typeof(*key) == MP_INT);
+ int64_t val = mp_decode_int(&key);
+ return (uint64_t)val - (uint64_t)INT64_MIN;
+ }
+}
+
+template<bool is_nullable>
+static uint64_t
+tuple_hint_int(const struct tuple *tuple, struct key_def *key_def)
+{
+ assert(key_part_is_nullable(key_def->parts) == is_nullable);
+ assert(key_def->parts->type == FIELD_TYPE_INTEGER);
+ const char *field = tuple_field_by_part(tuple, key_def->parts);
+ if (is_nullable && field == NULL)
+ return 0;
+ return key_hint_int<is_nullable>(field, key_def);
+}
+
+template<bool is_nullable>
+static uint64_t
+key_hint_string(const char *key, struct key_def *key_def)
+{
+ (void)key_def;
+ assert(key_part_is_nullable(key_def->parts) == is_nullable);
+ assert(key_def->parts->type == FIELD_TYPE_STRING);
+ if (is_nullable && mp_typeof(*key) == MP_NIL)
+ return 0;
+ assert(key_def->parts->type == FIELD_TYPE_STRING);
+ assert(mp_typeof(*key) == MP_STR);
+ uint32_t len;
+ const unsigned char *str =
+ (const unsigned char *)mp_decode_str(&key, &len);
+ uint64_t result = 0;
+ uint32_t process_len = MIN(len, 8);
+ for (uint32_t i = 0; i < process_len; i++) {
+ result <<= 8;
+ result |= str[i];
+ }
+ result <<= 8 * (8 - process_len);
+ return result;
+}
+
+template<bool is_nullable>
+static uint64_t
+tuple_hint_string(const struct tuple *tuple, struct key_def *key_def)
+{
+ assert(key_part_is_nullable(key_def->parts) == is_nullable);
+ assert(key_def->parts->type == FIELD_TYPE_STRING);
+ const char *field = tuple_field_by_part(tuple, key_def->parts);
+ if (is_nullable && field == NULL)
+ return 0;
+ return key_hint_string<is_nullable>(field, key_def);
+}
+
+template<bool is_nullable>
+static uint64_t
+key_hint_string_coll(const char *key, struct key_def *key_def)
+{
+ assert(key_part_is_nullable(key_def->parts) == is_nullable);
+ assert(key_def->parts->type == FIELD_TYPE_STRING &&
+ key_def->parts->coll != NULL);
+ if (is_nullable && mp_typeof(*key) == MP_NIL)
+ return 0;
+ assert(key_def->parts->type == FIELD_TYPE_STRING);
+ assert(mp_typeof(*key) == MP_STR);
+ assert(key_def->parts->coll != NULL);
+ uint32_t len;
+ const char *str = mp_decode_str(&key, &len);
+ return key_def->parts->coll->hint(str, len, key_def->parts->coll);
+}
+
+template<bool is_nullable>
+static uint64_t
+tuple_hint_string_coll(const struct tuple *tuple, struct key_def *key_def)
+{
+ assert(key_part_is_nullable(key_def->parts) == is_nullable);
+ assert(key_def->parts->type == FIELD_TYPE_STRING &&
+ key_def->parts->coll != NULL);
+ const char *field = tuple_field_by_part(tuple, key_def->parts);
+ if (is_nullable && field == NULL)
+ return 0;
+ return key_hint_string_coll<is_nullable>(field, key_def);
+}
+
+void
+key_def_set_hint_func(struct key_def *def)
+{
+ def->key_hint = key_hint_default;
+ def->tuple_hint = tuple_hint_default;
+ bool is_nullable = key_part_is_nullable(def->parts);
+ if (def->parts->type == FIELD_TYPE_STRING && def->parts->coll != NULL) {
+ def->key_hint = is_nullable ? key_hint_string_coll<true> :
+ key_hint_string_coll<false>;
+ def->tuple_hint = is_nullable ? tuple_hint_string_coll<true> :
+ tuple_hint_string_coll<false>;
+ return;
+ }
+ switch (def->parts->type) {
+ case FIELD_TYPE_UNSIGNED:
+ def->key_hint = is_nullable ? key_hint_uint<true> :
+ key_hint_uint<false>;
+ def->tuple_hint = is_nullable ? tuple_hint_uint<true> :
+ tuple_hint_uint<false>;
+ break;
+ case FIELD_TYPE_INTEGER:
+ def->key_hint = is_nullable ? key_hint_int<true> :
+ key_hint_int<false>;
+ def->tuple_hint = is_nullable ? tuple_hint_int<true> :
+ tuple_hint_int<false>;
+ break;
+ case FIELD_TYPE_STRING:
+ def->key_hint = is_nullable ? key_hint_string<true> :
+ key_hint_string<false>;
+ def->tuple_hint = is_nullable ? tuple_hint_string<true> :
+ tuple_hint_string<false>;
+ break;
+ default:
+ break;
+ };
+}
diff --git a/src/box/tuple_hint.h b/src/box/tuple_hint.h
new file mode 100644
index 000000000..d38a96c8f
--- /dev/null
+++ b/src/box/tuple_hint.h
@@ -0,0 +1,51 @@
+#ifndef TARANTOOL_BOX_TUPLE_HINT_H_INCLUDED
+#define TARANTOOL_BOX_TUPLE_HINT_H_INCLUDED
+/*
+ * Copyright 2010-2019, Tarantool AUTHORS, please see AUTHORS file.
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the
+ * following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY <COPYRIGHT HOLDER> ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
+ * <COPYRIGHT HOLDER> OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
+ * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+#if defined(__cplusplus)
+extern "C" {
+#endif /* defined(__cplusplus) */
+
+struct key_def;
+
+/**
+ * Initialize tuple_hint() and key_hint() functions for the
+ * key_def.
+ * @param key_def key definition to set up.
+ */
+void
+key_def_set_hint_func(struct key_def *key_def);
+
+#if defined(__cplusplus)
+} /* extern "C" */
+#endif /* defined(__cplusplus) */
+
+#endif /* TARANTOOL_BOX_TUPLE_HINT_H_INCLUDED */
diff --git a/src/coll.c b/src/coll.c
index 6d9c44dbf..bf892087d 100644
--- a/src/coll.c
+++ b/src/coll.c
@@ -133,6 +133,37 @@ coll_bin_hash(const char *s, size_t s_len, uint32_t *ph, uint32_t *pcarry,
return s_len;
}
+/** Get a compare hint of a binary collation. */
+static uint64_t
+coll_bin_hint(const char *s, size_t s_len, struct coll *coll)
+{
+ (void)coll;
+ uint64_t result = 0;
+ uint32_t process_len = MIN(s_len, 8);
+ for (uint32_t i = 0; i < process_len; i++) {
+ result <<= 8;
+ result |= ((unsigned char *)s)[i];
+ }
+ result <<= 8 * (8 - process_len);
+ return result;
+}
+
+/** Get a compare hint of a string using ICU collation. */
+static uint64_t
+coll_icu_hint(const char *s, size_t s_len, struct coll *coll)
+{
+ assert(coll->type == COLL_TYPE_ICU);
+ UCharIterator itr;
+ uiter_setUTF8(&itr, s, s_len);
+ uint8_t buf[7];
+ uint32_t state[2] = {0, 0};
+ UErrorCode status = U_ZERO_ERROR;
+ int32_t got = ucol_nextSortKeyPart(coll->collator, &itr, state, buf,
+ sizeof(buf), &status);
+ assert(got >= 0 && got <= 7);
+ return coll_bin_hint((const char *)buf, got, NULL);
+}
+
/**
* Set up ICU collator and init cmp and hash members of collation.
* @param coll Collation to set up.
@@ -242,6 +273,7 @@ coll_icu_init_cmp(struct coll *coll, const struct coll_def *def)
}
coll->cmp = coll_icu_cmp;
coll->hash = coll_icu_hash;
+ coll->hint = coll_icu_hint;
return 0;
}
@@ -332,6 +364,7 @@ coll_new(const struct coll_def *def)
coll->collator = NULL;
coll->cmp = coll_bin_cmp;
coll->hash = coll_bin_hash;
+ coll->hint = coll_bin_hint;
break;
default:
unreachable();
diff --git a/src/coll.h b/src/coll.h
index 9c725712a..53133dae3 100644
--- a/src/coll.h
+++ b/src/coll.h
@@ -47,6 +47,8 @@ typedef int (*coll_cmp_f)(const char *s, size_t s_len, const char *t,
typedef uint32_t (*coll_hash_f)(const char *s, size_t s_len, uint32_t *ph,
uint32_t *pcarry, struct coll *coll);
+typedef uint64_t (*coll_hint_f)(const char *s, size_t s_len, struct coll *coll);
+
struct UCollator;
/**
@@ -61,6 +63,8 @@ struct coll {
/** String comparator. */
coll_cmp_f cmp;
coll_hash_f hash;
+ /** The pointer to routine calculating tuple hint. */
+ coll_hint_f hint;
/** Reference counter. */
int refs;
/**
--
2.20.1
^ permalink raw reply [flat|nested] 27+ messages in thread
* Re: [PATCH v3 7/7] memtx: introduce tuple compare hint
2019-02-22 15:42 ` [PATCH v3 7/7] memtx: introduce tuple compare hint Kirill Shcherbatov
@ 2019-02-25 17:44 ` Vladimir Davydov
2019-02-26 12:10 ` [tarantool-patches] " Kirill Shcherbatov
0 siblings, 1 reply; 27+ messages in thread
From: Vladimir Davydov @ 2019-02-25 17:44 UTC (permalink / raw)
To: Kirill Shcherbatov; +Cc: tarantool-patches, Alexandr Lyapunov
On Fri, Feb 22, 2019 at 06:42:32PM +0300, Kirill Shcherbatov wrote:
> From: Alexandr Lyapunov <aleks@tarantool.org>
>
> Implement functions for retrieving tuple hints for a particular
> key_def. Hint is an integer that can be used for tuple comparison
> optimization: if a hint of one tuple is less than a hint of another
> then the first tuple is definitely less than the second; only if
> hints are equal tuple_compare must be called for getting comparison
> result.
>
> Hints are calculated using only the first part of key_def.
>
> Hints are only useful when:
> * they are precalculated and stored along with the tuple;
> calculation is not cheap (cheaper than tuple_compare btw) but
> precalculated hints allow to compare tuples without even fetching
> tuple data.
> * first part of key_def is 'string'(for now)
> * since hint is calculated using only the first part of key_def
> (and only first several characters if it is a string) this part
> must be different with high probability for every tuple pair.
>
> Closes #3961
> ---
> src/box/CMakeLists.txt | 1 +
> src/box/key_def.c | 2 +
> src/box/key_def.h | 44 ++++++++
> src/box/memtx_tree_decl.c | 89 ++++++++++++++++
> src/box/tuple_hint.cc | 210 ++++++++++++++++++++++++++++++++++++++
> src/box/tuple_hint.h | 51 +++++++++
> src/coll.c | 33 ++++++
> src/coll.h | 4 +
> 8 files changed, 434 insertions(+)
> create mode 100644 src/box/tuple_hint.cc
> create mode 100644 src/box/tuple_hint.h
See a few minor comments inline. Apart from them the patch is fine by
me, but as Kostja justifiably noted, may be it's worth enabling hints
for all data types, including floating point numbers. Then we wouldn't
need the memtx tree parametrization introduced by the previous patches.
OTOH we will probably need it anyway for multikey indexes.
> diff --git a/src/box/tuple_hint.cc b/src/box/tuple_hint.cc
> new file mode 100644
> index 000000000..7b73403be
> --- /dev/null
> +++ b/src/box/tuple_hint.cc
> @@ -0,0 +1,210 @@
> +/*
> + * Copyright 2010-2019, Tarantool AUTHORS, please see AUTHORS file.
> + *
> + * Redistribution and use in source and binary forms, with or
> + * without modification, are permitted provided that the following
> + * conditions are met:
> + *
> + * 1. Redistributions of source code must retain the above
> + * copyright notice, this list of conditions and the
> + * following disclaimer.
> + *
> + * 2. Redistributions in binary form must reproduce the above
> + * copyright notice, this list of conditions and the following
> + * disclaimer in the documentation and/or other materials
> + * provided with the distribution.
> + *
> + * THIS SOFTWARE IS PROVIDED BY <COPYRIGHT HOLDER> ``AS IS'' AND
> + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
> + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
> + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
> + * <COPYRIGHT HOLDER> OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
> + * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
> + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
> + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
> + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
> + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
> + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
> + * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
> + * SUCH DAMAGE.
> + */
> +#include "coll.h"
> +#include "tuple.h"
> +#include "tuple_hint.h"
> +
> +static uint64_t
> +key_hint_default(const char *key, struct key_def *key_def)
> +{
> + (void)key;
> + (void)key_def;
> + return 0;
> +}
> +
> +static uint64_t
> +tuple_hint_default(const struct tuple *tuple, struct key_def *key_def)
> +{
> + (void)tuple;
> + (void)key_def;
> + return 0;
> +}
> +
> +template<bool is_nullable>
> +static uint64_t
> +key_hint_uint(const char *key, struct key_def *key_def)
> +{
> + (void)key_def;
> + assert(key_part_is_nullable(key_def->parts) == is_nullable);
> + assert(key_def->parts->type == FIELD_TYPE_UNSIGNED ||
> + key_def->parts->type == FIELD_TYPE_INTEGER);
FIELD_TYPE_INTEGER? How can that be?
> + if (is_nullable && mp_typeof(*key) == MP_NIL)
> + return 0;
> + assert(mp_typeof(*key) == MP_UINT);
> + uint64_t val = mp_decode_uint(&key);
> + if (val > INT64_MAX)
> + return INT64_MAX;
> + return val - (uint64_t)INT64_MIN;
> +}
> +
> +template<bool is_nullable>
> +static uint64_t
> +tuple_hint_uint(const struct tuple *tuple, struct key_def *key_def)
> +{
> + assert(key_part_is_nullable(key_def->parts) == is_nullable);
> + assert(key_def->parts->type == FIELD_TYPE_UNSIGNED);
> + const char *field = tuple_field_by_part(tuple, key_def->parts);
> + if (is_nullable && field == NULL)
> + return 0;
> + return key_hint_uint<is_nullable>(field, key_def);
> +}
> +
> +template<bool is_nullable>
> +static uint64_t
> +key_hint_int(const char *key, struct key_def *key_def)
> +{
> + assert(key_part_is_nullable(key_def->parts) == is_nullable);
> + assert(key_def->parts->type == FIELD_TYPE_INTEGER);
> + if (is_nullable && mp_typeof(*key) == MP_NIL)
> + return 0;
> + if (mp_typeof(*key) == MP_UINT) {
> + return key_hint_uint<is_nullable>(key, key_def);
> + } else {
> + assert(mp_typeof(*key) == MP_INT);
> + int64_t val = mp_decode_int(&key);
> + return (uint64_t)val - (uint64_t)INT64_MIN;
> + }
> +}
> +
> +template<bool is_nullable>
> +static uint64_t
> +tuple_hint_int(const struct tuple *tuple, struct key_def *key_def)
> +{
> + assert(key_part_is_nullable(key_def->parts) == is_nullable);
> + assert(key_def->parts->type == FIELD_TYPE_INTEGER);
> + const char *field = tuple_field_by_part(tuple, key_def->parts);
> + if (is_nullable && field == NULL)
> + return 0;
> + return key_hint_int<is_nullable>(field, key_def);
> +}
> +
> +template<bool is_nullable>
> +static uint64_t
> +key_hint_string(const char *key, struct key_def *key_def)
> +{
> + (void)key_def;
> + assert(key_part_is_nullable(key_def->parts) == is_nullable);
> + assert(key_def->parts->type == FIELD_TYPE_STRING);
Assert that coll is NULL.
> + if (is_nullable && mp_typeof(*key) == MP_NIL)
> + return 0;
> + assert(key_def->parts->type == FIELD_TYPE_STRING);
Duplicate assertion.
> + assert(mp_typeof(*key) == MP_STR);
> + uint32_t len;
> + const unsigned char *str =
> + (const unsigned char *)mp_decode_str(&key, &len);
> + uint64_t result = 0;
> + uint32_t process_len = MIN(len, 8);
> + for (uint32_t i = 0; i < process_len; i++) {
> + result <<= 8;
> + result |= str[i];
> + }
> + result <<= 8 * (8 - process_len);
> + return result;
> +}
> +
> +template<bool is_nullable>
> +static uint64_t
> +tuple_hint_string(const struct tuple *tuple, struct key_def *key_def)
> +{
> + assert(key_part_is_nullable(key_def->parts) == is_nullable);
> + assert(key_def->parts->type == FIELD_TYPE_STRING);
Assert that coll is NULL.
> + const char *field = tuple_field_by_part(tuple, key_def->parts);
> + if (is_nullable && field == NULL)
> + return 0;
> + return key_hint_string<is_nullable>(field, key_def);
> +}
> +
> +template<bool is_nullable>
> +static uint64_t
> +key_hint_string_coll(const char *key, struct key_def *key_def)
> +{
> + assert(key_part_is_nullable(key_def->parts) == is_nullable);
> + assert(key_def->parts->type == FIELD_TYPE_STRING &&
> + key_def->parts->coll != NULL);
> + if (is_nullable && mp_typeof(*key) == MP_NIL)
> + return 0;
> + assert(key_def->parts->type == FIELD_TYPE_STRING);
Duplicate assertion.
> + assert(mp_typeof(*key) == MP_STR);
> + assert(key_def->parts->coll != NULL);
Duplicate assertion.
> + uint32_t len;
> + const char *str = mp_decode_str(&key, &len);
> + return key_def->parts->coll->hint(str, len, key_def->parts->coll);
> +}
> diff --git a/src/coll.c b/src/coll.c
> index 6d9c44dbf..bf892087d 100644
> --- a/src/coll.c
> +++ b/src/coll.c
> @@ -133,6 +133,37 @@ coll_bin_hash(const char *s, size_t s_len, uint32_t *ph, uint32_t *pcarry,
> return s_len;
> }
>
> +/** Get a compare hint of a binary collation. */
> +static uint64_t
> +coll_bin_hint(const char *s, size_t s_len, struct coll *coll)
> +{
> + (void)coll;
> + uint64_t result = 0;
> + uint32_t process_len = MIN(s_len, 8);
> + for (uint32_t i = 0; i < process_len; i++) {
> + result <<= 8;
> + result |= ((unsigned char *)s)[i];
> + }
> + result <<= 8 * (8 - process_len);
> + return result;
> +}
> +
> +/** Get a compare hint of a string using ICU collation. */
> +static uint64_t
> +coll_icu_hint(const char *s, size_t s_len, struct coll *coll)
> +{
> + assert(coll->type == COLL_TYPE_ICU);
> + UCharIterator itr;
> + uiter_setUTF8(&itr, s, s_len);
> + uint8_t buf[7];
Should be buf[8]?
> + uint32_t state[2] = {0, 0};
> + UErrorCode status = U_ZERO_ERROR;
> + int32_t got = ucol_nextSortKeyPart(coll->collator, &itr, state, buf,
> + sizeof(buf), &status);
> + assert(got >= 0 && got <= 7);
> + return coll_bin_hint((const char *)buf, got, NULL);
> +}
^ permalink raw reply [flat|nested] 27+ messages in thread
* Re: [tarantool-patches] Re: [PATCH v3 7/7] memtx: introduce tuple compare hint
2019-02-25 17:44 ` Vladimir Davydov
@ 2019-02-26 12:10 ` Kirill Shcherbatov
0 siblings, 0 replies; 27+ messages in thread
From: Kirill Shcherbatov @ 2019-02-26 12:10 UTC (permalink / raw)
To: tarantool-patches, Vladimir Davydov; +Cc: Alexandr Lyapunov
> See a few minor comments inline. Apart from them the patch is fine by
> me, but as Kostja justifiably noted, may be it's worth enabling hints
> for all data types, including floating point numbers. Then we wouldn't
> need the memtx tree parametrization introduced by the previous patches.
> OTOH we will probably need it anyway for multikey indexes.
We really need the memtx tree parametrization, at least because the multikey
index tree has its own MEMTX_TREE_COMPARE/MEMTX_TREE_COMPARE_KEY functions
(which use multikey_idx).
See the kshch/gh-1257-multikey-index branch for more details.
>> +template<bool is_nullable>
>> +static uint64_t
>> +key_hint_uint(const char *key, struct key_def *key_def)
>> +{
>> + (void)key_def;
>> + assert(key_part_is_nullable(key_def->parts) == is_nullable);
>> + assert(key_def->parts->type == FIELD_TYPE_UNSIGNED ||
>> + key_def->parts->type == FIELD_TYPE_INTEGER);
>
> FIELD_TYPE_INTEGER? How can that be?
I've reused it below. But ok, let's just inline it.
> Should be buf[8]?
Yep.
^ permalink raw reply [flat|nested] 27+ messages in thread