From: Kirill Shcherbatov <kshcherbatov@tarantool.org>
To: tarantool-patches@freelists.org
Cc: vdavydov.dev@gmail.com, Kirill Shcherbatov <kshcherbatov@tarantool.org>
Subject: [tarantool-patches] [PATCH v5 09/12] box: introduce has_json_paths flag in templates
Date: Mon, 29 Oct 2018 09:56:41 +0300 [thread overview]
Message-ID: <8ab1ada80e185c433d19045c03e9faf7f304cc7d.1540795996.git.kshcherbatov@tarantool.org> (raw)
In-Reply-To: <cover.1540795996.git.kshcherbatov@tarantool.org>
In-Reply-To: <cover.1540795996.git.kshcherbatov@tarantool.org>
Introduced a has_json_paths flag for the compare, hash and extract
functions (which are really hot) to make it possible to avoid looking
at the path field for flat indexes without any JSON paths.
Part of #1012
---
src/box/tuple_compare.cc | 112 +++++++++++++++++++++++++++++++------------
src/box/tuple_extract_key.cc | 104 ++++++++++++++++++++++++++--------------
src/box/tuple_hash.cc | 45 ++++++++++++-----
3 files changed, 182 insertions(+), 79 deletions(-)
diff --git a/src/box/tuple_compare.cc b/src/box/tuple_compare.cc
index 554c29f..97963d0 100644
--- a/src/box/tuple_compare.cc
+++ b/src/box/tuple_compare.cc
@@ -458,11 +458,12 @@ tuple_common_key_parts(const struct tuple *tuple_a, const struct tuple *tuple_b,
return i;
}
-template<bool is_nullable, bool has_optional_parts>
+template<bool is_nullable, bool has_optional_parts, bool has_json_path>
static inline int
tuple_compare_slowpath(const struct tuple *tuple_a, const struct tuple *tuple_b,
struct key_def *key_def)
{
+ assert(has_json_path == key_def->has_json_paths);
assert(!has_optional_parts || is_nullable);
assert(is_nullable == key_def->is_nullable);
assert(has_optional_parts == key_def->has_optional_parts);
@@ -508,10 +509,19 @@ tuple_compare_slowpath(const struct tuple *tuple_a, const struct tuple *tuple_b,
end = part + key_def->part_count;
for (; part < end; part++) {
- field_a = tuple_field_by_part_raw(format_a, tuple_a_raw,
- field_map_a, part);
- field_b = tuple_field_by_part_raw(format_b, tuple_b_raw,
- field_map_b, part);
+ if (!has_json_path) {
+ field_a = tuple_field_raw(format_a, tuple_a_raw,
+ field_map_a,
+ part->fieldno);
+ field_b = tuple_field_raw(format_b, tuple_b_raw,
+ field_map_b,
+ part->fieldno);
+ } else {
+ field_a = tuple_field_by_part_raw(format_a, tuple_a_raw,
+ field_map_a, part);
+ field_b = tuple_field_by_part_raw(format_b, tuple_b_raw,
+ field_map_b, part);
+ }
assert(has_optional_parts ||
(field_a != NULL && field_b != NULL));
if (! is_nullable) {
@@ -558,10 +568,19 @@ tuple_compare_slowpath(const struct tuple *tuple_a, const struct tuple *tuple_b,
*/
end = key_def->parts + key_def->part_count;
for (; part < end; ++part) {
- field_a = tuple_field_by_part_raw(format_a, tuple_a_raw,
- field_map_a, part);
- field_b = tuple_field_by_part_raw(format_b, tuple_b_raw,
- field_map_b, part);
+ if (!has_json_path) {
+ field_a = tuple_field_raw(format_a, tuple_a_raw,
+ field_map_a,
+ part->fieldno);
+ field_b = tuple_field_raw(format_b, tuple_b_raw,
+ field_map_b,
+ part->fieldno);
+ } else {
+ field_a = tuple_field_by_part_raw(format_a, tuple_a_raw,
+ field_map_a, part);
+ field_b = tuple_field_by_part_raw(format_b, tuple_b_raw,
+ field_map_b, part);
+ }
/*
* Extended parts are primary, and they can not
* be absent or be NULLs.
@@ -575,11 +594,12 @@ tuple_compare_slowpath(const struct tuple *tuple_a, const struct tuple *tuple_b,
return 0;
}
-template<bool is_nullable, bool has_optional_parts>
+template<bool is_nullable, bool has_optional_parts, bool has_json_paths>
static inline int
tuple_compare_with_key_slowpath(const struct tuple *tuple, const char *key,
uint32_t part_count, struct key_def *key_def)
{
+ assert(has_json_paths == key_def->has_json_paths);
assert(!has_optional_parts || is_nullable);
assert(is_nullable == key_def->is_nullable);
assert(has_optional_parts == key_def->has_optional_parts);
@@ -591,9 +611,14 @@ tuple_compare_with_key_slowpath(const struct tuple *tuple, const char *key,
const uint32_t *field_map = tuple_field_map(tuple);
enum mp_type a_type, b_type;
if (likely(part_count == 1)) {
- const char *field =
- tuple_field_by_part_raw(format, tuple_raw, field_map,
- part);
+ const char *field;
+ if (!has_json_paths) {
+ field = tuple_field_raw(format, tuple_raw, field_map,
+ part->fieldno);
+ } else {
+ field = tuple_field_by_part_raw(format, tuple_raw,
+ field_map, part);
+ }
if (! is_nullable) {
return tuple_compare_field(field, key, part->type,
part->coll);
@@ -617,9 +642,14 @@ tuple_compare_with_key_slowpath(const struct tuple *tuple, const char *key,
struct key_part *end = part + part_count;
int rc;
for (; part < end; ++part, mp_next(&key)) {
- const char *field =
- tuple_field_by_part_raw(format, tuple_raw,
- field_map, part);
+ const char *field;
+ if (!has_json_paths) {
+ field = tuple_field_raw(format, tuple_raw, field_map,
+ part->fieldno);
+ } else {
+ field = tuple_field_by_part_raw(format, tuple_raw,
+ field_map, part);
+ }
if (! is_nullable) {
rc = tuple_compare_field(field, key, part->type,
part->coll);
@@ -1012,19 +1042,31 @@ static const comparator_signature cmp_arr[] = {
#undef COMPARATOR
+static const tuple_compare_t compare_slowpath_funcs[] = {
+ tuple_compare_slowpath<false, false, false>,
+ tuple_compare_slowpath<true, false, false>,
+ tuple_compare_slowpath<false, true, false>,
+ tuple_compare_slowpath<true, true, false>,
+ tuple_compare_slowpath<false, false, true>,
+ tuple_compare_slowpath<true, false, true>,
+ tuple_compare_slowpath<false, true, true>,
+ tuple_compare_slowpath<true, true, true>
+};
+
tuple_compare_t
tuple_compare_create(const struct key_def *def)
{
+ int cmp_func_idx = (def->is_nullable ? 1 : 0) +
+ 2 * (def->has_optional_parts ? 1 : 0) +
+ 4 * (def->has_json_paths ? 1 : 0);
if (def->is_nullable) {
if (key_def_is_sequential(def)) {
if (def->has_optional_parts)
return tuple_compare_sequential<true, true>;
else
return tuple_compare_sequential<true, false>;
- } else if (def->has_optional_parts) {
- return tuple_compare_slowpath<true, true>;
} else {
- return tuple_compare_slowpath<true, false>;
+ return compare_slowpath_funcs[cmp_func_idx];
}
}
assert(! def->has_optional_parts);
@@ -1044,10 +1086,9 @@ tuple_compare_create(const struct key_def *def)
return cmp_arr[k].f;
}
}
- if (key_def_is_sequential(def))
- return tuple_compare_sequential<false, false>;
- else
- return tuple_compare_slowpath<false, false>;
+ return key_def_is_sequential(def) ?
+ tuple_compare_sequential<false, false> :
+ compare_slowpath_funcs[cmp_func_idx];
}
/* }}} tuple_compare */
@@ -1229,9 +1270,23 @@ static const comparator_with_key_signature cmp_wk_arr[] = {
#undef KEY_COMPARATOR
+static const tuple_compare_with_key_t compare_with_key_slowpath_funcs[] = {
+ tuple_compare_with_key_slowpath<false, false, false>,
+ tuple_compare_with_key_slowpath<true, false, false>,
+ tuple_compare_with_key_slowpath<false, true, false>,
+ tuple_compare_with_key_slowpath<true, true, false>,
+ tuple_compare_with_key_slowpath<false, false, true>,
+ tuple_compare_with_key_slowpath<true, false, true>,
+ tuple_compare_with_key_slowpath<false, true, true>,
+ tuple_compare_with_key_slowpath<true, true, true>
+};
+
tuple_compare_with_key_t
tuple_compare_with_key_create(const struct key_def *def)
{
+ int cmp_func_idx = (def->is_nullable ? 1 : 0) +
+ 2 * (def->has_optional_parts ? 1 : 0) +
+ 4 * (def->has_json_paths ? 1 : 0);
if (def->is_nullable) {
if (key_def_is_sequential(def)) {
if (def->has_optional_parts) {
@@ -1241,10 +1296,8 @@ tuple_compare_with_key_create(const struct key_def *def)
return tuple_compare_with_key_sequential<true,
false>;
}
- } else if (def->has_optional_parts) {
- return tuple_compare_with_key_slowpath<true, true>;
} else {
- return tuple_compare_with_key_slowpath<true, false>;
+ return compare_with_key_slowpath_funcs[cmp_func_idx];
}
}
assert(! def->has_optional_parts);
@@ -1267,10 +1320,9 @@ tuple_compare_with_key_create(const struct key_def *def)
return cmp_wk_arr[k].f;
}
}
- if (key_def_is_sequential(def))
- return tuple_compare_with_key_sequential<false, false>;
- else
- return tuple_compare_with_key_slowpath<false, false>;
+ return key_def_is_sequential(def) ?
+ tuple_compare_with_key_sequential<false, false> :
+ compare_with_key_slowpath_funcs[cmp_func_idx];
}
/* }}} tuple_compare_with_key */
diff --git a/src/box/tuple_extract_key.cc b/src/box/tuple_extract_key.cc
index 04c5463..63ad970 100644
--- a/src/box/tuple_extract_key.cc
+++ b/src/box/tuple_extract_key.cc
@@ -5,13 +5,18 @@
enum { MSGPACK_NULL = 0xc0 };
/** True if key part i and i+1 are sequential. */
+template <bool has_json_paths>
static inline bool
key_def_parts_are_sequential(const struct key_def *def, int i)
{
uint32_t fieldno1 = def->parts[i].fieldno + 1;
uint32_t fieldno2 = def->parts[i + 1].fieldno;
- return fieldno1 == fieldno2 && def->parts[i].path == NULL &&
- def->parts[i + 1].path == NULL;
+ if (!has_json_paths) {
+ return fieldno1 == fieldno2;
+ } else {
+ return fieldno1 == fieldno2 && def->parts[i].path == NULL &&
+ def->parts[i + 1].path == NULL;
+ }
}
/** True, if a key con contain two or more parts in sequence. */
@@ -19,7 +24,7 @@ static bool
key_def_contains_sequential_parts(const struct key_def *def)
{
for (uint32_t i = 0; i < def->part_count - 1; ++i) {
- if (key_def_parts_are_sequential(def, i))
+ if (key_def_parts_are_sequential<true>(def, i))
return true;
}
return false;
@@ -99,11 +104,13 @@ tuple_extract_key_sequential(const struct tuple *tuple, struct key_def *key_def,
* General-purpose implementation of tuple_extract_key()
* @copydoc tuple_extract_key()
*/
-template <bool contains_sequential_parts, bool has_optional_parts>
+template <bool contains_sequential_parts, bool has_optional_parts,
+ bool has_json_paths>
static char *
tuple_extract_key_slowpath(const struct tuple *tuple,
struct key_def *key_def, uint32_t *key_size)
{
+ assert(has_json_paths == key_def->has_json_paths);
assert(!has_optional_parts || key_def->is_nullable);
assert(has_optional_parts == key_def->has_optional_parts);
assert(contains_sequential_parts ==
@@ -118,9 +125,14 @@ tuple_extract_key_slowpath(const struct tuple *tuple,
/* Calculate the key size. */
for (uint32_t i = 0; i < part_count; ++i) {
- const char *field =
- tuple_field_by_part_raw(format, data, field_map,
- &key_def->parts[i]);
+ const char *field;
+ if (!has_json_paths) {
+ field = tuple_field_raw(format, data, field_map,
+ key_def->parts[i].fieldno);
+ } else {
+ field = tuple_field_by_part_raw(format, data, field_map,
+ &key_def->parts[i]);
+ }
if (has_optional_parts && field == NULL) {
bsize += mp_sizeof_nil();
continue;
@@ -133,7 +145,8 @@ tuple_extract_key_slowpath(const struct tuple *tuple,
* minimize tuple_field_raw() calls.
*/
for (; i < part_count - 1; i++) {
- if (!key_def_parts_are_sequential(key_def, i)) {
+ if (!key_def_parts_are_sequential
+ <has_json_paths>(key_def, i)) {
/*
* End of sequential part.
*/
@@ -159,9 +172,14 @@ tuple_extract_key_slowpath(const struct tuple *tuple,
}
char *key_buf = mp_encode_array(key, part_count);
for (uint32_t i = 0; i < part_count; ++i) {
- const char *field =
- tuple_field_by_part_raw(format, data, field_map,
- &key_def->parts[i]);
+ const char *field;
+ if (!has_json_paths) {
+ field = tuple_field_raw(format, data, field_map,
+ key_def->parts[i].fieldno);
+ } else {
+ field = tuple_field_by_part_raw(format, data, field_map,
+ &key_def->parts[i]);
+ }
if (has_optional_parts && field == NULL) {
key_buf = mp_encode_nil(key_buf);
continue;
@@ -174,7 +192,8 @@ tuple_extract_key_slowpath(const struct tuple *tuple,
* minimize tuple_field_raw() calls.
*/
for (; i < part_count - 1; i++) {
- if (!key_def_parts_are_sequential(key_def, i)) {
+ if (!key_def_parts_are_sequential
+ <has_json_paths>(key_def, i)) {
/*
* End of sequential part.
*/
@@ -207,11 +226,12 @@ tuple_extract_key_slowpath(const struct tuple *tuple,
* General-purpose version of tuple_extract_key_raw()
* @copydoc tuple_extract_key_raw()
*/
-template <bool has_optional_parts>
+template <bool has_optional_parts, bool has_json_paths>
static char *
tuple_extract_key_slowpath_raw(const char *data, const char *data_end,
struct key_def *key_def, uint32_t *key_size)
{
+ assert(has_json_paths == key_def->has_json_paths);
assert(!has_optional_parts || key_def->is_nullable);
assert(has_optional_parts == key_def->has_optional_parts);
assert(mp_sizeof_nil() == 1);
@@ -239,7 +259,8 @@ tuple_extract_key_slowpath_raw(const char *data, const char *data_end,
uint32_t fieldno = key_def->parts[i].fieldno;
uint32_t null_count = 0;
for (; i < key_def->part_count - 1; i++) {
- if (!key_def_parts_are_sequential(key_def, i))
+ if (!key_def_parts_are_sequential
+ <has_json_paths>(key_def, i))
break;
}
const struct key_part *part = &key_def->parts[i];
@@ -312,6 +333,17 @@ tuple_extract_key_slowpath_raw(const char *data, const char *data_end,
return key;
}
+static const tuple_extract_key_t extract_key_slowpath_funcs[] = {
+ tuple_extract_key_slowpath<false, false, false>,
+ tuple_extract_key_slowpath<true, false, false>,
+ tuple_extract_key_slowpath<false, true, false>,
+ tuple_extract_key_slowpath<true, true, false>,
+ tuple_extract_key_slowpath<false, false, true>,
+ tuple_extract_key_slowpath<true, false, true>,
+ tuple_extract_key_slowpath<false, true, true>,
+ tuple_extract_key_slowpath<true, true, true>
+};
+
/**
* Initialize tuple_extract_key() and tuple_extract_key_raw()
*/
@@ -332,32 +364,30 @@ tuple_extract_key_set(struct key_def *key_def)
tuple_extract_key_sequential_raw<false>;
}
} else {
- if (key_def->has_optional_parts) {
- assert(key_def->is_nullable);
- if (key_def_contains_sequential_parts(key_def)) {
- key_def->tuple_extract_key =
- tuple_extract_key_slowpath<true, true>;
- } else {
- key_def->tuple_extract_key =
- tuple_extract_key_slowpath<false, true>;
- }
- } else {
- if (key_def_contains_sequential_parts(key_def)) {
- key_def->tuple_extract_key =
- tuple_extract_key_slowpath<true, false>;
- } else {
- key_def->tuple_extract_key =
- tuple_extract_key_slowpath<false,
- false>;
- }
- }
+ int func_idx =
+ (key_def_contains_sequential_parts(key_def) ? 1 : 0) +
+ 2 * (key_def->has_optional_parts ? 1 : 0) +
+ 4 * (key_def->has_json_paths ? 1 : 0);
+ key_def->tuple_extract_key =
+ extract_key_slowpath_funcs[func_idx];
+ assert(!key_def->has_optional_parts || key_def->is_nullable);
}
if (key_def->has_optional_parts) {
assert(key_def->is_nullable);
- key_def->tuple_extract_key_raw =
- tuple_extract_key_slowpath_raw<true>;
+ if (key_def->has_json_paths) {
+ key_def->tuple_extract_key_raw =
+ tuple_extract_key_slowpath_raw<true, true>;
+ } else {
+ key_def->tuple_extract_key_raw =
+ tuple_extract_key_slowpath_raw<true, false>;
+ }
} else {
- key_def->tuple_extract_key_raw =
- tuple_extract_key_slowpath_raw<false>;
+ if (key_def->has_json_paths) {
+ key_def->tuple_extract_key_raw =
+ tuple_extract_key_slowpath_raw<false, true>;
+ } else {
+ key_def->tuple_extract_key_raw =
+ tuple_extract_key_slowpath_raw<false, false>;
+ }
}
}
diff --git a/src/box/tuple_hash.cc b/src/box/tuple_hash.cc
index 3486ce1..8ede290 100644
--- a/src/box/tuple_hash.cc
+++ b/src/box/tuple_hash.cc
@@ -213,7 +213,7 @@ static const hasher_signature hash_arr[] = {
#undef HASHER
-template <bool has_optional_parts>
+template <bool has_optional_parts, bool has_json_paths>
uint32_t
tuple_hash_slowpath(const struct tuple *tuple, struct key_def *key_def);
@@ -256,10 +256,17 @@ tuple_hash_func_set(struct key_def *key_def) {
}
slowpath:
- if (key_def->has_optional_parts)
- key_def->tuple_hash = tuple_hash_slowpath<true>;
- else
- key_def->tuple_hash = tuple_hash_slowpath<false>;
+ if (key_def->has_optional_parts) {
+ if (key_def->has_json_paths)
+ key_def->tuple_hash = tuple_hash_slowpath<true, true>;
+ else
+ key_def->tuple_hash = tuple_hash_slowpath<true, false>;
+ } else {
+ if (key_def->has_json_paths)
+ key_def->tuple_hash = tuple_hash_slowpath<false, true>;
+ else
+ key_def->tuple_hash = tuple_hash_slowpath<false, false>;
+ }
key_def->key_hash = key_hash_slowpath;
}
@@ -319,10 +326,11 @@ tuple_hash_key_part(uint32_t *ph1, uint32_t *pcarry, const struct tuple *tuple,
return tuple_hash_field(ph1, pcarry, &field, part->coll);
}
-template <bool has_optional_parts>
+template <bool has_optional_parts, bool has_json_paths>
uint32_t
tuple_hash_slowpath(const struct tuple *tuple, struct key_def *key_def)
{
+ assert(has_json_paths == key_def->has_json_paths);
assert(has_optional_parts == key_def->has_optional_parts);
uint32_t h = HASH_SEED;
uint32_t carry = 0;
@@ -331,9 +339,13 @@ tuple_hash_slowpath(const struct tuple *tuple, struct key_def *key_def)
struct tuple_format *format = tuple_format(tuple);
const char *tuple_raw = tuple_data(tuple);
const uint32_t *field_map = tuple_field_map(tuple);
- const char *field =
- tuple_field_by_part_raw(format, tuple_raw, field_map,
- key_def->parts);
+ const char *field;
+ if (!has_json_paths) {
+ field = tuple_field(tuple, prev_fieldno);
+ } else {
+ field = tuple_field_by_part_raw(format, tuple_raw, field_map,
+ key_def->parts);
+ }
const char *end = (char *)tuple + tuple_size(tuple);
if (has_optional_parts && field == NULL) {
total_size += tuple_hash_null(&h, &carry);
@@ -347,9 +359,18 @@ tuple_hash_slowpath(const struct tuple *tuple, struct key_def *key_def)
* need of tuple_field
*/
if (prev_fieldno + 1 != key_def->parts[part_id].fieldno) {
- struct key_part *part = &key_def->parts[part_id];
- field = tuple_field_by_part_raw(format, tuple_raw,
- field_map, part);
+ if (!has_json_paths) {
+ field = tuple_field(tuple,
+ key_def->parts[part_id].
+ fieldno);
+ } else {
+ struct key_part *part =
+ &key_def->parts[part_id];
+ field = tuple_field_by_part_raw(format,
+ tuple_raw,
+ field_map,
+ part);
+ }
}
if (has_optional_parts && (field == NULL || field >= end)) {
total_size += tuple_hash_null(&h, &carry);
--
2.7.4
prev parent reply other threads:[~2018-10-29 6:57 UTC|newest]
Thread overview: 39+ messages / expand[flat|nested] mbox.gz Atom feed top
2018-10-29 6:56 [PATCH v5 00/12] box: indexes by JSON path Kirill Shcherbatov
2018-10-29 6:56 ` [PATCH v5 01/12] box: refactor key_def_find routine Kirill Shcherbatov
2018-11-19 17:48 ` Vladimir Davydov
2018-10-29 6:56 ` [PATCH v5 10/12] box: tune tuple_field_raw_by_path for indexed data Kirill Shcherbatov
2018-10-29 6:56 ` [PATCH v5 11/12] box: introduce offset slot cache in key_part Kirill Shcherbatov
2018-11-01 13:32 ` [tarantool-patches] " Konstantin Osipov
2018-11-06 12:15 ` [tarantool-patches] " Kirill Shcherbatov
2018-10-29 6:56 ` [PATCH v5 12/12] box: specify indexes in user-friendly form Kirill Shcherbatov
2018-11-01 13:34 ` [tarantool-patches] " Konstantin Osipov
2018-11-01 14:18 ` Konstantin Osipov
2018-11-06 12:15 ` [tarantool-patches] " Kirill Shcherbatov
2018-10-29 6:56 ` [PATCH v5 02/12] box: introduce key_def_parts_are_sequential Kirill Shcherbatov
2018-11-01 14:23 ` [tarantool-patches] " Konstantin Osipov
2018-11-06 12:14 ` [tarantool-patches] " Kirill Shcherbatov
2018-11-19 17:48 ` Vladimir Davydov
2018-10-29 6:56 ` [PATCH v5 03/12] box: introduce tuple_field_go_to_path Kirill Shcherbatov
2018-11-19 17:48 ` Vladimir Davydov
2018-10-29 6:56 ` [PATCH v5 04/12] box: introduce tuple_format_add_key_part Kirill Shcherbatov
2018-11-01 14:38 ` [tarantool-patches] " Konstantin Osipov
2018-11-06 12:15 ` [tarantool-patches] " Kirill Shcherbatov
2018-11-19 17:50 ` Vladimir Davydov
2018-10-29 6:56 ` [PATCH v5 05/12] lib: implement JSON tree class for json library Kirill Shcherbatov
2018-11-01 15:08 ` [tarantool-patches] " Konstantin Osipov
2018-11-06 12:15 ` [tarantool-patches] " Kirill Shcherbatov
2018-11-19 17:53 ` Vladimir Davydov
2018-11-20 16:43 ` Vladimir Davydov
2018-11-21 10:37 ` [tarantool-patches] " Kirill Shcherbatov
2018-11-26 10:50 ` Kirill Shcherbatov
2018-10-29 6:56 ` [PATCH v5 06/12] box: manage format fields with JSON tree class Kirill Shcherbatov
2018-10-29 6:56 ` [PATCH v5 07/12] lib: introduce json_path_normalize routine Kirill Shcherbatov
2018-11-01 15:22 ` [tarantool-patches] " Konstantin Osipov
2018-11-01 15:27 ` [tarantool-patches] " Kirill Shcherbatov
2018-11-20 15:13 ` Vladimir Davydov
2018-11-26 10:50 ` Kirill Shcherbatov
2018-11-20 15:14 ` Vladimir Davydov
2018-10-29 6:56 ` [PATCH v5 08/12] box: introduce JSON indexes Kirill Shcherbatov
2018-11-20 16:52 ` Vladimir Davydov
2018-11-26 10:50 ` [tarantool-patches] " Kirill Shcherbatov
2018-10-29 6:56 ` Kirill Shcherbatov [this message]
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=8ab1ada80e185c433d19045c03e9faf7f304cc7d.1540795996.git.kshcherbatov@tarantool.org \
--to=kshcherbatov@tarantool.org \
--cc=tarantool-patches@freelists.org \
--cc=vdavydov.dev@gmail.com \
--subject='Re: [tarantool-patches] [PATCH v5 09/12] box: introduce has_json_paths flag in templates' \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox