From mboxrd@z Thu Jan 1 00:00:00 1970
Return-Path: 
From: Kirill Shcherbatov 
Subject: [PATCH v9 4/6] box: introduce has_json_paths flag in templates
Date: Sun, 3 Feb 2019 13:20:24 +0300
Message-Id: <42c9311f3216ec6754b3616bde0927025820fa02.1549187339.git.kshcherbatov@tarantool.org>
In-Reply-To: 
References: 
MIME-Version: 1.0
Content-Transfer-Encoding: 8bit
To: tarantool-patches@freelists.org, vdavydov.dev@gmail.com
Cc: Kirill Shcherbatov 
List-ID: 

Introduced a has_json_paths flag for the compare, hash and extract
function templates (which are really hot) so that flat indexes
without any JSON paths do not need to look at the path field.

Part of #1012
---
 src/box/tuple_compare.cc     | 110 +++++++++++++++++++++----------
 src/box/tuple_extract_key.cc | 106 +++++++++++++++++++++------------
 src/box/tuple_hash.cc        |  38 ++++++++----
 3 files changed, 174 insertions(+), 80 deletions(-)

diff --git a/src/box/tuple_compare.cc b/src/box/tuple_compare.cc
index 7ab6e3bf6..db4bdfd92 100644
--- a/src/box/tuple_compare.cc
+++ b/src/box/tuple_compare.cc
@@ -458,11 +458,12 @@ tuple_common_key_parts(const struct tuple *tuple_a, const struct tuple *tuple_b,
 	return i;
 }
 
-template<bool is_nullable, bool has_optional_parts>
+template<bool is_nullable, bool has_optional_parts, bool has_json_paths>
 static inline int
 tuple_compare_slowpath(const struct tuple *tuple_a, const struct tuple *tuple_b,
 		       struct key_def *key_def)
 {
+	assert(has_json_paths == key_def->has_json_paths);
 	assert(!has_optional_parts || is_nullable);
 	assert(is_nullable == key_def->is_nullable);
 	assert(has_optional_parts == key_def->has_optional_parts);
@@ -470,7 +471,7 @@ tuple_compare_slowpath(const struct tuple *tuple_a, const struct tuple *tuple_b,
 	const char *tuple_a_raw = tuple_data(tuple_a);
 	const char *tuple_b_raw = tuple_data(tuple_b);
 	if (key_def->part_count == 1 && part->fieldno == 0 &&
-	    part->path == NULL) {
+	    (!has_json_paths || part->path == NULL)) {
 		/*
 		 * First field can not be optional - empty tuples
 		 * can not exist.
@@ -508,10 +509,17 @@ tuple_compare_slowpath(const struct tuple *tuple_a, const struct tuple *tuple_b,
 
 	end = part + key_def->part_count;
 	for (; part < end; part++) {
-		field_a = tuple_field_by_part_raw(format_a, tuple_a_raw,
-						  field_map_a, part);
-		field_b = tuple_field_by_part_raw(format_b, tuple_b_raw,
-						  field_map_b, part);
+		if (!has_json_paths) {
+			field_a = tuple_field_raw(format_a, tuple_a_raw,
+						  field_map_a, part->fieldno);
+			field_b = tuple_field_raw(format_b, tuple_b_raw,
+						  field_map_b, part->fieldno);
+		} else {
+			field_a = tuple_field_by_part_raw(format_a, tuple_a_raw,
+							  field_map_a, part);
+			field_b = tuple_field_by_part_raw(format_b, tuple_b_raw,
+							  field_map_b, part);
+		}
 		assert(has_optional_parts ||
 		       (field_a != NULL && field_b != NULL));
 		if (! is_nullable) {
@@ -558,10 +566,17 @@ tuple_compare_slowpath(const struct tuple *tuple_a, const struct tuple *tuple_b,
 	 */
 	end = key_def->parts + key_def->part_count;
 	for (; part < end; ++part) {
-		field_a = tuple_field_by_part_raw(format_a, tuple_a_raw,
-						  field_map_a, part);
-		field_b = tuple_field_by_part_raw(format_b, tuple_b_raw,
-						  field_map_b, part);
+		if (!has_json_paths) {
+			field_a = tuple_field_raw(format_a, tuple_a_raw,
+						  field_map_a, part->fieldno);
+			field_b = tuple_field_raw(format_b, tuple_b_raw,
+						  field_map_b, part->fieldno);
+		} else {
+			field_a = tuple_field_by_part_raw(format_a, tuple_a_raw,
+							  field_map_a, part);
+			field_b = tuple_field_by_part_raw(format_b, tuple_b_raw,
+							  field_map_b, part);
+		}
 		/*
 		 * Extended parts are primary, and they can not
 		 * be absent or be NULLs.
@@ -575,11 +590,12 @@ tuple_compare_slowpath(const struct tuple *tuple_a, const struct tuple *tuple_b,
 	return 0;
 }
 
-template<bool is_nullable, bool has_optional_parts>
+template<bool is_nullable, bool has_optional_parts, bool has_json_paths>
 static inline int
 tuple_compare_with_key_slowpath(const struct tuple *tuple, const char *key,
 				uint32_t part_count, struct key_def *key_def)
 {
+	assert(has_json_paths == key_def->has_json_paths);
 	assert(!has_optional_parts || is_nullable);
 	assert(is_nullable == key_def->is_nullable);
 	assert(has_optional_parts == key_def->has_optional_parts);
@@ -591,9 +607,14 @@ tuple_compare_with_key_slowpath(const struct tuple *tuple, const char *key,
 	const uint32_t *field_map = tuple_field_map(tuple);
 	enum mp_type a_type, b_type;
 	if (likely(part_count == 1)) {
-		const char *field =
-			tuple_field_by_part_raw(format, tuple_raw, field_map,
-						part);
+		const char *field;
+		if (!has_json_paths) {
+			field = tuple_field_raw(format, tuple_raw, field_map,
+						part->fieldno);
+		} else {
+			field = tuple_field_by_part_raw(format, tuple_raw,
+							field_map, part);
+		}
 		if (! is_nullable) {
 			return tuple_compare_field(field, key, part->type,
 						   part->coll);
@@ -617,9 +638,14 @@ tuple_compare_with_key_slowpath(const struct tuple *tuple, const char *key,
 	struct key_part *end = part + part_count;
 	int rc;
 	for (; part < end; ++part, mp_next(&key)) {
-		const char *field =
-			tuple_field_by_part_raw(format, tuple_raw,
-						field_map, part);
+		const char *field;
+		if (!has_json_paths) {
+			field = tuple_field_raw(format, tuple_raw, field_map,
+						part->fieldno);
+		} else {
+			field = tuple_field_by_part_raw(format, tuple_raw,
+							field_map, part);
+		}
 		if (! is_nullable) {
 			rc = tuple_compare_field(field, key, part->type,
 						 part->coll);
@@ -1012,19 +1038,31 @@ static const comparator_signature cmp_arr[] = {
 
 #undef COMPARATOR
 
+static const tuple_compare_t compare_slowpath_funcs[] = {
+	tuple_compare_slowpath<false, false, false>,
+	tuple_compare_slowpath<true, false, false>,
+	tuple_compare_slowpath<false, true, false>,
+	tuple_compare_slowpath<true, true, false>,
+	tuple_compare_slowpath<false, false, true>,
+	tuple_compare_slowpath<true, false, true>,
+	tuple_compare_slowpath<false, true, true>,
+	tuple_compare_slowpath<true, true, true>
+};
+
 tuple_compare_t
 tuple_compare_create(const struct key_def *def)
 {
+	int cmp_func_idx = (def->is_nullable ? 1 : 0) +
+			   2 * (def->has_optional_parts ? 1 : 0) +
+			   4 * (def->has_json_paths ? 1 : 0);
 	if (def->is_nullable) {
 		if (key_def_is_sequential(def)) {
 			if (def->has_optional_parts)
 				return tuple_compare_sequential<true, true>;
 			else
 				return tuple_compare_sequential<true, false>;
-		} else if (def->has_optional_parts) {
-			return tuple_compare_slowpath<true, true>;
 		} else {
-			return tuple_compare_slowpath<true, false>;
+			return compare_slowpath_funcs[cmp_func_idx];
 		}
 	}
 	assert(! def->has_optional_parts);
@@ -1044,10 +1082,9 @@ tuple_compare_create(const struct key_def *def)
 			return cmp_arr[k].f;
 		}
 	}
-	if (key_def_is_sequential(def))
-		return tuple_compare_sequential<false, false>;
-	else
-		return tuple_compare_slowpath<false, false>;
+	return key_def_is_sequential(def) ?
+	       tuple_compare_sequential<false, false> :
+	       compare_slowpath_funcs[cmp_func_idx];
 }
 
 /* }}} tuple_compare */
@@ -1229,9 +1266,23 @@ static const comparator_with_key_signature cmp_wk_arr[] = {
 
 #undef KEY_COMPARATOR
 
+static const tuple_compare_with_key_t compare_with_key_slowpath_funcs[] = {
+	tuple_compare_with_key_slowpath<false, false, false>,
+	tuple_compare_with_key_slowpath<true, false, false>,
+	tuple_compare_with_key_slowpath<false, true, false>,
+	tuple_compare_with_key_slowpath<true, true, false>,
+	tuple_compare_with_key_slowpath<false, false, true>,
+	tuple_compare_with_key_slowpath<true, false, true>,
+	tuple_compare_with_key_slowpath<false, true, true>,
+	tuple_compare_with_key_slowpath<true, true, true>
+};
+
 tuple_compare_with_key_t
 tuple_compare_with_key_create(const struct key_def *def)
 {
+	int cmp_func_idx = (def->is_nullable ? 1 : 0) +
+			   2 * (def->has_optional_parts ? 1 : 0) +
+			   4 * (def->has_json_paths ? 1 : 0);
 	if (def->is_nullable) {
 		if (key_def_is_sequential(def)) {
 			if (def->has_optional_parts) {
@@ -1241,10 +1292,8 @@ tuple_compare_with_key_create(const struct key_def *def)
 				return tuple_compare_with_key_sequential<
 					true, false>;
 			}
-		} else if (def->has_optional_parts) {
-			return tuple_compare_with_key_slowpath<true, true>;
 		} else {
-			return tuple_compare_with_key_slowpath<true, false>;
+			return compare_with_key_slowpath_funcs[cmp_func_idx];
 		}
 	}
 	assert(! def->has_optional_parts);
@@ -1267,10 +1316,9 @@ tuple_compare_with_key_create(const struct key_def *def)
 			return cmp_wk_arr[k].f;
 		}
 	}
-	if (key_def_is_sequential(def))
-		return tuple_compare_with_key_sequential<false, false>;
-	else
-		return tuple_compare_with_key_slowpath<false, false>;
+	return key_def_is_sequential(def) ?
+	       tuple_compare_with_key_sequential<false, false> :
+	       compare_with_key_slowpath_funcs[cmp_func_idx];
 }
 
 /* }}} tuple_compare_with_key */
diff --git a/src/box/tuple_extract_key.cc b/src/box/tuple_extract_key.cc
index 1e8ec7588..0f55b8adb 100644
--- a/src/box/tuple_extract_key.cc
+++ b/src/box/tuple_extract_key.cc
@@ -5,13 +5,18 @@
 enum { MSGPACK_NULL = 0xc0 };
 
 /** True if key part i and i+1 are sequential. */
+template <bool has_json_paths>
 static inline bool
 key_def_parts_are_sequential(const struct key_def *def, int i)
 {
 	const struct key_part *part1 = &def->parts[i];
 	const struct key_part *part2 = &def->parts[i + 1];
-	return part1->fieldno + 1 == part2->fieldno &&
-	       part1->path == NULL && part2->path == NULL;
+	if (!has_json_paths) {
+		return part1->fieldno + 1 == part2->fieldno;
+	} else {
+		return part1->fieldno + 1 == part2->fieldno &&
+		       part1->path == NULL && part2->path == NULL;
+	}
 }
 
 /** True, if a key con contain two or more parts in sequence. */
@@ -19,7 +24,7 @@ static bool
 key_def_contains_sequential_parts(const struct key_def *def)
 {
 	for (uint32_t i = 0; i < def->part_count - 1; ++i) {
-		if (key_def_parts_are_sequential(def, i))
+		if (key_def_parts_are_sequential<true>(def, i))
 			return true;
 	}
 	return false;
@@ -99,11 +104,13 @@ tuple_extract_key_sequential(const struct tuple *tuple, struct key_def *key_def,
  * General-purpose implementation of tuple_extract_key()
 * @copydoc tuple_extract_key()
  */
-template <bool contains_sequential_parts, bool has_optional_parts>
+template <bool contains_sequential_parts, bool has_optional_parts,
+	  bool has_json_paths>
 static char *
 tuple_extract_key_slowpath(const struct tuple *tuple,
			   struct key_def *key_def, uint32_t *key_size)
 {
+	assert(has_json_paths == key_def->has_json_paths);
 	assert(!has_optional_parts || key_def->is_nullable);
 	assert(has_optional_parts == key_def->has_optional_parts);
 	assert(contains_sequential_parts ==
@@ -118,9 +125,14 @@ tuple_extract_key_slowpath(const struct tuple *tuple,
 
 	/* Calculate the key size. */
 	for (uint32_t i = 0; i < part_count; ++i) {
-		const char *field =
-			tuple_field_by_part_raw(format, data, field_map,
-						&key_def->parts[i]);
+		const char *field;
+		if (!has_json_paths) {
+			field = tuple_field_raw(format, data, field_map,
+						key_def->parts[i].fieldno);
+		} else {
+			field = tuple_field_by_part_raw(format, data, field_map,
+							&key_def->parts[i]);
+		}
 		if (has_optional_parts && field == NULL) {
 			bsize += mp_sizeof_nil();
 			continue;
@@ -133,7 +145,8 @@ tuple_extract_key_slowpath(const struct tuple *tuple,
 			 * minimize tuple_field_raw() calls.
 			 */
 			for (; i < part_count - 1; i++) {
-				if (!key_def_parts_are_sequential(key_def, i)) {
+				if (!key_def_parts_are_sequential
+				    <has_json_paths>(key_def, i)) {
 					/*
 					 * End of sequential part.
 					 */
@@ -159,9 +172,14 @@ tuple_extract_key_slowpath(const struct tuple *tuple,
 	}
 	char *key_buf = mp_encode_array(key, part_count);
 	for (uint32_t i = 0; i < part_count; ++i) {
-		const char *field =
-			tuple_field_by_part_raw(format, data, field_map,
-						&key_def->parts[i]);
+		const char *field;
+		if (!has_json_paths) {
+			field = tuple_field_raw(format, data, field_map,
+						key_def->parts[i].fieldno);
+		} else {
+			field = tuple_field_by_part_raw(format, data, field_map,
+							&key_def->parts[i]);
+		}
 		if (has_optional_parts && field == NULL) {
 			key_buf = mp_encode_nil(key_buf);
 			continue;
@@ -174,7 +192,8 @@ tuple_extract_key_slowpath(const struct tuple *tuple,
 			 * minimize tuple_field_raw() calls.
 			 */
 			for (; i < part_count - 1; i++) {
-				if (!key_def_parts_are_sequential(key_def, i)) {
+				if (!key_def_parts_are_sequential
+				    <has_json_paths>(key_def, i)) {
 					/*
 					 * End of sequential part.
 					 */
@@ -207,11 +226,12 @@ tuple_extract_key_slowpath(const struct tuple *tuple,
  * General-purpose version of tuple_extract_key_raw()
 * @copydoc tuple_extract_key_raw()
  */
-template <bool has_optional_parts>
+template <bool has_optional_parts, bool has_json_paths>
 static char *
 tuple_extract_key_slowpath_raw(const char *data, const char *data_end,
			       struct key_def *key_def, uint32_t *key_size)
 {
+	assert(has_json_paths == key_def->has_json_paths);
 	assert(!has_optional_parts || key_def->is_nullable);
 	assert(has_optional_parts == key_def->has_optional_parts);
 	assert(mp_sizeof_nil() == 1);
@@ -239,7 +259,8 @@ tuple_extract_key_slowpath_raw(const char *data, const char *data_end,
 		uint32_t fieldno = key_def->parts[i].fieldno;
 		uint32_t null_count = 0;
 		for (; i < key_def->part_count - 1; i++) {
-			if (!key_def_parts_are_sequential(key_def, i))
+			if (!key_def_parts_are_sequential
+			    <has_json_paths>(key_def, i))
 				break;
 		}
 		const struct key_part *part = &key_def->parts[i];
@@ -287,7 +308,7 @@ tuple_extract_key_slowpath_raw(const char *data, const char *data_end,
 		}
 		const char *src = field;
 		const char *src_end = field_end;
-		if (part->path != NULL) {
+		if (has_json_paths && part->path != NULL) {
 			if (tuple_field_go_to_path(&src, part->path,
						   part->path_len) != 0) {
 				/*
@@ -320,6 +341,17 @@ tuple_extract_key_slowpath_raw(const char *data, const char *data_end,
 	return key;
 }
 
+static const tuple_extract_key_t extract_key_slowpath_funcs[] = {
+	tuple_extract_key_slowpath<false, false, false>,
+	tuple_extract_key_slowpath<true, false, false>,
+	tuple_extract_key_slowpath<false, true, false>,
+	tuple_extract_key_slowpath<true, true, false>,
+	tuple_extract_key_slowpath<false, false, true>,
+	tuple_extract_key_slowpath<true, false, true>,
+	tuple_extract_key_slowpath<false, true, true>,
+	tuple_extract_key_slowpath<true, true, true>
+};
+
 /**
  * Initialize tuple_extract_key() and tuple_extract_key_raw()
  */
@@ -340,32 +372,30 @@ tuple_extract_key_set(struct key_def *key_def)
 				tuple_extract_key_sequential_raw<false>;
 		}
 	} else {
-		if (key_def->has_optional_parts) {
-			assert(key_def->is_nullable);
-			if (key_def_contains_sequential_parts(key_def)) {
-				key_def->tuple_extract_key =
-					tuple_extract_key_slowpath<true, true>;
-			} else {
-				key_def->tuple_extract_key =
-					tuple_extract_key_slowpath<false, true>;
-			}
-		} else {
-			if (key_def_contains_sequential_parts(key_def)) {
-				key_def->tuple_extract_key =
-					tuple_extract_key_slowpath<true, false>;
-			} else {
-				key_def->tuple_extract_key =
-					tuple_extract_key_slowpath<false, false>;
-			}
-		}
+		int func_idx =
+			(key_def_contains_sequential_parts(key_def) ? 1 : 0) +
+			2 * (key_def->has_optional_parts ? 1 : 0) +
+			4 * (key_def->has_json_paths ? 1 : 0);
+		key_def->tuple_extract_key =
+			extract_key_slowpath_funcs[func_idx];
+		assert(!key_def->has_optional_parts || key_def->is_nullable);
 	}
 	if (key_def->has_optional_parts) {
 		assert(key_def->is_nullable);
-		key_def->tuple_extract_key_raw =
-			tuple_extract_key_slowpath_raw<true>;
+		if (key_def->has_json_paths) {
+			key_def->tuple_extract_key_raw =
+				tuple_extract_key_slowpath_raw<true, true>;
+		} else {
+			key_def->tuple_extract_key_raw =
+				tuple_extract_key_slowpath_raw<true, false>;
+		}
 	} else {
-		key_def->tuple_extract_key_raw =
-			tuple_extract_key_slowpath_raw<false>;
+		if (key_def->has_json_paths) {
+			key_def->tuple_extract_key_raw =
+				tuple_extract_key_slowpath_raw<false, true>;
+		} else {
+			key_def->tuple_extract_key_raw =
+				tuple_extract_key_slowpath_raw<false, false>;
+		}
 	}
 }
diff --git a/src/box/tuple_hash.cc b/src/box/tuple_hash.cc
index 825c3e5b3..19da43360 100644
--- a/src/box/tuple_hash.cc
+++ b/src/box/tuple_hash.cc
@@ -214,7 +214,7 @@ static const hasher_signature hash_arr[] = {
 
 #undef HASHER
 
-template <bool has_optional_parts>
+template <bool has_optional_parts, bool has_json_paths>
 uint32_t
 tuple_hash_slowpath(const struct tuple *tuple, struct key_def *key_def);
 
@@ -257,10 +257,17 @@ tuple_hash_func_set(struct key_def *key_def) {
 	}
 
 slowpath:
-	if (key_def->has_optional_parts)
-		key_def->tuple_hash = tuple_hash_slowpath<true>;
-	else
-		key_def->tuple_hash = tuple_hash_slowpath<false>;
+	if (key_def->has_optional_parts) {
+		if (key_def->has_json_paths)
+			key_def->tuple_hash = tuple_hash_slowpath<true, true>;
+		else
+			key_def->tuple_hash = tuple_hash_slowpath<true, false>;
+	} else {
+		if (key_def->has_json_paths)
+			key_def->tuple_hash = tuple_hash_slowpath<false, true>;
+		else
+			key_def->tuple_hash = tuple_hash_slowpath<false, false>;
+	}
 	key_def->key_hash = key_hash_slowpath;
 }
 
@@ -348,10 +355,11 @@ tuple_hash_key_part(uint32_t *ph1, uint32_t *pcarry, const struct tuple *tuple,
 	return tuple_hash_field(ph1, pcarry, &field, part->coll);
 }
 
-template <bool has_optional_parts>
+template <bool has_optional_parts, bool has_json_paths>
 uint32_t
 tuple_hash_slowpath(const struct tuple *tuple, struct key_def *key_def)
 {
+	assert(has_json_paths == key_def->has_json_paths);
 	assert(has_optional_parts == key_def->has_optional_parts);
 	uint32_t h = HASH_SEED;
 	uint32_t carry = 0;
@@ -360,9 +368,13 @@ tuple_hash_slowpath(const struct tuple *tuple, struct key_def *key_def)
 	struct tuple_format *format = tuple_format(tuple);
 	const char *tuple_raw = tuple_data(tuple);
 	const uint32_t *field_map = tuple_field_map(tuple);
-	const char *field =
-		tuple_field_by_part_raw(format, tuple_raw, field_map,
-					key_def->parts);
+	const char *field;
+	if (!has_json_paths) {
+		field = tuple_field(tuple, prev_fieldno);
+	} else {
+		field = tuple_field_by_part_raw(format, tuple_raw, field_map,
+						key_def->parts);
+	}
 	const char *end = (char *)tuple + tuple_size(tuple);
 	if (has_optional_parts && field == NULL) {
 		total_size += tuple_hash_null(&h, &carry);
@@ -377,8 +389,12 @@ tuple_hash_slowpath(const struct tuple *tuple, struct key_def *key_def)
 		 */
 		if (prev_fieldno + 1 != key_def->parts[part_id].fieldno) {
 			struct key_part *part = &key_def->parts[part_id];
-			field = tuple_field_by_part_raw(format, tuple_raw,
-							field_map, part);
+			if (!has_json_paths) {
+				field = tuple_field(tuple, part->fieldno);
+			} else {
+				field = tuple_field_by_part_raw(format,
+					tuple_raw, field_map, part);
+			}
 		}
 		if (has_optional_parts && (field == NULL || field >= end)) {
 			total_size += tuple_hash_null(&h, &carry);
-- 
2.20.1
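
P.S. Read out of patch context, the selection scheme is: every boolean
key_def property contributes one bit of an index into an array of template
instantiations, so tuple_compare_create() and friends pick a fully
specialized function with a single table lookup instead of nested if/else.
Below is a minimal, self-contained sketch of that technique only; all names
in it (def_flags, compare_stub, compare_create) are invented for
illustration and are not Tarantool symbols.

#include <cstdio>

/* Illustrative stand-in for the key_def flags the patch dispatches on. */
struct def_flags {
	bool is_nullable;
	bool has_optional_parts;
	bool has_json_paths;
};

typedef int (*cmp_f)(int a, int b);

/*
 * One instantiation per flag combination: the flags are compile-time
 * constants inside the body, so untaken branches cost nothing at runtime.
 */
template <bool is_nullable, bool has_optional_parts, bool has_json_paths>
static int
compare_stub(int a, int b)
{
	if (has_json_paths) {
		/* A JSON-aware comparator would resolve part->path here. */
	}
	return a < b ? -1 : a > b ? 1 : 0;
}

/* Bit 0 - is_nullable, bit 1 - has_optional_parts, bit 2 - has_json_paths. */
static const cmp_f compare_stub_funcs[] = {
	compare_stub<false, false, false>,
	compare_stub<true, false, false>,
	compare_stub<false, true, false>,
	compare_stub<true, true, false>,
	compare_stub<false, false, true>,
	compare_stub<true, false, true>,
	compare_stub<false, true, true>,
	compare_stub<true, true, true>
};

static cmp_f
compare_create(const struct def_flags *def)
{
	int idx = (def->is_nullable ? 1 : 0) +
		  2 * (def->has_optional_parts ? 1 : 0) +
		  4 * (def->has_json_paths ? 1 : 0);
	return compare_stub_funcs[idx];
}

int
main(void)
{
	struct def_flags def = {false, false, false};
	/* Picks compare_stub<false, false, false> with one table lookup. */
	printf("%d\n", compare_create(&def)(1, 2)); /* prints -1 */
	return 0;
}

With has_json_paths == false the chosen instantiation never looks at the
path field at all, which is the effect the patch is after for flat indexes.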