[tarantool-patches] Re: [PATCH] sql: store statistics in statN as an array of integers
n.pettik
korablev at tarantool.org
Tue Mar 5 20:54:49 MSK 2019
I pushed some refactoring on top of your branch.
Check it and, if it is OK, merge the commits. I also attach
it to the letter:
diff --git a/src/box/lua/upgrade.lua b/src/box/lua/upgrade.lua
index ab46142ba..fde91b7d9 100644
--- a/src/box/lua/upgrade.lua
+++ b/src/box/lua/upgrade.lua
@@ -615,10 +615,10 @@ local function upgrade_to_2_1_0()
end
--------------------------------------------------------------------------------
--- Tarantool 2.1.1
+-- Tarantool 2.1.2
--------------------------------------------------------------------------------
-local function upgrade_sql_stat_to_2_1_1()
+local function upgrade_to_2_1_2()
local _sql_stat1 = box.space[box.schema.SQL_STAT1_ID]
local _sql_stat4 = box.space[box.schema.SQL_STAT4_ID]
@@ -626,7 +626,6 @@ local function upgrade_sql_stat_to_2_1_1()
format[3].type = 'array'
_sql_stat1:format(format)
-
format = _sql_stat4:format()
format[3].type = 'array'
format[4].type = 'array'
@@ -634,10 +633,6 @@ local function upgrade_sql_stat_to_2_1_1()
_sql_stat4:format(format)
end
-local function upgrade_to_2_1_1()
- upgrade_sql_stat_to_2_1_1()
-end
-
local function get_version()
local version = box.space._schema:get{'version'}
if version == nil then
@@ -666,7 +661,7 @@ local function upgrade(options)
{version = mkversion(1, 10, 0), func = upgrade_to_1_10_0, auto = true},
{version = mkversion(1, 10, 2), func = upgrade_to_1_10_2, auto = true},
{version = mkversion(2, 1, 0), func = upgrade_to_2_1_0, auto = true},
- {version = mkversion(2, 1, 1), func = upgrade_to_2_1_1, auto = true}
+ {version = mkversion(2, 1, 2), func = upgrade_to_2_1_2, auto = true}
}
for _, handler in ipairs(handlers) do
diff --git a/src/box/sql/analyze.c b/src/box/sql/analyze.c
index cfc987693..94940ef28 100644
--- a/src/box/sql/analyze.c
+++ b/src/box/sql/analyze.c
@@ -645,7 +645,7 @@ statGet(sql_context * context, int argc, sql_value ** argv)
/* Return the value to store in the "stat" column of the _sql_stat1
* table for this index.
*
- * The value is a msg pack array composed of a list of integers describing
+ * The value is an array composed of a list of integers describing
* the index. The first integer in the list is the total number of
* entries in the index. There is one additional integer in the list
* for each indexed column. This additional integer is an estimate of
@@ -663,35 +663,30 @@ statGet(sql_context * context, int argc, sql_value ** argv)
*
* I = (K+D-1)/D
*/
- int i;
-
- size_t size = mp_sizeof_array(p->nKeyCol + 1);
+ uint32_t size = mp_sizeof_array(p->nKeyCol + 1);
size += mp_sizeof_uint(p->nRow);
- for (i = 0; i < p->nKeyCol; ++i) {
+ for (int i = 0; i < p->nKeyCol; ++i) {
uint64_t dist_count = p->current.anDLt[i] + 1;
uint64_t val = (p->nRow + dist_count - 1) / dist_count;
size += mp_sizeof_uint(val);
}
- char *z_ret = sqlMallocZero(size);
- if (z_ret == 0) {
+ char *mp_stat1 = malloc(size);
+ if (mp_stat1 == NULL) {
sql_result_error_nomem(context);
return;
}
- char *z = z_ret;
- z = mp_encode_array(z, p->nKeyCol + 1);
- z = mp_encode_uint(z, p->nRow);
- for (i = 0; i < p->nKeyCol; ++i) {
+ char *data = mp_stat1;
+ data = mp_encode_array(data, p->nKeyCol + 1);
+ data = mp_encode_uint(data, p->nRow);
+ for (int i = 0; i < p->nKeyCol; ++i) {
uint64_t dist_count = p->current.anDLt[i] + 1;
uint64_t val = (p->nRow + dist_count - 1) / dist_count;
- z = mp_encode_uint(z, val);
- assert(p->current.anEq[i]);
+ data = mp_encode_uint(data, val);
+ assert(p->current.anEq[i] > 0);
}
- const char *b = z_ret;
- assert(mp_check(&b, z) == 0);
- (void) b;
- assert(z_ret != z);
+ assert(data == size + mp_stat1);
- sql_result_blob(context, z_ret, size, sql_free);
+ sql_result_blob(context, mp_stat1, size, free);
context->pOut->flags|= MEM_Subtype;
context->pOut->subtype = SQL_SUBTYPE_MSGPACK;
} else if (eCall == STAT_GET_KEY) {
@@ -720,35 +715,25 @@ statGet(sql_context * context, int argc, sql_value ** argv)
p->iGet++;
break;
}
- }
-
- int i;
-
- size_t size = mp_sizeof_array(p->nCol);
- for (i = 0; i < p->nCol; ++i) {
- size += mp_sizeof_uint(aCnt[i]);
- }
-
- char *z_ret = sqlMallocZero(size);
- if (z_ret == 0) {
- sql_result_error_nomem(context);
- } else {
- char *z = z_ret;
- z = mp_encode_array(z, p->nCol);
- for (i = 0; i < p->nCol; i++) {
- z = mp_encode_uint(z, aCnt[i]);
}
- const char *b = z_ret;
- assert(mp_check(&b, z) == 0);
- (void) b;
- assert(z_ret != z);
+ size_t size = mp_sizeof_array(p->nCol);
+ for (int i = 0; i < p->nCol; ++i)
+ size += mp_sizeof_uint(aCnt[i]);
- sql_result_blob(context, z_ret, size, sql_free);
+ char *mp_stat4 = malloc(size);
+ if (mp_stat4 == NULL) {
+ sql_result_error_nomem(context);
+ return;
+ }
+ char *data = mp_stat4;
+ data = mp_encode_array(data, p->nCol);
+ for (int i = 0; i < p->nCol; i++)
+ data = mp_encode_uint(data, aCnt[i]);
+ assert(data == mp_stat4 + size);
+ sql_result_blob(context, mp_stat4, size, free);
context->pOut->flags|= MEM_Subtype;
context->pOut->subtype = SQL_SUBTYPE_MSGPACK;
}
-
-}
#ifndef SQL_DEBUG
UNUSED_PARAMETER(argc);
#endif
@@ -1188,13 +1173,20 @@ struct analysis_index_info {
uint32_t index_count;
};
-#define KW_UNORDERED 0x01
-#define KW_NOSKIPSCAN 0x02
/**
- * The first argument points to a msg_pack array
+ * Tokens which may be present in the array containing statistics.
+ * They can turn on/off certain optimizations and help query
+ * planner. To be extended.
+ */
+enum {
+ UNORDERED_HINT_TK = 0x1,
+ NOSKIPSCAN_HINT_TK = 0x2,
+};
+
+/**
+ * The first argument points to a msgpack array
* containing a list of integers. Load the first
* stat_size of these into the output arrays.
- * keywords_info needed for keywords encoding/decoding.
*
* @param stat_array MP_ARRAY containing array of integers.
* @param stat_size Size of input array (not counting the keywords).
@@ -1204,12 +1196,24 @@ struct analysis_index_info {
*/
static void
decode_stat_array(const char *stat_array, int stat_size, tRowcnt *stat_exact,
- LogEst *stat_log, uint8_t keywords_info) {
+ LogEst *stat_log, uint8_t *keywords_mask)
+{
const char *z = stat_array;
- uint32_t array_size = mp_decode_array(&z);
- if (z == NULL)
+ if (z == NULL || mp_typeof(*z) != MP_ARRAY)
+ return;
+ int array_size = mp_decode_array(&z);
+ /*
+ * The number of entries in the array should be greater than
+ * or equal to the given size. If it is greater, then it could
+ * contain hint tokens to enable/disable certain optimizations.
+ * But in case it is less, it is likely to be malformed
+ * data, so it makes no sense to continue processing.
+ */
+ if (array_size < stat_size)
return;
for (int i = 0; i < stat_size; i++) {
+ if (mp_typeof(*z) != MP_UINT)
+ return;
tRowcnt v = (tRowcnt) mp_decode_uint(&z);
if (stat_exact != NULL)
stat_exact[i] = v;
@@ -1218,20 +1222,18 @@ decode_stat_array(const char *stat_array, int stat_size, tRowcnt *stat_exact,
}
/* Keywords processing if needed. */
- if (keywords_info != 0) {
+ if (keywords_mask != NULL) {
+ *keywords_mask = 0;
uint32_t keyword_count = array_size - stat_size;
- if (keyword_count > 0) {
- while (keyword_count) {
- const char *sval;
- uint32_t sval_len;
- sval = mp_decode_str(&z, &sval_len);
- if (!sql_stricmp(sval, "unordered")) {
- keywords_info |= KW_UNORDERED;
- } else if (!sql_stricmp(sval, "noskipscan")) {
- keywords_info |= KW_NOSKIPSCAN;
- }
- keyword_count--;
- }
+ while (keyword_count-- > 0) {
+ uint32_t sval_len;
+ if (mp_typeof(*z) != MP_STR)
+ return;
+ const char *sval = mp_decode_str(&z, &sval_len);
+ if (strncmp(sval, "unordered", 9) == 0)
+ *keywords_mask |= UNORDERED_HINT_TK;
+ else if (strncmp(sval, "noskipscan", 10) == 0)
+ *keywords_mask |= NOSKIPSCAN_HINT_TK;
}
}
}
@@ -1307,19 +1309,11 @@ analysis_loader(void *data, int argc, char **argv, char **unused)
diag_set(OutOfMemory, stat1_size, "region", "tuple_log_est");
return -1;
}
- uint8_t keywords_info = 0;
+ uint8_t keywords_mask = 0;
decode_stat_array(argv[2], column_count, stat->tuple_stat1,
- stat->tuple_log_est, keywords_info);
-
- if ((keywords_info & KW_UNORDERED) == 0)
- stat->is_unordered = false;
- else
- stat->is_unordered = true;
- if ((keywords_info & KW_NOSKIPSCAN) == 0)
- stat->skip_scan_enabled = true;
- else
- stat->skip_scan_enabled = false;
-
+ stat->tuple_log_est, &keywords_mask);
+ stat->is_unordered = (keywords_mask & UNORDERED_HINT_TK);
+ stat->skip_scan_enabled = ! (keywords_mask & NOSKIPSCAN_HINT_TK);
return 0;
}
@@ -1524,11 +1518,11 @@ load_stat_from_space(struct sql *db, const char *sql_select_prepare,
&stat->samples[stats[current_idx_count].sample_count];
decode_stat_array((char *)sql_column_text(stmt, 2),
- column_count, sample->eq, 0, 0);
+ column_count, sample->eq, NULL, NULL);
decode_stat_array((char *)sql_column_text(stmt, 3),
- column_count, sample->lt, 0, 0);
+ column_count, sample->lt, NULL, NULL);
decode_stat_array((char *)sql_column_text(stmt, 4),
- column_count, sample->dlt, 0, 0);
+ column_count, sample->dlt, NULL, NULL);
/* Take a copy of the sample. */
sample->key_size = sql_column_bytes(stmt, 5);
sample->sample_key = region_alloc(&fiber()->gc,
diff --git a/test/box-py/bootstrap.result b/test/box-py/bootstrap.result
index b622a529d..009fa6488 100644
--- a/test/box-py/bootstrap.result
+++ b/test/box-py/bootstrap.result
@@ -4,7 +4,7 @@ box.internal.bootstrap()
box.space._schema:select{}
---
- - ['max_id', 511]
- - ['version', 2, 1, 1]
+ - ['version', 2, 1, 2]
...
box.space._cluster:select{}
> diff --git a/test/sql-tap/analyze1.test.lua b/test/sql-tap/analyze1.test.lua
> index cc1259314..e6d512f97 100755
> --- a/test/sql-tap/analyze1.test.lua
> +++ b/test/sql-tap/analyze1.test.lua
> @@ -1,6 +1,6 @@
> #!/usr/bin/env tarantool
> test = require("sqltester")
> -test:plan(38)
> +test:plan(39)
>
> test:do_execsql_test(
> - "analyze-4.1",
> + "analyze-4.1.1",
> [[
> DELETE FROM "_sql_stat1";
> - INSERT INTO "_sql_stat1" VALUES('t4', 't4i1', 'nonsense');
> - INSERT INTO "_sql_stat1" VALUES('t4', 't4i2', '432653287412874653284129847632');
> + ]], {
> + -- <analyze-4.1>
> + -- </analyze-4.1>
> + })
> +
> +_sql_stat1 = box.space[box.schema.SQL_STAT1_ID]
You can simply do this:
_sql_stat1 = box.space._sql_stat1
> +_sql_stat1:insert{'t4', 't4i1', {'nonsense'}}
> +_sql_stat1:insert{'t4', 't4i2', {432653287412874653284129847632}}
Firstly, it would be better if each test-related thing
were placed in the scope of a test function. So, let's move these
insertions into a test:do_test func (you can check out examples
in sql-tap/lua-tables.test.lua, for instance).
Secondly, a simple insertion into the _sql_stat spaces doesn't trigger
an update of the statistics in the in-memory structs. In other words, these
tests check nothing (and would not without your patch either). To make
these tests work, you should force statistics loading with an instance
reload. I am not sure if this is possible in the tap suite, so you should
probably move these tests to the sql/ suite.
> +function sum_table(t)
Please provide a brief comment for each function.
> + local res = 0
> + for k, v in pairs(t) do
> + res = res + v
If you don’t need one of params, just skip it:
for _, v in pairs(t) do
…
Also, you can use built-in facilities:
fun = require('fun')
fun.iter({1,2,3,4,5,6,7,8,9,10}):sum()
> + end
> + return res
> +end
> +
> +function get_tuples_where_order_by_limit(order_by, order, limit)
> + space = box.space[box.schema.SQL_STAT4_ID]
> + t = {}
> + for k, v in space:pairs() do
> + table.insert(t, v)
> + end
> +
> + local i
> + local count
Always initialise variables; it is best practice.
> + local where = {tbl = "T1", idx = 'I1'}
> + if where ~= 0 then
This is always true.
> + for k, v in pairs(where) do
> + i = 1
> + for key, tuple in pairs(t) do
> + tuple = t[i]
> + if tuple[k] ~= v then
> + t[i] = nil
> + else
> + count = i
> + end
> + i = i + 1
> + end
> + end
> + end
Can’t read this code. Please, comment it at least.
> +
> + local compare
> + if order == 'asc' then
> + compare = function(a, b)
> + if sum_table(a[order_by]) <= sum_table(b[order_by]) then
> + return true
> + end
> + end
> + else
> + compare = function(a, b)
> + if sum_table(a[order_by]) > sum_table(b[order_by]) then
> + return true
> + end
> + end
> + end
> +
> + table.sort(t, compare)
> +
> + if limit == nil then
> + limit = count
> + end
> +
> + local ret = ''
> + i = 1
> + while i <= limit do
> + if i == 1 then
> + ret = tostring(t[i])
> + else
> + ret = ret..' '..tostring(t[i])
> + end
> + i = i + 1
> + end
> + return ret
> +end
> +
> +_sql_stat4 = box.space[box.schema.SQL_STAT4_ID]
> +
> +box.internal.sql_create_function("get_tuples_where_order_by_limit", "TEXT", get_tuples_where_order_by_limit)
> +
> test:do_execsql_test(
> "analyze-6.1.3",
> [[
> - SELECT "tbl", "idx", "neq", "nlt", "ndlt" FROM "_sql_stat4" where "tbl"='T1' and "idx"='I1' ORDER BY "nlt" LIMIT 1;
> + SELECT get_tuples_where_order_by_limit('nlt', 'asc', 1);
In this particular test(s), you pass the same limit and order by
args. So I’d rather hardcode them to make function smaller and
simpler.
> ]], {
> -- <analyze-6.1.3>
> - "T1", "I1", "221 221 221 1", "0 0 0 10", "0 0 0 10"
> + "[\'T1\', \'I1\', [221, 221, 221, 1], [0, 0, 0, 10], [0, 0, 0, 10], !!binary lKF4oXmhego=]"
> -- </analyze-6.1.3>
> })
>
>
>
>
> @@ -184,12 +187,12 @@ test:do_execsql_test(
> ]], generate_tens(100))
>
> -- The first element in the "nEq" list of all samples should therefore be 10.
> ---
> +
Extra empty line.
> test:do_execsql_test(
> "3.3.2",
> [[
> ANALYZE;
> - SELECT lrange("neq", 1, 1) FROM "_sql_stat4" WHERE "idx" = 'I2';
> + SELECT lrange(msgpack_decode_sample("neq"), 1, 1) FROM "_sql_stat4" WHERE "idx" = 'I2';
> ]], generate_tens_str(24))
>
> ---------------------------------------------------------------------------
> @@ -283,31 +286,100 @@ test:do_execsql_test(
> -- </4.2>
> })
>
> +box.internal.sql_create_function("get_tuples_where_order_by_limit", "TEXT", get_tuples_where_order_by_limit)
> +
> test:do_execsql_test(
> 4.3,
> [[
> - SELECT "neq", lrange("nlt", 1, 3), lrange("ndlt", 1, 3), lrange(msgpack_decode_sample("sample"), 1, 3)
> - FROM "_sql_stat4" WHERE "idx" = 'I1' ORDER BY "sample" LIMIT 16;
> + SELECT get_tuples_where_order_by_limit('sample', 'asc', 16);
> ]], {
> -- <4.3>
> - "10 10 10","0 0 0","0 0 0","0 0 0","10 10 10","10 10 10","1 1 1","1 1 1","10 10 10","20 20 20",
> - "2 2 2","2 2 2","10 10 10","30 30 30","3 3 3","3 3 3","10 10 10","40 40 40","4 4 4","4 4 4",
> - "10 10 10","50 50 50","5 5 5","5 5 5","10 10 10","60 60 60","6 6 6","6 6 6","10 10 10","70 70 70",
> - "7 7 7","7 7 7","10 10 10","80 80 80","8 8 8","8 8 8","10 10 10","90 90 90","9 9 9","9 9 9",
> - "10 10 10","100 100 100","10 10 10","10 10 10","10 10 10","110 110 110","11 11 11","11 11 11",
> - "10 10 10","120 120 120","12 12 12","12 12 12","10 10 10","130 130 130","13 13 13","13 13 13",
> - "10 10 10","140 140 140","14 14 14","14 14 14","10 10 10","150 150 150","15 15 15","15 15 15"
> + "[\'T1\', \'I1\', [10, 10, 10], [0, 0, 0], [0, 0, 0], !!binary kwAAoTA=], “..
Firstly, it would be nice to see the decoded sample; secondly, throw away the index/table.
That would make the diff look clearer.
> + "[\'T1\', \'I1', [10, 10, 10], [10, 10, 10], [1, 1, 1], !!binary kwEBoTE=], "..
> + "[\'T1\', \'I1\', [10, 10, 10], [20, 20, 20], [2, 2, 2], !!binary kwICoTI=], "..
> + "[\'T1\', \'I1\', [10, 10, 10], [30, 30, 30], [3, 3, 3], !!binary kwMDoTM=], "..
> + "[\'T1\', \'I1\', [10, 10, 10], [40, 40, 40], [4, 4, 4], !!binary kwQEoTQ=], "..
> + "[\'T1\', \'I1\', [10, 10, 10], [50, 50, 50], [5, 5, 5], !!binary kwUFoTU=], "..
> + "[\'T1\', \'I1\', [10, 10, 10], [60, 60, 60], [6, 6, 6], !!binary kwYGoTY=], "..
> + "[\'T1\', \'I1\', [10, 10, 10], [70, 70, 70], [7, 7, 7], !!binary kwcHoTc=], "..
> + "[\'T1\', \'I1\', [10, 10, 10], [80, 80, 80], [8, 8, 8], !!binary kwgIoTg=], "..
> + "[\'T1\', \'I1\', [10, 10, 10], [90, 90, 90], [9, 9, 9], !!binary kwkJoTk=], "..
> + "[\'T1\', \'I1\', [10, 10, 10], [100, 100, 100], [10, 10, 10], !!binary kwoKojEw], "..
> + "[\'T1\', \'I1\', [10, 10, 10], [110, 110, 110], [11, 11, 11], !!binary kwsLojEx], "..
> + "[\'T1\', \'I1\', [10, 10, 10], [120, 120, 120], [12, 12, 12], !!binary kwwMojEy], "..
> + "[\'T1\', \'I1\', [10, 10, 10], [130, 130, 130], [13, 13, 13], !!binary kw0NojEz], "..
> + "[\'T1\', \'I1\', [10, 10, 10], [140, 140, 140], [14, 14, 14], !!binary kw4OojE0], "..
> + "[\'T1\', \'I1\', [10, 10, 10], [150, 150, 150], [15, 15, 15], !!binary kw8PojE1]"
> -- </4.3>
> })
>
> test:do_execsql_test(
> 4.4,
> [[
> - SELECT "neq", lrange("nlt", 1, 3), lrange("ndlt", 1, 3), lrange(msgpack_decode_sample("sample"), 1, 3)
> - FROM "_sql_stat4" WHERE "idx" = 'I1' ORDER BY "sample" DESC LIMIT 2;
> + SELECT get_tuples_where_order_by_limit('sample', 'desc', 2);
> ]], {
> -- <4.4>
> - "2 1 1","295 296 296","120 122 125","201 4 h","5 3 1","290 290 291","119 119 120","200 1 b"
> + "['T1', 'I1', [2, 1, 1], [295, 296, 296], [120, 122, 125], !!binary k8zJBKFo], "..
> + "['T1', 'I1', [5, 3, 1], [290, 290, 291], [119, 119, 120], !!binary k8zIAaFi]"
> -- </4.4>
> })
>
> @@ -391,16 +463,41 @@ test:do_execsql_test(
> INSERT INTO t1 VALUES(null, 4, 4);
> INSERT INTO t1 VALUES(null, 5, 5);
> ANALYZE;
> - CREATE TABLE x1(tbl TEXT, idx TEXT , neq TEXT, nlt TEXT, ndlt TEXT, sample BLOB, PRIMARY KEY(tbl, idx, sample));
> - INSERT INTO x1 SELECT * FROM "_sql_stat4";
> - DELETE FROM "_sql_stat4";
> - INSERT INTO "_sql_stat4" SELECT * FROM x1;
> - ANALYZE;
> ]])
>
> +function copy_tuples(from, to)
> + for i,t in from:pairs() do
> + to:insert(t)
> + end
> +end
> +
> +function prepare_to_6_2()
> + local format = {}
> + format[1] = {name='tbl', type='string'}
> + format[2] = {name='idx', type='string'}
> + format[3] = {name='neq', type='array'}
> + format[4] = {name='nlt', type='array'}
> + format[5] = {name='ndlt', type='array'}
> + format[6] = {name='sample', type='scalar'}
> + x1 = box.schema.space.create("x1", {engine = 'memtx', format = format, field_count = 0})
> + x1:create_index('primary', {parts = {1, 'string', 2, 'string', 6, 'scalar'}})
> + copy_tuples(_sql_stat4, x1)
> +end
> +
> +prepare_to_6_2()
Wrap this func in test-func call.
> +
> test:do_execsql_test(
> 6.2,
> [[
> + DELETE FROM "_sql_stat4";
> + ]])
> +
> +copy_tuples(x1, _sql_stat4)
> +
> +test:do_execsql_test(
> + 6.3,
> + [[
> + ANALYZE;
> SELECT * FROM t1 WHERE a = 'abc';
> ]])
>
> @@ -459,10 +556,19 @@ test:do_execsql_test(
> -- -- </7.2>
> -- })
>
> +function update_stat_fields(stat, field_num, val)
Why do you need this wrapper at all?
> + for i,t in stat:pairs() do
> + t = t:transform(3, 3)
> + print(t)
Debug print.
> + stat:update(t, {{'=', field_num, val}})
> + end
> +end
> +
> +update_stat_fields(_sql_stat4, 3, {0, 0, 0})
Wrap this func in test-func call.
> +
> test:do_execsql_test(
> 7.3,
> [[
> - UPDATE "_sql_stat4" SET "neq" = '0 0 0';
> ANALYZE;
> SELECT * FROM t1 WHERE a = 1;
> ]], {
> @@ -471,11 +577,12 @@ test:do_execsql_test(
> -- </7.3>
> })
>
> +box.sql.execute('ANALYZE’)
Again, this breaks the philosophy of the tap suite:
wrap this ANALYZE statement into one of
the testing funcs.
>
> -- This is just for coverage....
> -test:do_execsql_test(
> - 15.11,
> - [[
> - ANALYZE;
> - UPDATE "_sql_stat1" SET "stat" = "stat" || ' unordered';
> - ]])
> +box.sql.execute('ANALYZE')
> +update_stat_fields(_sql_stat1, 3, {'unordered’})
Firstly, the 'unordered' token should not be the only member
of the array; it should be placed after the original statistics.
Secondly, without an instance reload, all these insertions
into the stat spaces make no sense.
> diff --git a/test/sql-tap/gh-3350-skip-scan.test.lua b/test/sql-tap/gh-3350-skip-scan.test.lua
> index 4cecfe081..eec09a546 100755
> --- a/test/sql-tap/gh-3350-skip-scan.test.lua
> +++ b/test/sql-tap/gh-3350-skip-scan.test.lua
> @@ -3,7 +3,7 @@
> -- gh-3350, gh-2859
>
> +local _sql_stat1 = box.space[box.schema.SQL_STAT1_ID]
> +_sql_stat1:insert{'T1','T1ABC', {10000,5000,2000,10}}
> +
> +test:do_execsql_test(
> + "skip-scan-1.4.2",
> + [[
> ANALYZE t2;
> SELECT a,b,c,d FROM t1 WHERE b=345;
> ]], {
> @@ -104,5 +113,4 @@ test:do_execsql_test(
> }
> )
>
> -
Extra diff.
> test:finish_test()
More information about the Tarantool-patches
mailing list