From mboxrd@z Thu Jan 1 00:00:00 1970
Message-ID: <5ae7bc55-485c-d526-9c84-3cbba13b6254@tarantool.org>
Date: Wed, 3 Nov 2021 18:01:14 +0300
To: Yan Shtunder, tml
References: <20211025095223.22521-1-ya.shtunder@gmail.com>
 <665ef444-80de-4848-f9f0-a3ccc6e7c059@tarantool.org>
Subject: Re: [Tarantool-patches] [PATCH v3] replication: removing anonymous replicas from synchro quorum
List-Id: Tarantool development patches
From: Serge Petrenko via Tarantool-patches
Reply-To: Serge Petrenko
Content-Type: text/plain; charset=UTF-8; format=flowed

03.11.2021 13:05, Yan Shtunder wrote:
> Thanks for the comments!
> I have addressed your remarks.

Thanks for the changes! LGTM.

> Diff:
>
> +++ b/test/replication-luatest/gh_5418_qsync_with_anon_test.lua
> @@ -0,0 +1,62 @@
> +local t = require('luatest')
> +local cluster = require('test.luatest_helpers.cluster')
> +local helpers = require('test.luatest_helpers')
> +
> +local g = t.group('gh-5418', {{engine = 'memtx'}, {engine = 'vinyl'}})
> +
> +g.before_each(function(cg)
> +    local engine = cg.params.engine
> +
> +    cg.cluster = cluster:new({})
> +
> +    local box_cfg = {
> +        replication         = {
> +            helpers.instance_uri('master')
> +        },
> +        replication_synchro_quorum = 2,
> +        replication_timeout = 1
> +    }
> +
> +    cg.master = cg.cluster:build_server({alias = 'master', engine = engine, box_cfg = box_cfg})
> +
> +    local box_cfg = {
> +        replication         = {
> +            helpers.instance_uri('master'),
> +            helpers.instance_uri('replica')
> +        },
> +        replication_timeout = 1,
> +        replication_connect_timeout = 4,
> +        read_only           = true,
> +        replication_anon    = true
> +    }
> +
> +    cg.replica = cg.cluster:build_server({alias = 'replica', engine = engine, box_cfg = box_cfg})
> +
> +    cg.cluster:add_server(cg.master)
> +    cg.cluster:add_server(cg.replica)
> +    cg.cluster:start()
> +end)
> +
> +
> +g.after_each(function(cg)
> +    cg.cluster.servers = nil
> +    cg.cluster:drop()
> +end)
> +
> +
> +g.test_qsync_with_anon = function(cg)
> +    cg.master:eval("box.schema.space.create('sync', {is_sync = true})")
> +    cg.master:eval("box.space.sync:create_index('pk')")
> +    cg.master:eval("box.ctl.promote()")
> +
> +    t.assert_error_msg_content_equals("Quorum collection for a synchronous transaction is timed out",
> +        function() cg.master:eval("return box.space.sync:insert{1}") end)
> +
> +    -- Wait until everything is replicated from the master to the replica
> +    local vclock = cg.master:eval("return box.info.vclock")
> +    vclock[0] = nil
> +    helpers:wait_vclock(cg.replica, vclock)
> +
> +    t.assert_equals(cg.master:eval("return box.space.sync:select()"), {})
> +    t.assert_equals(cg.replica:eval("return box.space.sync:select()"), {})
> +end
> diff --git a/test/replication/qsync_with_anon.result b/test/replication/qsync_with_anon.result
> deleted file mode 100644
> index 99c6fb902..000000000
> --- a/test/replication/qsync_with_anon.result
> +++ /dev/null
> @@ -1,231 +0,0 @@
> --- test-run result file version 2
> -env = require('test_run')
> - | ---
> - | ...
> -test_run = env.new()
> - | ---
> - | ...
> -engine = test_run:get_cfg('engine')
> - | ---
> - | ...
> -
> -orig_synchro_quorum = box.cfg.replication_synchro_quorum
> - | ---
> - | ...
> -orig_synchro_timeout = box.cfg.replication_synchro_timeout
> - | ---
> - | ...
> -
> -NUM_INSTANCES = 2
> - | ---
> - | ...
> -BROKEN_QUORUM = NUM_INSTANCES + 1
> - | ---
> - | ...
> -
> -box.schema.user.grant('guest', 'replication')
> - | ---
> - | ...
> -
> --- Setup a cluster with anonymous replica.
> -test_run:cmd('create server replica_anon with rpl_master=default, script="replication/anon1.lua"')
> - | ---
> - | - true
> - | ...
> -test_run:cmd('start server replica_anon')
> - | ---
> - | - true
> - | ...
> -test_run:cmd('switch replica_anon')
> - | ---
> - | - true
> - | ...
> -
> --- [RFC, Asynchronous replication] successful transaction applied on async
> --- replica.
> --- Testcase setup.
> -test_run:switch('default')
> - | ---
> - | - true
> - | ...
> -box.cfg{replication_synchro_quorum=NUM_INSTANCES, replication_synchro_timeout=1000}
> - | ---
> - | ...
> -_ = box.schema.space.create('sync', {is_sync=true, engine=engine})
> - | ---
> - | ...
> -_ = box.space.sync:create_index('pk')
> - | ---
> - | ...
> -box.ctl.promote()
> - | ---
> - | ...
> --- Testcase body.
> -test_run:switch('default')
> - | ---
> - | - true
> - | ...
> -box.space.sync:insert{1} -- success
> - | ---
> - | - [1]
> - | ...
> -box.space.sync:insert{2} -- success
> - | ---
> - | - [2]
> - | ...
> -box.space.sync:insert{3} -- success
> - | ---
> - | - [3]
> - | ...
> -test_run:cmd('switch replica_anon')
> - | ---
> - | - true
> - | ...
> -box.space.sync:select{} -- 1, 2, 3
> - | ---
> - | - - [1]
> - |   - [2]
> - |   - [3]
> - | ...
> --- Testcase cleanup.
> -test_run:switch('default')
> - | ---
> - | - true
> - | ...
> -box.space.sync:drop()
> - | ---
> - | ...
> -
> --- [RFC, Asynchronous replication] failed transaction rolled back on async
> --- replica.
> --- Testcase setup.
> -box.cfg{replication_synchro_quorum = NUM_INSTANCES, replication_synchro_timeout = 1000}
> - | ---
> - | ...
> -_ = box.schema.space.create('sync', {is_sync=true, engine=engine})
> - | ---
> - | ...
> -_ = box.space.sync:create_index('pk')
> - | ---
> - | ...
> --- Write something to flush the current master's state to replica.
> -_ = box.space.sync:insert{1}
> - | ---
> - | ...
> -_ = box.space.sync:delete{1}
> - | ---
> - | ...
> -
> -box.cfg{replication_synchro_quorum = BROKEN_QUORUM, replication_synchro_timeout = 1000}
> - | ---
> - | ...
> -fiber = require('fiber')
> - | ---
> - | ...
> -ok, err = nil
> - | ---
> - | ...
> -f = fiber.create(function()                                                     \
> -    ok, err = pcall(box.space.sync.insert, box.space.sync, {1})                 \
> -end)
> - | ---
> - | ...
> -
> -test_run:cmd('switch replica_anon')
> - | ---
> - | - true
> - | ...
> -test_run:wait_cond(function() return box.space.sync:count() == 1 end)
> - | ---
> - | - true
> - | ...
> -box.space.sync:select{}
> - | ---
> - | - - [1]
> - | ...
> -
> -test_run:switch('default')
> - | ---
> - | - true
> - | ...
> -box.cfg{replication_synchro_timeout = 0.001}
> - | ---
> - | ...
> -test_run:wait_cond(function() return f:status() == 'dead' end)
> - | ---
> - | - true
> - | ...
> -box.space.sync:select{}
> - | ---
> - | - []
> - | ...
> -
> -test_run:cmd('switch replica_anon')
> - | ---
> - | - true
> - | ...
> -test_run:wait_cond(function() return box.space.sync:count() == 0 end)
> - | ---
> - | - true
> - | ...
> -box.space.sync:select{}
> - | ---
> - | - []
> - | ...
> -
> -test_run:switch('default')
> - | ---
> - | - true
> - | ...
> -box.cfg{replication_synchro_quorum=NUM_INSTANCES, replication_synchro_timeout=1000}
> - | ---
> - | ...
> -box.space.sync:insert{1} -- success
> - | ---
> - | - [1]
> - | ...
> -test_run:cmd('switch replica_anon')
> - | ---
> - | - true
> - | ...
> -box.space.sync:select{} -- 1
> - | ---
> - | - - [1]
> - | ...
> --- Testcase cleanup.
> -test_run:switch('default')
> - | ---
> - | - true
> - | ...
> -box.space.sync:drop()
> - | ---
> - | ...
> -
> --- Teardown.
> -test_run:switch('default')
> - | ---
> - | - true
> - | ...
> -test_run:cmd('stop server replica_anon')
> - | ---
> - | - true
> - | ...
> -test_run:cmd('delete server replica_anon')
> - | ---
> - | - true
> - | ...
> -box.schema.user.revoke('guest', 'replication')
> - | ---
> - | ...
> -box.cfg{                                                                        \
> -    replication_synchro_quorum = orig_synchro_quorum,                           \
> -    replication_synchro_timeout = orig_synchro_timeout,                         \
> -}
> - | ---
> - | ...
> -box.ctl.demote()
> - | ---
> - | ...
> -test_run:cleanup_cluster()
> - | ---
> - | ...
> diff --git a/test/replication/qsync_with_anon.test.lua b/test/replication/qsync_with_anon.test.lua
> deleted file mode 100644
> index e73880ec7..000000000
> --- a/test/replication/qsync_with_anon.test.lua
> +++ /dev/null
> @@ -1,86 +0,0 @@
> -env = require('test_run')
> -test_run = env.new()
> -engine = test_run:get_cfg('engine')
> -
> -orig_synchro_quorum = box.cfg.replication_synchro_quorum
> -orig_synchro_timeout = box.cfg.replication_synchro_timeout
> -
> -NUM_INSTANCES = 2
> -BROKEN_QUORUM = NUM_INSTANCES + 1
> -
> -box.schema.user.grant('guest', 'replication')
> -
> --- Setup a cluster with anonymous replica.
> -test_run:cmd('create server replica_anon with rpl_master=default, script="replication/anon1.lua"')
> -test_run:cmd('start server replica_anon')
> -test_run:cmd('switch replica_anon')
> -
> --- [RFC, Asynchronous replication] successful transaction applied on async
> --- replica.
> --- Testcase setup.
> -test_run:switch('default')
> -box.cfg{replication_synchro_quorum=NUM_INSTANCES, replication_synchro_timeout=1000}
> -_ = box.schema.space.create('sync', {is_sync=true, engine=engine})
> -_ = box.space.sync:create_index('pk')
> -box.ctl.promote()
> --- Testcase body.
> -test_run:switch('default')
> -box.space.sync:insert{1} -- success
> -box.space.sync:insert{2} -- success
> -box.space.sync:insert{3} -- success
> -test_run:cmd('switch replica_anon')
> -box.space.sync:select{} -- 1, 2, 3
> --- Testcase cleanup.
> -test_run:switch('default')
> -box.space.sync:drop()
> -
> --- [RFC, Asynchronous replication] failed transaction rolled back on async
> --- replica.
> --- Testcase setup.
> -box.cfg{replication_synchro_quorum = NUM_INSTANCES, replication_synchro_timeout = 1000}
> -_ = box.schema.space.create('sync', {is_sync=true, engine=engine})
> -_ = box.space.sync:create_index('pk')
> --- Write something to flush the current master's state to replica.
> -_ = box.space.sync:insert{1}
> -_ = box.space.sync:delete{1}
> -
> -box.cfg{replication_synchro_quorum = BROKEN_QUORUM, replication_synchro_timeout = 1000}
> -fiber = require('fiber')
> -ok, err = nil
> -f = fiber.create(function()                                                     \
> -    ok, err = pcall(box.space.sync.insert, box.space.sync, {1})                 \
> -end)
> -
> -test_run:cmd('switch replica_anon')
> -test_run:wait_cond(function() return box.space.sync:count() == 1 end)
> -box.space.sync:select{}
> -
> -test_run:switch('default')
> -box.cfg{replication_synchro_timeout = 0.001}
> -test_run:wait_cond(function() return f:status() == 'dead' end)
> -box.space.sync:select{}
> -
> -test_run:cmd('switch replica_anon')
> -test_run:wait_cond(function() return box.space.sync:count() == 0 end)
> -box.space.sync:select{}
> -
> -test_run:switch('default')
> -box.cfg{replication_synchro_quorum=NUM_INSTANCES, replication_synchro_timeout=1000}
> -box.space.sync:insert{1} -- success
> -test_run:cmd('switch replica_anon')
> -box.space.sync:select{} -- 1
> --- Testcase cleanup.
> -test_run:switch('default')
> -box.space.sync:drop()
> -
> --- Teardown.
> -test_run:switch('default')
> -test_run:cmd('stop server replica_anon')
> -test_run:cmd('delete server replica_anon')
> -box.schema.user.revoke('guest', 'replication')
> -box.cfg{                                                                        \
> -    replication_synchro_quorum = orig_synchro_quorum,                           \
> -    replication_synchro_timeout = orig_synchro_timeout,                         \
> -}
> -box.ctl.demote()
> -test_run:cleanup_cluster()
>
> --
> Yan Shtunder
>
> On Fri, Oct 29, 2021 at 11:06, Serge Petrenko wrote:
>
> 28.10.2021 18:56, Yan Shtunder wrote:
> > Hi! Thank you for the review!
> > I have fixed the errors
> >
> >     Nit: better say "Transactions should be committed".
> >     reaches -> reach.
> >
> > Transactions should be committed after they reach quorum of "real"
> > cluster members.
> >
> >     Please, find a more informative test name.
> >     For example, "gh_5418_qsync_with_anon_test.lua"
> >
> > gh_5418_test.lua -> gh_5418_qsync_with_anon_test.lua
> >
> >     Please, use `t.helpers.retrying()` here.
> >
> > I used the wait_vclock function from the luatest_helpers.lua file
> >
> > --
> > Yan Shtunder
>
> Good job on the fixes!
> LGTM.
>
> > On Mon, Oct 25, 2021 at 16:32, Serge Petrenko wrote:
> >
> >     25.10.2021 12:52, Yan Shtunder via Tarantool-patches wrote:
> >
> >     Hi! Good job on porting the test to the current luatest version!
> >     Please, find a couple of comments below.
> >
> >     > Transactions have to committed after they reaches quorum of "real"
> >
> >     Nit: better say "Transactions should be committed".
> >     reaches -> reach.
> >
> >     > cluster members. Therefore, anonymous replicas don't have to
> >     > participate in the quorum.
> >     >
> >     > Closes #5418
> >     > ---
> >     > Issue: https://github.com/tarantool/tarantool/issues/5418
> >     > Patch: https://github.com/tarantool/tarantool/tree/yshtunder/gh-5418-qsync-with-anon-replicas
> >     >
> >     >   src/box/relay.cc                          |  3 +-
> >     >   test/replication-luatest/gh_5418_test.lua | 82 +++++++++++++++++++++++
> >     >   2 files changed, 84 insertions(+), 1 deletion(-)
> >     >   create mode 100644 test/replication-luatest/gh_5418_test.lua
> >     >
> >     > diff --git a/src/box/relay.cc b/src/box/relay.cc
> >     > index f5852df7b..cf569e8e2 100644
> >     > --- a/src/box/relay.cc
> >     > +++ b/src/box/relay.cc
> >     > @@ -543,6 +543,7 @@ tx_status_update(struct cmsg *msg)
> >     >       struct replication_ack ack;
> >     >       ack.source = status->relay->replica->id;
> >     >       ack.vclock = &status->vclock;
> >     > +     bool anon = status->relay->replica->anon;
> >     >       /*
> >     >        * Let pending synchronous transactions know, which of
> >     >        * them were successfully sent to the replica. Acks are
> >     > @@ -550,7 +551,7 @@ tx_status_update(struct cmsg *msg)
> >     >        * the single master in 100% so far). Other instances wait
> >     >        * for master's CONFIRM message instead.
> >     >        */
> >     > -     if (txn_limbo.owner_id == instance_id) {
> >     > +     if (txn_limbo.owner_id == instance_id && !anon) {
> >     >               txn_limbo_ack(&txn_limbo, ack.source,
> >     >                             vclock_get(ack.vclock, instance_id));
> >     >       }
> >
> >     I can't build your patch to test it manually, compilation fails with
> >     some ERRINJ-related errors.
> >
> >     Seems like the commit "replication: fill replicaset.applier.vclock
> >     after local recovery" you have on the branch is extraneous. And it
> >     causes the error.
> >
> >     Please remove it.
> >
> >     > diff --git a/test/replication-luatest/gh_5418_test.lua b/test/replication-luatest/gh_5418_test.lua
> >     > new file mode 100644
> >     > index 000000000..265d28ccb
> >     > --- /dev/null
> >     > +++ b/test/replication-luatest/gh_5418_test.lua
> >
> >     Please, find a more informative test name.
> >     For example, "gh_5418_qsync_with_anon_test.lua".
> >
> >     > @@ -0,0 +1,82 @@
> >     > +local fio = require('fio')
> >     > +local log = require('log')
> >     > +local fiber = require('fiber')
> >     > +local t = require('luatest')
> >     > +local cluster = require('test.luatest_helpers.cluster')
> >     > +local helpers = require('test.luatest_helpers.helpers')
> >     > +
> >     > +local g = t.group('gh-5418')
> >     > +
> >     > +g.before_test('test_qsync_with_anon', function()
> >     > +    g.cluster = cluster:new({})
> >     > +
> >     > +    local box_cfg = {
> >     > +        replication         = {helpers.instance_uri('master')},
> >     > +        replication_synchro_quorum = 2,
> >     > +        replication_timeout = 0.1
> >     > +    }
> >     > +
> >     > +    g.master = g.cluster:build_server({alias = 'master'}, engine, box_cfg)
> >     > +
> >     > +    local box_cfg = {
> >     > +        replication         = {
> >     > +            helpers.instance_uri('master'),
> >     > +            helpers.instance_uri('replica')
> >     > +        },
> >     > +        replication_timeout = 0.1,
> >     > +        replication_connect_timeout = 0.5,
> >     > +        read_only           = true,
> >     > +        replication_anon    = true
> >     > +    }
> >     > +
> >     > +    g.replica = g.cluster:build_server({alias = 'replica'}, engine, box_cfg)
> >     > +
> >     > +    g.cluster:join_server(g.master)
> >     > +    g.cluster:join_server(g.replica)
> >     > +    g.cluster:start()
> >     > +    log.info('Everything is started')
> >     > +end)
> >     > +
> >     > +g.after_test('test_qsync_with_anon', function()
> >     > +    g.cluster:stop()
> >     > +    fio.rmtree(g.master.workdir)
> >     > +    fio.rmtree(g.replica.workdir)
> >     > +end)
> >     > +
> >     > +local function wait_vclock(timeout)
> >     > +    local started_at = fiber.clock()
> >     > +    local lsn = g.master:eval("return box.info.vclock[1]")
> >     > +
> >     > +    local _, tbl = g.master:eval("return next(box.info.replication_anon())")
> >     > +    local to_lsn = tbl.downstream.vclock[1]
> >     > +
> >     > +    while to_lsn == nil or to_lsn < lsn do
> >     > +        fiber.sleep(0.001)
> >     > +
> >     > +        if (fiber.clock() - started_at) > timeout then
> >     > +            return false
> >     > +        end
> >     > +
> >     > +        _, tbl = g.master:eval("return next(box.info.replication_anon())")
> >     > +        to_lsn = tbl.downstream.vclock[1]
> >     > +
> >     > +        log.info(string.format("master lsn: %d; replica_anon lsn: %d",
> >     > +            lsn, to_lsn))
> >     > +    end
> >     > +
> >     > +    return true
> >     > +end
> >     > +
> >     > +g.test_qsync_with_anon = function()
> >     > +    g.master:eval("box.schema.space.create('sync', {is_sync = true})")
> >     > +    g.master:eval("box.space.sync:create_index('pk')")
> >     > +
> >     > +    t.assert_error_msg_content_equals("Quorum collection for a synchronous transaction is timed out",
> >     > +        function() g.master:eval("return box.space.sync:insert{1}") end)
> >     > +
> >     > +    -- Wait until everything is replicated from the master to the replica
> >     > +    t.assert(wait_vclock(1))
> >
> >     Please, use `t.helpers.retrying()` here.
> >     It receives a timeout and a function to call.
> >     Like `t.helpers.retrying({timeout=5}, wait_vclock)`.
> >     And wait_vclock should simply return true or false based on
> >     whether the replica has reached the master's vclock.
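> >
> >     A rough sketch of the call site (untested; it assumes luatest's
> >     `t.helpers.retrying` re-runs the given function, swallowing errors,
> >     until it succeeds or the timeout expires, and that wait_vclock is
> >     reworked to return a plain boolean without its own timeout loop):
> >
> >     -- Retry for up to 5 seconds until wait_vclock() reports that the
> >     -- anonymous replica has caught up with the master's vclock. The
> >     -- boolean is turned into an assertion so that retrying() sees a
> >     -- failure and keeps retrying.
> >     t.helpers.retrying({timeout = 5}, function()
> >         t.assert(wait_vclock())
> >     end)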
> >
> >     Also, please choose a bigger timeout, like 5 or 10 seconds.
> >     Otherwise the test will be flaky on slow testing machines in our CI.
> >
> >     > +
> >     > +    t.assert_equals(g.master:eval("return box.space.sync:select()"), {})
> >     > +    t.assert_equals(g.replica:eval("return box.space.sync:select()"), {})
> >     > +end
> >     > --
> >     > 2.25.1
> >     >
> >
> >     --
> >     Serge Petrenko
> >
>
> --
> Serge Petrenko

--
Serge Petrenko