[Tarantool-patches] [PATCH v20 3/3] test: add gh-6036-qsync-order test

Cyrill Gorcunov gorcunov at gmail.com
Wed Oct 6 23:15:22 MSK 2021


Test that promotion requests are handled only when the corresponding
write to the WAL completes, because we update the in-memory data
before the write finishes.

Note that without the patch this test fires the assertion

> tarantool: src/box/txn_limbo.c:481: txn_limbo_read_rollback: Assertion `e->txn->signature >= 0' failed.

Part-of #6036

Signed-off-by: Cyrill Gorcunov <gorcunov at gmail.com>
---
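Note for reviewers: the start arguments used by the test below map
positionally onto the parameters of election_replica.lua, so
args='1 nil manual 1 10000' resolves to

    local SYNCHRO_QUORUM   = 1        -- arg[1]
    local ELECTION_TIMEOUT = 0.1      -- arg[2] is 'nil', the default applies
    local ELECTION_MODE    = 'manual' -- arg[3]
    local CONNECT_QUORUM   = 1        -- arg[4]
    local SYNCHRO_TIMEOUT  = 10000    -- arg[5], introduced by this patch

The synchro timeout is made large enough that the PROMOTE pending on
the delayed WAL does not get rolled back by timeout in the meantime.
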
 test/replication/election_replica.lua         |   3 +-
 test/replication/gh-6036-qsync-order.result   | 230 ++++++++++++++++++
 test/replication/gh-6036-qsync-order.test.lua | 109 ++++++++
 test/replication/suite.cfg                    |   1 +
 test/replication/suite.ini                    |   2 +-
 5 files changed, 343 insertions(+), 2 deletions(-)
 create mode 100644 test/replication/gh-6036-qsync-order.result
 create mode 100644 test/replication/gh-6036-qsync-order.test.lua

diff --git a/test/replication/election_replica.lua b/test/replication/election_replica.lua
index 3b4d9a123..1dbfa96dc 100644
--- a/test/replication/election_replica.lua
+++ b/test/replication/election_replica.lua
@@ -6,6 +6,7 @@ local SYNCHRO_QUORUM = arg[1] and tonumber(arg[1]) or 3
 local ELECTION_TIMEOUT = arg[2] and tonumber(arg[2]) or 0.1
 local ELECTION_MODE = arg[3] or 'candidate'
 local CONNECT_QUORUM = arg[4] and tonumber(arg[4]) or 3
+local SYNCHRO_TIMEOUT = arg[5] and tonumber(arg[5]) or 0.1
 
 local function instance_uri(instance_id)
     return SOCKET_DIR..'/election_replica'..instance_id..'.sock';
@@ -25,7 +26,7 @@ box.cfg({
     election_mode = ELECTION_MODE,
     election_timeout = ELECTION_TIMEOUT,
     replication_synchro_quorum = SYNCHRO_QUORUM,
-    replication_synchro_timeout = 0.1,
+    replication_synchro_timeout = SYNCHRO_TIMEOUT,
     -- To reveal more election logs.
     log_level = 6,
 })
diff --git a/test/replication/gh-6036-qsync-order.result b/test/replication/gh-6036-qsync-order.result
new file mode 100644
index 000000000..34a7e7803
--- /dev/null
+++ b/test/replication/gh-6036-qsync-order.result
@@ -0,0 +1,230 @@
+-- test-run result file version 2
+--
+-- gh-6036: verify that terms are locked while we're inside the
+-- journal write routine, because parallel appliers may ignore the
+-- fact that the term is already updated but not yet written, which
+-- leads to data inconsistency.
+--
+test_run = require('test_run').new()
+ | ---
+ | ...
+
+test_run:cmd('create server master with script="replication/election_replica1.lua"')
+ | ---
+ | - true
+ | ...
+test_run:cmd('create server replica1 with script="replication/election_replica2.lua"')
+ | ---
+ | - true
+ | ...
+test_run:cmd('create server replica2 with script="replication/election_replica3.lua"')
+ | ---
+ | - true
+ | ...
+
+test_run:cmd("start server master with wait=False, args='1 nil manual 1 10000'")
+ | ---
+ | - true
+ | ...
+test_run:cmd("start server replica1 with wait=False, args='1 nil manual 1 10000'")
+ | ---
+ | - true
+ | ...
+test_run:cmd("start server replica2 with wait=False, args='1 nil manual 1 10000'")
+ | ---
+ | - true
+ | ...
+
+test_run:wait_fullmesh({"master", "replica1", "replica2"})
+ | ---
+ | ...
+
+--
+-- Create a synchro space on the master node and make
+-- sure the write is processed just fine.
+test_run:switch("master")
+ | ---
+ | - true
+ | ...
+box.ctl.promote()
+ | ---
+ | ...
+s = box.schema.create_space('test', {is_sync = true})
+ | ---
+ | ...
+_ = s:create_index('pk')
+ | ---
+ | ...
+s:insert{1}
+ | ---
+ | - [1]
+ | ...
+
+test_run:switch("replica1")
+ | ---
+ | - true
+ | ...
+test_run:wait_lsn('replica1', 'master')
+ | ---
+ | ...
+
+test_run:switch("replica2")
+ | ---
+ | - true
+ | ...
+test_run:wait_lsn('replica2', 'master')
+ | ---
+ | ...
+
+--
+-- Drop connection between master and replica1.
+test_run:switch("master")
+ | ---
+ | - true
+ | ...
+box.cfg({                                   \
+    replication = {                         \
+        "unix/:./election_replica1.sock",   \
+        "unix/:./election_replica3.sock",   \
+    },                                      \
+})
+ | ---
+ | ...
+--
+-- Drop connection between replica1 and master.
+test_run:switch("replica1")
+ | ---
+ | - true
+ | ...
+test_run:wait_cond(function() return box.space.test:get{1} ~= nil end)
+ | ---
+ | - true
+ | ...
+box.cfg({                                   \
+    replication = {                         \
+        "unix/:./election_replica2.sock",   \
+        "unix/:./election_replica3.sock",   \
+    },                                      \
+})
+ | ---
+ | ...
+
+--
+-- Here we have the following topology:
+--
+--              replica2 (will be delayed)
+--              /     \
+--          master    replica1
+
+--
+-- Initiate the disk delay in a slightly tricky way: the next
+-- write will fall asleep forever.
+test_run:switch("replica2")
+ | ---
+ | - true
+ | ...
+box.error.injection.set('ERRINJ_WAL_DELAY_COUNTDOWN', 1)
+ | ---
+ | - ok
+ | ...
+cnt_before = box.error.injection.get('ERRINJ_WAL_DELAY_COUNTDOWN')
+ | ---
+ | ...
+--
+-- Make replica1 a leader and start writing data. The
+-- PROMOTE request gets queued on replica2 and is not yet
+-- processed; meanwhile the INSERT won't complete either,
+-- since it waits for the PROMOTE to finish first. Note that
+-- we switch to replica2 as well just to make sure the
+-- PROMOTE has reached it.
+test_run:switch("replica1")
+ | ---
+ | - true
+ | ...
+box.ctl.promote()
+ | ---
+ | ...
+test_run:switch("replica2")
+ | ---
+ | - true
+ | ...
+test_run:wait_cond(function() return box.error.injection.get('ERRINJ_WAL_DELAY_COUNTDOWN') < cnt_before end)
+ | ---
+ | - true
+ | ...
+test_run:switch("replica1")
+ | ---
+ | - true
+ | ...
+_ = require('fiber').create(function() box.space.test:insert{2} end)
+ | ---
+ | ...
+
+--
+-- The master node has no clue that there is a new leader and
+-- continues writing data with an obsolete term. Since replica2
+-- is delayed now, the INSERT won't proceed yet but gets queued.
+test_run:switch("master")
+ | ---
+ | - true
+ | ...
+_ = require('fiber').create(function() box.space.test:insert{3} end)
+ | ---
+ | ...
+
+--
+-- Finally, enable replica2 back. Make sure the data from the new
+-- leader (replica1) gets written while the old leader's data is ignored.
+test_run:switch("replica2")
+ | ---
+ | - true
+ | ...
+box.error.injection.set('ERRINJ_WAL_DELAY', false)
+ | ---
+ | - ok
+ | ...
+test_run:wait_cond(function() return box.space.test:get{2} ~= nil end)
+ | ---
+ | - true
+ | ...
+box.space.test:select{}
+ | ---
+ | - - [1]
+ |   - [2]
+ | ...
+
+test_run:switch("default")
+ | ---
+ | - true
+ | ...
+test_run:cmd('stop server master')
+ | ---
+ | - true
+ | ...
+test_run:cmd('stop server replica1')
+ | ---
+ | - true
+ | ...
+test_run:cmd('stop server replica2')
+ | ---
+ | - true
+ | ...
+
+test_run:cmd('delete server master')
+ | ---
+ | - true
+ | ...
+test_run:cmd('delete server replica1')
+ | ---
+ | - true
+ | ...
+test_run:cmd('delete server replica2')
+ | ---
+ | - true
+ | ...
diff --git a/test/replication/gh-6036-qsync-order.test.lua b/test/replication/gh-6036-qsync-order.test.lua
new file mode 100644
index 000000000..47996998d
--- /dev/null
+++ b/test/replication/gh-6036-qsync-order.test.lua
@@ -0,0 +1,109 @@
+--
+-- gh-6036: verify that terms are locked while we're inside the
+-- journal write routine, because parallel appliers may ignore the
+-- fact that the term is already updated but not yet written, which
+-- leads to data inconsistency.
+--
+test_run = require('test_run').new()
+
+test_run:cmd('create server master with script="replication/election_replica1.lua"')
+test_run:cmd('create server replica1 with script="replication/election_replica2.lua"')
+test_run:cmd('create server replica2 with script="replication/election_replica3.lua"')
+
+test_run:cmd("start server master with wait=False, args='1 nil manual 1 10000'")
+test_run:cmd("start server replica1 with wait=False, args='1 nil manual 1 10000'")
+test_run:cmd("start server replica2 with wait=False, args='1 nil manual 1 10000'")
+
+test_run:wait_fullmesh({"master", "replica1", "replica2"})
+
+--
+-- Create a synchro space on the master node and make
+-- sure the write is processed just fine.
+test_run:switch("master")
+box.ctl.promote()
+s = box.schema.create_space('test', {is_sync = true})
+_ = s:create_index('pk')
+s:insert{1}
+
+test_run:switch("replica1")
+test_run:wait_lsn('replica1', 'master')
+
+test_run:switch("replica2")
+test_run:wait_lsn('replica2', 'master')
+
+--
+-- Drop connection between master and replica1.
+test_run:switch("master")
+box.cfg({                                   \
+    replication = {                         \
+        "unix/:./election_replica1.sock",   \
+        "unix/:./election_replica3.sock",   \
+    },                                      \
+})
+--
+-- Drop connection between replica1 and master.
+test_run:switch("replica1")
+test_run:wait_cond(function() return box.space.test:get{1} ~= nil end)
+box.cfg({                                   \
+    replication = {                         \
+        "unix/:./election_replica2.sock",   \
+        "unix/:./election_replica3.sock",   \
+    },                                      \
+})
+
+--
+-- Here we have the following topology:
+--
+--              replica2 (will be delayed)
+--              /     \
+--          master    replica1
+
+--
+-- Initiate the disk delay in a slightly tricky way: the next
+-- write will fall asleep forever.
+test_run:switch("replica2")
+box.error.injection.set('ERRINJ_WAL_DELAY_COUNTDOWN', 1)
+cnt_before = box.error.injection.get('ERRINJ_WAL_DELAY_COUNTDOWN')
+--
+-- Make replica1 a leader and start writing data. The
+-- PROMOTE request gets queued on replica2 and is not yet
+-- processed; meanwhile the INSERT won't complete either,
+-- since it waits for the PROMOTE to finish first. Note that
+-- we switch to replica2 as well just to make sure the
+-- PROMOTE has reached it.
+test_run:switch("replica1")
+box.ctl.promote()
+test_run:switch("replica2")
+test_run:wait_cond(function() return box.error.injection.get('ERRINJ_WAL_DELAY_COUNTDOWN') < cnt_before end)
+test_run:switch("replica1")
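+-- The INSERT is run in a separate fiber: it cannot complete
+-- until the queued PROMOTE is processed.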
+_ = require('fiber').create(function() box.space.test:insert{2} end)
+
+--
+-- The master node has no clue that there is a new leader and
+-- continues writing data with an obsolete term. Since replica2
+-- is delayed now, the INSERT won't proceed yet but gets queued.
+test_run:switch("master")
+_ = require('fiber').create(function() box.space.test:insert{3} end)
+
+--
+-- Finally, enable replica2 back. Make sure the data from the new
+-- leader (replica1) gets written while the old leader's data is ignored.
+test_run:switch("replica2")
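+-- The fired countdown injection engages ERRINJ_WAL_DELAY, so
+-- clearing the latter resumes the WAL writer.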
+box.error.injection.set('ERRINJ_WAL_DELAY', false)
+test_run:wait_cond(function() return box.space.test:get{2} ~= nil end)
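+-- Expect [1] and [2] only: [3] was written by the old leader
+-- under an obsolete term and is ignored.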
+box.space.test:select{}
+
+test_run:switch("default")
+test_run:cmd('stop server master')
+test_run:cmd('stop server replica1')
+test_run:cmd('stop server replica2')
+
+test_run:cmd('delete server master')
+test_run:cmd('delete server replica1')
+test_run:cmd('delete server replica2')
diff --git a/test/replication/suite.cfg b/test/replication/suite.cfg
index 3eee0803c..ed09b2087 100644
--- a/test/replication/suite.cfg
+++ b/test/replication/suite.cfg
@@ -59,6 +59,7 @@
     "gh-6094-rs-uuid-mismatch.test.lua": {},
     "gh-6127-election-join-new.test.lua": {},
     "gh-6035-applier-filter.test.lua": {},
+    "gh-6036-qsync-order.test.lua": {},
     "election-candidate-promote.test.lua": {},
     "*": {
         "memtx": {"engine": "memtx"},
diff --git a/test/replication/suite.ini b/test/replication/suite.ini
index 77eb95f49..080e4fbf4 100644
--- a/test/replication/suite.ini
+++ b/test/replication/suite.ini
@@ -3,7 +3,7 @@ core = tarantool
 script =  master.lua
 description = tarantool/box, replication
 disabled = consistent.test.lua
-release_disabled = catch.test.lua errinj.test.lua gc.test.lua gc_no_space.test.lua before_replace.test.lua qsync_advanced.test.lua qsync_errinj.test.lua quorum.test.lua recover_missing_xlog.test.lua sync.test.lua long_row_timeout.test.lua gh-4739-vclock-assert.test.lua gh-4730-applier-rollback.test.lua gh-5140-qsync-casc-rollback.test.lua gh-5144-qsync-dup-confirm.test.lua gh-5167-qsync-rollback-snap.test.lua gh-5430-qsync-promote-crash.test.lua gh-5430-cluster-mvcc.test.lua  gh-5506-election-on-off.test.lua gh-5536-wal-limit.test.lua hang_on_synchro_fail.test.lua anon_register_gap.test.lua gh-5213-qsync-applier-order.test.lua gh-5213-qsync-applier-order-3.test.lua gh-6027-applier-error-show.test.lua gh-6032-promote-wal-write.test.lua gh-6057-qsync-confirm-async-no-wal.test.lua gh-5447-downstream-lag.test.lua gh-4040-invalid-msgpack.test.lua
+release_disabled = catch.test.lua errinj.test.lua gc.test.lua gc_no_space.test.lua before_replace.test.lua qsync_advanced.test.lua qsync_errinj.test.lua quorum.test.lua recover_missing_xlog.test.lua sync.test.lua long_row_timeout.test.lua gh-4739-vclock-assert.test.lua gh-4730-applier-rollback.test.lua gh-5140-qsync-casc-rollback.test.lua gh-5144-qsync-dup-confirm.test.lua gh-5167-qsync-rollback-snap.test.lua gh-5430-qsync-promote-crash.test.lua gh-5430-cluster-mvcc.test.lua  gh-5506-election-on-off.test.lua gh-5536-wal-limit.test.lua hang_on_synchro_fail.test.lua anon_register_gap.test.lua gh-5213-qsync-applier-order.test.lua gh-5213-qsync-applier-order-3.test.lua gh-6027-applier-error-show.test.lua gh-6032-promote-wal-write.test.lua gh-6057-qsync-confirm-async-no-wal.test.lua gh-5447-downstream-lag.test.lua gh-4040-invalid-msgpack.test.lua gh-6036-qsync-order.test.lua
 config = suite.cfg
 lua_libs = lua/fast_replica.lua lua/rlimit.lua
 use_unix_sockets = True
-- 
2.31.1