From mboxrd@z Thu Jan  1 00:00:00 1970
From: Cyrill Gorcunov via Tarantool-patches
Reply-To: Cyrill Gorcunov
To: tml
Cc: Vladislav Shpilevoy
Date: Wed, 2 Mar 2022 23:27:11 +0300
Message-Id: <20220302202711.1003906-4-gorcunov@gmail.com>
In-Reply-To: <20220302202711.1003906-1-gorcunov@gmail.com>
References: <20220302202711.1003906-1-gorcunov@gmail.com>
Subject: [Tarantool-patches] [PATCH v31 3/3] test: add gh-6036-qsync-order test

Test that promotion requests are handled only when the corresponding
write to the WAL completes, because we update in-memory data before
the write finishes.
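A rough sketch of the scenario under test is shown below. This is
illustrative only: the "leader" and "replica" handles are placeholders
for the luatest server objects used in the test itself, while the
ERRINJ_WAL_DELAY injection and the box.info.synchro.queue.busy flag are
the same ones the test relies on.

    -- Hold the next WAL write on the replica so that an incoming
    -- PROMOTE stays queued instead of being applied immediately.
    replica:exec(function()
        box.error.injection.set('ERRINJ_WAL_DELAY', true)
    end)

    -- The new leader sends a PROMOTE; while the replica's WAL is
    -- delayed the synchro queue must report itself as busy, and any
    -- follow-up DML has to wait for the PROMOTE to be written first.
    leader:exec(function() box.ctl.promote() end)
    t.helpers.retrying({}, function()
        t.assert(replica:exec(function()
            return box.info.synchro.queue.busy
        end))
    end)

    -- Releasing the delay lets the PROMOTE finish; only then is the
    -- new leader's data applied, while data written under an obsolete
    -- term is ignored.
    replica:exec(function()
        box.error.injection.set('ERRINJ_WAL_DELAY', false)
    end)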
Part-of #6036

Signed-off-by: Cyrill Gorcunov
---
 .../gh_6036_qsync_order_test.lua              | 198 ++++++++++++++++++
 test/replication-luatest/suite.ini            |   1 +
 2 files changed, 199 insertions(+)
 create mode 100644 test/replication-luatest/gh_6036_qsync_order_test.lua

diff --git a/test/replication-luatest/gh_6036_qsync_order_test.lua b/test/replication-luatest/gh_6036_qsync_order_test.lua
new file mode 100644
index 000000000..c23c7a3a1
--- /dev/null
+++ b/test/replication-luatest/gh_6036_qsync_order_test.lua
@@ -0,0 +1,198 @@
+local t = require('luatest')
+local cluster = require('test.luatest_helpers.cluster')
+local server = require('test.luatest_helpers.server')
+local fiber = require('fiber')
+
+local g = t.group('gh-6036')
+
+g.before_all(function(cg)
+    cg.cluster = cluster:new({})
+
+    cg.box_cfg = {
+        replication = {
+            server.build_instance_uri('r1'),
+            server.build_instance_uri('r2'),
+        },
+        replication_timeout = 0.1,
+        replication_connect_quorum = 1,
+        election_mode = 'manual',
+        election_timeout = 0.1,
+        replication_synchro_quorum = 1,
+        replication_synchro_timeout = 0.1,
+        log_level = 6,
+    }
+
+    cg.r1 = cg.cluster:build_server({ alias = 'r1', box_cfg = cg.box_cfg })
+    cg.r2 = cg.cluster:build_server({ alias = 'r2', box_cfg = cg.box_cfg })
+
+    cg.cluster:add_server(cg.r1)
+    cg.cluster:add_server(cg.r2)
+    cg.cluster:start()
+end)
+
+g.after_all(function(cg)
+    cg.cluster:drop()
+    cg.cluster.servers = nil
+end)
+
+local function update_replication(...)
+    return (box.cfg{ replication = { ... } })
+end
+
+--
+-- The test requires a 3rd replica to join.
+g.before_test("test_qsync_order", function(cg)
+    cg.box_cfg.replication[3] = server.build_instance_uri("r3")
+    cg.r3 = cg.cluster:build_server({ alias = 'r3', box_cfg = cg.box_cfg })
+    cg.cluster:add_server(cg.r3)
+    cg.r3:start()
+    cg.r1:exec(update_replication, cg.box_cfg.replication)
+    cg.r2:exec(update_replication, cg.box_cfg.replication)
+end)
+
+g.test_qsync_order = function(cg)
+    cg.cluster:wait_fullmesh()
+
+    --
+    -- Create a synchro space on the r1 node and make
+    -- sure the write is processed just fine.
+    cg.r1:exec(function()
+        box.ctl.promote()
+        box.ctl.wait_rw()
+        local s = box.schema.create_space('test', {is_sync = true})
+        s:create_index('pk')
+        s:insert{1}
+    end)
+
+    local vclock = cg.r1:get_vclock()
+    vclock[0] = nil
+    cg.r2:wait_vclock(vclock)
+    cg.r3:wait_vclock(vclock)
+
+    t.assert_equals(cg.r1:eval("return box.space.test:select()"), {{1}})
+    t.assert_equals(cg.r2:eval("return box.space.test:select()"), {{1}})
+    t.assert_equals(cg.r3:eval("return box.space.test:select()"), {{1}})
+
+    --
+    -- Drop connection between r1 and r2.
+    cg.r1:exec(update_replication, {
+        server.build_instance_uri("r1"),
+        server.build_instance_uri("r3"),
+    })
+
+    --
+    -- Drop connection between r2 and r1.
+    cg.r2:exec(update_replication, {
+        server.build_instance_uri("r2"),
+        server.build_instance_uri("r3"),
+    })
+
+    --
+    -- Here we have the following topology
+    --
+    --      r3 (WAL delay)
+    --      /            \
+    --    r1              r2
+    --
+
+    --
+    -- Initiate a disk delay in a slightly tricky way: the next write
+    -- will fall into a forever sleep.
+    cg.r3:exec(function()
+        box.error.injection.set('ERRINJ_WAL_DELAY', true)
+    end)
+
+    --
+    -- Make r2 the leader and start writing data. The PROMOTE
+    -- request gets queued on r3 and is not yet processed; at the
+    -- same time the INSERT won't complete either, since it waits
+    -- for the PROMOTE to finish first. Note that we also query r3
+    -- just to be sure the PROMOTE has reached it (queue state test).
+    cg.r2:exec(function()
+        box.ctl.promote()
+        box.ctl.wait_rw()
+    end)
+    t.helpers.retrying({}, function()
+        assert(cg.r3:exec(function()
+            return box.info.synchro.queue.busy == true
+        end))
+    end)
+    cg.r2:exec(function()
+        box.space.test:insert{2}
+    end)
+
+    --
+    -- The r1 node has no clue that there is a new leader and continues
+    -- writing data with the obsolete term. Since r3 is delayed now,
+    -- the INSERT won't proceed yet but gets queued.
+    cg.r1:exec(function()
+        box.space.test:insert{3}
+    end)
+
+    --
+    -- Finally re-enable r3. Make sure the data from the new leader r2
+    -- gets written while the old leader's data is ignored.
+    cg.r3:exec(function()
+        box.error.injection.set('ERRINJ_WAL_DELAY', false)
+    end)
+    t.helpers.retrying({}, function()
+        assert(cg.r3:exec(function()
+            return box.space.test:get{2} ~= nil
+        end))
+    end)
+
+    t.assert_equals(cg.r3:eval("return box.space.test:select()"), {{1},{2}})
+end
+
+--
+-- Drop the r3 replica, since it is no longer needed for this test.
+g.after_test("test_qsync_order", function(cg)
+    cg.box_cfg.replication[3] = nil
+    cg.r1:exec(update_replication, cg.box_cfg.replication)
+    cg.r2:exec(update_replication, cg.box_cfg.replication)
+    cg.r3:stop()
+    cg.r3:cleanup()
+    cg.r3 = nil
+end)
+
+g.test_promote_order = function(cg)
+    --
+    -- Make sure that while we're processing a PROMOTE no other records
+    -- sneak in via the applier code from other replicas. For this
+    -- sake, initiate voting and stop inside the WAL thread just before
+    -- the PROMOTE gets written. Another replica then sends us a new
+    -- record and it should be dropped.
+    cg.r1:exec(function()
+        box.ctl.promote()
+        box.ctl.wait_rw()
+    end)
+    local vclock = cg.r1:get_vclock()
+    vclock[0] = nil
+    cg.r2:wait_vclock(vclock)
+
+    --
+    -- Drop connection between r1 and the rest of the cluster.
+    -- Otherwise r1 might become a Raft follower before attempting
+    -- insert{4}.
+    cg.r1:exec(function() box.cfg{replication=""} end)
+    cg.r2:exec(function()
+        box.error.injection.set('ERRINJ_WAL_DELAY_COUNTDOWN', 2)
+        require('fiber').create(function() box.ctl.promote() end)
+    end)
+    t.helpers.retrying({}, function()
+        t.assert(cg.r2:exec(function()
+            return box.info.synchro.queue.busy
+        end))
+    end)
+    t.assert(cg.r1:exec(function() return box.info.ro == false end))
+    cg.r1:exec(function()
+        box.space.test:insert{4}
+    end)
+    cg.r2:exec(function()
+        assert(box.info.synchro.queue.busy == true)
+        box.error.injection.set('ERRINJ_WAL_DELAY', false)
+        box.ctl.wait_rw()
+    end)
+
+    t.assert_equals(cg.r2:eval("return box.space.test:select()"), {{1},{2}})
+end
diff --git a/test/replication-luatest/suite.ini b/test/replication-luatest/suite.ini
index 374f1b87a..07ec93a52 100644
--- a/test/replication-luatest/suite.ini
+++ b/test/replication-luatest/suite.ini
@@ -2,3 +2,4 @@
 core = luatest
 description = replication luatests
 is_parallel = True
+release_disabled = gh_6036_qsync_order_test.lua
-- 
2.35.1