Tarantool development patches archive
 help / color / mirror / Atom feed
* [PATCH v2] replication: enter orphan mode on every erroneous config change
@ 2019-08-23 15:33 Serge Petrenko
  2019-08-27 12:38 ` Vladimir Davydov
  0 siblings, 1 reply; 2+ messages in thread
From: Serge Petrenko @ 2019-08-23 15:33 UTC (permalink / raw)
  To: vdavydov.dev; +Cc: tarantool-patches, Serge Petrenko

We only entered orphan mode on bootstrap and local recovery, but threw an
error when replication config was changed on the fly.
For consistency, in this case we should also enter orphan mode when
an instance fails to connect to a quorum of remote instances.

Closes #4424

@TarantoolBot document
Title: document the reaction to an error during a replication configuration change.

Now calling `box.cfg{replication={uri1, uri2, ...}}` will never throw an
error in case replication quorum cannot be reached. It will just switch
the instance to orphan state.
(Previously, the instance switched to orphan mode only on an error in the
initial configuration, and an error was thrown if quorum couldn't be
reached on a subsequent box.cfg call. Now the instance always switches to
orphan mode if quorum cannot be reached.)
---
https://github.com/tarantool/tarantool/issues/4424
https://github.com/tarantool/tarantool/tree/sp/gh-4424-repl-config-errors-v2

 src/box/box.cc                 |   5 +-
 src/box/replication.cc         |  29 ++++-----
 src/box/replication.h          |   8 +--
 test/replication/misc.result   | 104 ++++++++++++++++++++++++++++++++-
 test/replication/misc.test.lua |  41 ++++++++++++-
 5 files changed, 160 insertions(+), 27 deletions(-)

diff --git a/src/box/box.cc b/src/box/box.cc
index 66cd6d3a4..272d9addb 100644
--- a/src/box/box.cc
+++ b/src/box/box.cc
@@ -679,7 +679,10 @@ box_sync_replication(bool connect_quorum)
 			applier_delete(appliers[i]); /* doesn't affect diag */
 	});
 
-	replicaset_connect(appliers, count, connect_quorum);
+	replicaset_connect(appliers, count);
+
+	if (connect_quorum)
+		replicaset_check_quorum();
 
 	guard.is_active = false;
 }
diff --git a/src/box/replication.cc b/src/box/replication.cc
index 28f7acedc..0f6e0ff83 100644
--- a/src/box/replication.cc
+++ b/src/box/replication.cc
@@ -599,8 +599,7 @@ applier_on_connect_f(struct trigger *trigger, void *event)
 }
 
 void
-replicaset_connect(struct applier **appliers, int count,
-		   bool connect_quorum)
+replicaset_connect(struct applier **appliers, int count)
 {
 	if (count == 0) {
 		/* Cleanup the replica set. */
@@ -658,12 +657,6 @@ replicaset_connect(struct applier **appliers, int count,
 	if (state.connected < count) {
 		say_crit("failed to connect to %d out of %d replicas",
 			 count - state.connected, count);
-		/* Timeout or connection failure. */
-		if (connect_quorum && state.connected < quorum) {
-			diag_set(ClientError, ER_CFG, "replication",
-				 "failed to connect to one or more replicas");
-			goto error;
-		}
 	} else {
 		say_info("connected to %d replicas", state.connected);
 	}
@@ -685,16 +678,14 @@ replicaset_connect(struct applier **appliers, int count,
 	try {
 		replicaset_update(appliers, count);
 	} catch (Exception *e) {
-		goto error;
+		/* Destroy appliers */
+		for (int i = 0; i < count; i++) {
+			trigger_clear(&triggers[i].base);
+			applier_stop(appliers[i]);
+		}
+		diag_raise();
 	}
 	return;
-error:
-	/* Destroy appliers */
-	for (int i = 0; i < count; i++) {
-		trigger_clear(&triggers[i].base);
-		applier_stop(appliers[i]);
-	}
-	diag_raise();
 }
 
 bool
@@ -772,7 +763,6 @@ void
 replicaset_sync(void)
 {
 	int quorum = replicaset_quorum();
-
 	if (quorum == 0) {
 		/*
 		 * Quorum is 0 or replication is not configured.
@@ -816,8 +806,11 @@ replicaset_sync(void)
 void
 replicaset_check_quorum(void)
 {
-	if (replicaset.applier.synced >= replicaset_quorum())
+	if (replicaset.applier.synced >= replicaset_quorum()) {
 		box_set_orphan(false);
+	} else if (replicaset.applier.connected < replicaset_quorum()) {
+		box_set_orphan(true);
+	}
 }
 
 void
diff --git a/src/box/replication.h b/src/box/replication.h
index 19f283c7d..999c8acd2 100644
--- a/src/box/replication.h
+++ b/src/box/replication.h
@@ -379,8 +379,7 @@ replicaset_add(uint32_t replica_id, const struct tt_uuid *instance_uuid);
  *                       appliers have successfully connected.
  */
 void
-replicaset_connect(struct applier **appliers, int count,
-		   bool connect_quorum);
+replicaset_connect(struct applier **appliers, int count);
 
 /**
  * Check if the current instance fell too much behind its
@@ -406,8 +405,9 @@ void
 replicaset_sync(void);
 
 /**
- * Check if a replication quorum has been formed and
- * switch the server to the write mode if so.
+ * Check whether a replication quorum is formed.
+ * If it is, switch to write mode. Switch to readonly
+ * mode otherwise.
  */
 void
 replicaset_check_quorum(void);
diff --git a/test/replication/misc.result b/test/replication/misc.result
index 0a57edda5..ae72ce3e4 100644
--- a/test/replication/misc.result
+++ b/test/replication/misc.result
@@ -18,10 +18,19 @@ replication_connect_timeout = box.cfg.replication_connect_timeout
 box.cfg{replication_timeout=0.05, replication_connect_timeout=0.05, replication={}}
 ---
 ...
+box.cfg{replication_connect_quorum=2}
+---
+...
 box.cfg{replication = {'127.0.0.1:12345', box.cfg.listen}}
 ---
-- error: 'Incorrect value for option ''replication'': failed to connect to one or
-    more replicas'
+...
+box.info.status
+---
+- orphan
+...
+box.info.ro
+---
+- true
 ...
 -- gh-3606 - Tarantool crashes if box.cfg.replication is updated concurrently
 fiber = require('fiber')
@@ -47,8 +56,16 @@ c:get()
 ---
 - true
 ...
-box.cfg{replication_timeout = replication_timeout, replication_connect_timeout = replication_connect_timeout}
+box.cfg{replication = "", replication_timeout = replication_timeout, replication_connect_timeout = replication_connect_timeout}
+---
+...
+box.info.status
+---
+- running
+...
+box.info.ro
 ---
+- false
 ...
 -- gh-3111 - Allow to rebootstrap a replica from a read-only master
 replica_uuid = uuid.new()
@@ -729,3 +746,84 @@ test_run:cleanup_cluster()
 box.schema.user.revoke('guest', 'replication')
 ---
 ...
+--
+-- gh-4424 Always enter orphan mode on error in replication
+-- configuration change.
+--
+replication_connect_timeout = box.cfg.replication_connect_timeout
+---
+...
+replication_connect_quorum = box.cfg.replication_connect_quorum
+---
+...
+box.cfg{replication="12345", replication_connect_timeout=0.1, replication_connect_quorum=1}
+---
+...
+box.info.status
+---
+- orphan
+...
+box.info.ro
+---
+- true
+...
+-- reset replication => leave orphan mode
+box.cfg{replication=""}
+---
+...
+box.info.status
+---
+- running
+...
+box.info.ro
+---
+- false
+...
+-- no switch to orphan when quorum == 0
+box.cfg{replication="12345", replication_connect_quorum=0}
+---
+...
+box.info.status
+---
+- running
+...
+box.info.ro
+---
+- false
+...
+-- we could connect to one out of two replicas. Set orphan.
+box.cfg{replication_connect_quorum=2}
+---
+...
+box.cfg{replication={box.cfg.listen, "12345"}}
+---
+...
+box.info.status
+---
+- orphan
+...
+box.info.ro
+---
+- true
+...
+-- lower quorum => leave orphan mode
+box.cfg{replication_connect_quorum=1}
+---
+...
+box.info.status
+---
+- running
+...
+box.info.ro
+---
+- false
+...
+box.cfg{replication=""}
+---
+...
+box.cfg{replication_connect_timeout=replication_connect_timeout}
+---
+...
+box.cfg{replication_connect_quorum=replication_connect_quorum}
+---
+...
diff --git a/test/replication/misc.test.lua b/test/replication/misc.test.lua
index 99e995509..16e7e9e42 100644
--- a/test/replication/misc.test.lua
+++ b/test/replication/misc.test.lua
@@ -8,7 +8,10 @@ box.schema.user.grant('guest', 'replication')
 replication_timeout = box.cfg.replication_timeout
 replication_connect_timeout = box.cfg.replication_connect_timeout
 box.cfg{replication_timeout=0.05, replication_connect_timeout=0.05, replication={}}
+box.cfg{replication_connect_quorum=2}
 box.cfg{replication = {'127.0.0.1:12345', box.cfg.listen}}
+box.info.status
+box.info.ro
 
 -- gh-3606 - Tarantool crashes if box.cfg.replication is updated concurrently
 fiber = require('fiber')
@@ -19,7 +22,9 @@ f()
 c:get()
 c:get()
 
-box.cfg{replication_timeout = replication_timeout, replication_connect_timeout = replication_connect_timeout}
+box.cfg{replication = "", replication_timeout = replication_timeout, replication_connect_timeout = replication_connect_timeout}
+box.info.status
+box.info.ro
 
 -- gh-3111 - Allow to rebootstrap a replica from a read-only master
 replica_uuid = uuid.new()
@@ -293,3 +298,37 @@ test_run:cmd("cleanup server replica")
 test_run:cmd("delete server replica")
 test_run:cleanup_cluster()
 box.schema.user.revoke('guest', 'replication')
+
+--
+-- gh-4424 Always enter orphan mode on error in replication
+-- configuration change.
+--
+replication_connect_timeout = box.cfg.replication_connect_timeout
+replication_connect_quorum = box.cfg.replication_connect_quorum
+box.cfg{replication="12345", replication_connect_timeout=0.1, replication_connect_quorum=1}
+box.info.status
+box.info.ro
+-- reset replication => leave orphan mode
+box.cfg{replication=""}
+box.info.status
+box.info.ro
+-- no switch to orphan when quorum == 0
+box.cfg{replication="12345", replication_connect_quorum=0}
+box.info.status
+box.info.ro
+
+-- we could connect to one out of two replicas. Set orphan.
+box.cfg{replication_connect_quorum=2}
+box.cfg{replication={box.cfg.listen, "12345"}}
+box.info.status
+box.info.ro
+-- lower quorum => leave orphan mode
+box.cfg{replication_connect_quorum=1}
+box.info.status
+box.info.ro
+
+box.cfg{replication=""}
+
+
+box.cfg{replication_connect_timeout=replication_connect_timeout}
+box.cfg{replication_connect_quorum=replication_connect_quorum}
-- 
2.20.1 (Apple Git-117)

^ permalink raw reply	[flat|nested] 2+ messages in thread

end of thread, other threads:[~2019-08-27 12:38 UTC | newest]

Thread overview: 2+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2019-08-23 15:33 [PATCH v2] replication: enter orphan mode on every erroneous config change Serge Petrenko
2019-08-27 12:38 ` Vladimir Davydov

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox