Tarantool development patches archive
 help / color / mirror / Atom feed
* [PATCH] replication: move cluster id match check to replica
@ 2019-01-31 13:25 Serge Petrenko
  2019-01-31 13:30 ` Serge Petrenko
  2019-02-06 10:48 ` Vladimir Davydov
  0 siblings, 2 replies; 5+ messages in thread
From: Serge Petrenko @ 2019-01-31 13:25 UTC (permalink / raw)
  To: vdavydov.dev; +Cc: tarantool-patches, georgy, Serge Petrenko

On replica subscribe master checks that replica's cluster id matches
master's one, and disallows replication in case of mismatch.
This behaviour blocks implementation of anonymous replicas, which
shouldn't pollute _cluster space and could accumulate changes from
multiple clusters at once.
So let's move the check to replica to let it decide which action to take
in case of mismatch.

Needed for #3186
Closes #3704
---
Issue: https://github.com/tarantool/tarantool/issues/3704
Branch: https://github.com/tarantool/tarantool/tree/sp/gh-3704-cluster-id-check-on-replica

 src/box/applier.cc             | 24 +++++++++++-
 src/box/box.cc                 | 22 +++++------
 src/box/xrow.c                 | 26 +++++++++++++
 src/box/xrow.h                 | 51 ++++++++++++++++++++++++
 test/replication/misc.result   | 71 ++++++++++++++++++++++++++++++++++
 test/replication/misc.test.lua | 26 +++++++++++++
 6 files changed, 206 insertions(+), 14 deletions(-)

diff --git a/src/box/applier.cc b/src/box/applier.cc
index 21d2e6bcb..704ed6c5b 100644
--- a/src/box/applier.cc
+++ b/src/box/applier.cc
@@ -391,6 +391,7 @@ applier_subscribe(struct applier *applier)
 	struct ibuf *ibuf = &applier->ibuf;
 	struct xrow_header row;
 	struct vclock remote_vclock_at_subscribe;
+	struct tt_uuid cluster_id = uuid_nil;
 
 	xrow_encode_subscribe_xc(&row, &REPLICASET_UUID, &INSTANCE_UUID,
 				 &replicaset.vclock);
@@ -408,9 +409,30 @@ applier_subscribe(struct applier *applier)
 		/*
 		 * In case of successful subscribe, the server
 		 * responds with its current vclock.
+		 *
+		 * Tarantool 2.1.1 also sends its cluster id to
+		 * the replica, and replica has to check whether
+		 * its and master's cluster ids match.
 		 */
 		vclock_create(&remote_vclock_at_subscribe);
-		xrow_decode_vclock_xc(&row, &remote_vclock_at_subscribe);
+		if (applier->version_id >= version_id(2, 1, 1)) {
+			xrow_decode_subscribe_response_xc(&row,
+							  &cluster_id,
+							  &remote_vclock_at_subscribe);
+			/*
+			 * If master didn't send us its cluster id
+			 * assume that it has done all the checks.
+			 * In this case cluster_id will remain zero.
+			 */
+			if (!tt_uuid_is_equal(&cluster_id, &uuid_nil) &&
+			    !tt_uuid_is_equal(&cluster_id, &REPLICASET_UUID)) {
+				tnt_raise(ClientError, ER_REPLICASET_UUID_MISMATCH,
+					  tt_uuid_str(&cluster_id),
+					  tt_uuid_str(&REPLICASET_UUID));
+			}
+		} else {
+			xrow_decode_vclock_xc(&row, &remote_vclock_at_subscribe);
+		}
 	}
 	/*
 	 * Tarantool < 1.6.7:
diff --git a/src/box/box.cc b/src/box/box.cc
index 8892d0f0e..8a8c08167 100644
--- a/src/box/box.cc
+++ b/src/box/box.cc
@@ -1584,18 +1584,6 @@ box_process_subscribe(struct ev_io *io, struct xrow_header *header)
 	/* Check permissions */
 	access_check_universe_xc(PRIV_R);
 
-	/**
-	 * Check that the given UUID matches the UUID of the
-	 * replica set this replica belongs to. Used to handshake
-	 * replica connect, and refuse a connection from a replica
-	 * which belongs to a different replica set.
-	 */
-	if (!tt_uuid_is_equal(&replicaset_uuid, &REPLICASET_UUID)) {
-		tnt_raise(ClientError, ER_REPLICASET_UUID_MISMATCH,
-			  tt_uuid_str(&REPLICASET_UUID),
-			  tt_uuid_str(&replicaset_uuid));
-	}
-
 	/* Check replica uuid */
 	struct replica *replica = replica_by_uuid(&replica_uuid);
 	if (replica == NULL || replica->id == REPLICA_ID_NIL) {
@@ -1620,9 +1608,17 @@ box_process_subscribe(struct ev_io *io, struct xrow_header *header)
 	 * Send a response to SUBSCRIBE request, tell
 	 * the replica how many rows we have in stock for it,
 	 * and identify ourselves with our own replica id.
+	 *
+	 * Since Tarantool version 2.1.1 master doesn't check
+	 * that replica and master have the same cluster id.
+	 * It sends its cluster id to replica instead, and replica
+	 * checks that its cluster id matches master's one.
+	 * Older versions will just ignore the additional field.
 	 */
 	struct xrow_header row;
-	xrow_encode_vclock_xc(&row, &replicaset.vclock);
+	xrow_encode_subscribe_response_xc(&row,
+					  &REPLICASET_UUID,
+					  &replicaset.vclock);
 	/*
 	 * Identify the message with the replica id of this
 	 * instance, this is the only way for a replica to find
diff --git a/src/box/xrow.c b/src/box/xrow.c
index c4e3073be..84b3473b1 100644
--- a/src/box/xrow.c
+++ b/src/box/xrow.c
@@ -1170,6 +1170,32 @@ xrow_encode_vclock(struct xrow_header *row, const struct vclock *vclock)
 	return 0;
 }
 
+int
+xrow_encode_subscribe_response(struct xrow_header *row,
+			       const struct tt_uuid *replicaset_uuid,
+			       const struct vclock *vclock)
+{
+	memset(row, 0, sizeof(*row));
+	size_t size = XROW_BODY_LEN_MAX + UUID_STR_LEN + mp_sizeof_vclock(vclock);
+	char *buf = (char *) region_alloc(&fiber()->gc, size);
+	if (buf == NULL) {
+		diag_set(OutOfMemory, size, "region_alloc", "buf");
+		return -1;
+	}
+	char *data = buf;
+	data = mp_encode_map(data, 2);
+	data = mp_encode_uint(data, IPROTO_VCLOCK);
+	data = mp_encode_vclock(data, vclock);
+	data = mp_encode_uint(data, IPROTO_CLUSTER_UUID);
+	data = xrow_encode_uuid(data, replicaset_uuid);
+	assert(data <= buf + size);
+	row->body[0].iov_base = buf;
+	row->body[0].iov_len = (data - buf);
+	row->bodycnt = 1;
+	row->type = IPROTO_OK;
+	return 0;
+}
+
 void
 xrow_encode_timestamp(struct xrow_header *row, uint32_t replica_id, double tm)
 {
diff --git a/src/box/xrow.h b/src/box/xrow.h
index 2654e35e6..719add4f0 100644
--- a/src/box/xrow.h
+++ b/src/box/xrow.h
@@ -348,6 +348,37 @@ xrow_decode_vclock(struct xrow_header *row, struct vclock *vclock)
 	return xrow_decode_subscribe(row, NULL, NULL, vclock, NULL);
 }
 
+/**
+ * Encode a response to subscribe request.
+ * @param row[out] Row to encode into.
+ * @param replicaset_uuid.
+ * @param vclock.
+ *
+ * @retval 0 Success.
+ * @retval -1 Memory error.
+ */
+int
+xrow_encode_subscribe_response(struct xrow_header *row,
+			      const struct tt_uuid *replicaset_uuid,
+			      const struct vclock *vclock);
+
+/**
+ * Decode a response to subscribe request.
+ * @param row Row to decode.
+ * @param[out] replicaset_uuid.
+ * @param[out] vclock.
+ *
+ * @retval 0 Success.
+ * @retval -1 Memory or format error.
+ */
+static inline int
+xrow_decode_subscribe_response(struct xrow_header *row,
+			       struct tt_uuid *replicaset_uuid,
+			       struct vclock *vclock)
+{
+	return xrow_decode_subscribe(row, replicaset_uuid, NULL, vclock, NULL);
+}
+
 /**
  * Encode a heartbeat message.
  * @param row[out] Row to encode into.
@@ -759,6 +790,26 @@ xrow_decode_vclock_xc(struct xrow_header *row, struct vclock *vclock)
 		diag_raise();
 }
 
+/** @copydoc xrow_encode_subscribe_response. */
+static inline void
+xrow_encode_subscribe_response_xc(struct xrow_header *row,
+				  const struct tt_uuid *replicaset_uuid,
+				  const struct vclock *vclock)
+{
+	if (xrow_encode_subscribe_response(row, replicaset_uuid, vclock) != 0)
+		diag_raise();
+}
+
+/** @copydoc xrow_decode_subscribe_response. */
+static inline void
+xrow_decode_subscribe_response_xc(struct xrow_header *row,
+				  struct tt_uuid *replicaset_uuid,
+				  struct vclock *vclock)
+{
+	if (xrow_decode_subscribe_response(row, replicaset_uuid, vclock) != 0)
+		diag_raise();
+}
+
 /** @copydoc iproto_reply_ok. */
 static inline void
 iproto_reply_ok_xc(struct obuf *out, uint64_t sync, uint32_t schema_version)
diff --git a/test/replication/misc.result b/test/replication/misc.result
index c32681a7a..1f7e78a2f 100644
--- a/test/replication/misc.result
+++ b/test/replication/misc.result
@@ -604,3 +604,74 @@ test_run:cmd("delete server replica")
 test_run:cleanup_cluster()
 ---
 ...
+--
+-- gh-3704 move cluster id check to replica
+--
+test_run:cmd("create server replica with rpl_master=default, script='replication/replica.lua'")
+---
+- true
+...
+box.schema.user.grant("guest", "replication")
+---
+...
+test_run:cmd("start server replica")
+---
+- true
+...
+test_run:grep_log("replica", "REPLICASET_UUID_MISMATCH")
+---
+- null
+...
+box.info.replication[2].downstream ~= nil
+---
+- true
+...
+-- make master generate another cluster uuid and check that
+-- replica doesn't connect
+test_run:cmd("stop server replica")
+---
+- true
+...
+test_run:cmd("restart server default with cleanup=1")
+box.schema.user.grant("guest", "replication")
+---
+...
+test_run:cmd("start server replica")
+---
+- true
+...
+-- master believes replica is in cluster, but their UUIDS differ.
+replica_uuid = test_run:eval("replica", "box.info.uuid")[1]
+---
+...
+_ = box.space._cluster:insert{2, replica_uuid}
+---
+...
+test_run:cmd("restart server replica")
+---
+- true
+...
+test_run:grep_log("replica", "REPLICASET_UUID_MISMATCH")
+---
+- REPLICASET_UUID_MISMATCH
+...
+box.info.replication[2].downstream ~= nil
+---
+- false
+...
+test_run:cmd("restart server default with cleanup=1")
+---
+- true
+...
+test_run:cmd("stop server replica")
+---
+- true
+...
+test_run:cmd("cleanup server replica")
+---
+- true
+...
+test_run:cmd("delete server replica")
+---
+- true
+...
diff --git a/test/replication/misc.test.lua b/test/replication/misc.test.lua
index 6a8af05c3..967c4e912 100644
--- a/test/replication/misc.test.lua
+++ b/test/replication/misc.test.lua
@@ -243,3 +243,29 @@ test_run:cmd("stop server replica")
 test_run:cmd("cleanup server replica")
 test_run:cmd("delete server replica")
 test_run:cleanup_cluster()
+
+--
+-- gh-3704 move cluster id check to replica
+--
+test_run:cmd("create server replica with rpl_master=default, script='replication/replica.lua'")
+box.schema.user.grant("guest", "replication")
+test_run:cmd("start server replica")
+test_run:grep_log("replica", "REPLICASET_UUID_MISMATCH")
+box.info.replication[2].downstream ~= nil
+-- make master generate another cluster uuid and check that
+-- replica doesn't connect
+test_run:cmd("stop server replica")
+test_run:cmd("restart server default with cleanup=1")
+box.schema.user.grant("guest", "replication")
+test_run:cmd("start server replica")
+-- master believes replica is in cluster, but their UUIDS differ.
+replica_uuid = test_run:eval("replica", "box.info.uuid")[1]
+_ = box.space._cluster:insert{2, replica_uuid}
+test_run:cmd("restart server replica")
+test_run:grep_log("replica", "REPLICASET_UUID_MISMATCH")
+box.info.replication[2].downstream ~= nil
+
+test_run:cmd("restart server default with cleanup=1")
+test_run:cmd("stop server replica")
+test_run:cmd("cleanup server replica")
+test_run:cmd("delete server replica")
-- 
2.17.2 (Apple Git-113)

^ permalink raw reply	[flat|nested] 5+ messages in thread

* Re: [PATCH] replication: move cluster id match check to replica
  2019-01-31 13:25 [PATCH] replication: move cluster id match check to replica Serge Petrenko
@ 2019-01-31 13:30 ` Serge Petrenko
  2019-02-06 10:48 ` Vladimir Davydov
  1 sibling, 0 replies; 5+ messages in thread
From: Serge Petrenko @ 2019-01-31 13:30 UTC (permalink / raw)
  To: Vladimir Davydov; +Cc: tarantool-patches, Georgy Kirichenko

Sorry, the mailing list didn’t forward it for some reason

> 31 янв. 2019 г., в 16:25, Serge Petrenko <sergepetrenko@tarantool.org> написал(а):
> 
> On replica subscribe master checks that replica's cluster id matches
> master's one, and disallows replication in case of mismatch.
> This behaviour blocks implementation of anonymous replicas, which
> shouldn't pollute _cluster space and could accumulate changes from
> multiple clusters at once.
> So let's move the check to replica to let it decide which action to take
> in case of mismatch.
> 
> Needed for #3186
> Closes #3704
> ---
> Issue: https://github.com/tarantool/tarantool/issues/3704
> Branch: https://github.com/tarantool/tarantool/tree/sp/gh-3704-cluster-id-check-on-replica
> 
> src/box/applier.cc             | 24 +++++++++++-
> src/box/box.cc                 | 22 +++++------
> src/box/xrow.c                 | 26 +++++++++++++
> src/box/xrow.h                 | 51 ++++++++++++++++++++++++
> test/replication/misc.result   | 71 ++++++++++++++++++++++++++++++++++
> test/replication/misc.test.lua | 26 +++++++++++++
> 6 files changed, 206 insertions(+), 14 deletions(-)
> 
> diff --git a/src/box/applier.cc b/src/box/applier.cc
> index 21d2e6bcb..704ed6c5b 100644
> --- a/src/box/applier.cc
> +++ b/src/box/applier.cc
> @@ -391,6 +391,7 @@ applier_subscribe(struct applier *applier)
> 	struct ibuf *ibuf = &applier->ibuf;
> 	struct xrow_header row;
> 	struct vclock remote_vclock_at_subscribe;
> +	struct tt_uuid cluster_id = uuid_nil;
> 
> 	xrow_encode_subscribe_xc(&row, &REPLICASET_UUID, &INSTANCE_UUID,
> 				 &replicaset.vclock);
> @@ -408,9 +409,30 @@ applier_subscribe(struct applier *applier)
> 		/*
> 		 * In case of successful subscribe, the server
> 		 * responds with its current vclock.
> +		 *
> +		 * Tarantool 2.1.1 also sends its cluster id to
> +		 * the replica, and replica has to check whether
> +		 * its and master's cluster ids match.
> 		 */
> 		vclock_create(&remote_vclock_at_subscribe);
> -		xrow_decode_vclock_xc(&row, &remote_vclock_at_subscribe);
> +		if (applier->version_id >= version_id(2, 1, 1)) {
> +			xrow_decode_subscribe_response_xc(&row,
> +							  &cluster_id,
> +							  &remote_vclock_at_subscribe);
> +			/*
> +			 * If master didn't send us its cluster id
> +			 * assume that it has done all the checks.
> +			 * In this case cluster_id will remain zero.
> +			 */
> +			if (!tt_uuid_is_equal(&cluster_id, &uuid_nil) &&
> +			    !tt_uuid_is_equal(&cluster_id, &REPLICASET_UUID)) {
> +				tnt_raise(ClientError, ER_REPLICASET_UUID_MISMATCH,
> +					  tt_uuid_str(&cluster_id),
> +					  tt_uuid_str(&REPLICASET_UUID));
> +			}
> +		} else {
> +			xrow_decode_vclock_xc(&row, &remote_vclock_at_subscribe);
> +		}
> 	}
> 	/*
> 	 * Tarantool < 1.6.7:
> diff --git a/src/box/box.cc b/src/box/box.cc
> index 8892d0f0e..8a8c08167 100644
> --- a/src/box/box.cc
> +++ b/src/box/box.cc
> @@ -1584,18 +1584,6 @@ box_process_subscribe(struct ev_io *io, struct xrow_header *header)
> 	/* Check permissions */
> 	access_check_universe_xc(PRIV_R);
> 
> -	/**
> -	 * Check that the given UUID matches the UUID of the
> -	 * replica set this replica belongs to. Used to handshake
> -	 * replica connect, and refuse a connection from a replica
> -	 * which belongs to a different replica set.
> -	 */
> -	if (!tt_uuid_is_equal(&replicaset_uuid, &REPLICASET_UUID)) {
> -		tnt_raise(ClientError, ER_REPLICASET_UUID_MISMATCH,
> -			  tt_uuid_str(&REPLICASET_UUID),
> -			  tt_uuid_str(&replicaset_uuid));
> -	}
> -
> 	/* Check replica uuid */
> 	struct replica *replica = replica_by_uuid(&replica_uuid);
> 	if (replica == NULL || replica->id == REPLICA_ID_NIL) {
> @@ -1620,9 +1608,17 @@ box_process_subscribe(struct ev_io *io, struct xrow_header *header)
> 	 * Send a response to SUBSCRIBE request, tell
> 	 * the replica how many rows we have in stock for it,
> 	 * and identify ourselves with our own replica id.
> +	 *
> +	 * Since Tarantool version 2.1.1 master doesn't check
> +	 * that replica and master have the same cluster id.
> +	 * It sends its cluster id to replica instead, and replica
> +	 * checks that its cluster id matches master's one.
> +	 * Older versions will just ignore the additional field.
> 	 */
> 	struct xrow_header row;
> -	xrow_encode_vclock_xc(&row, &replicaset.vclock);
> +	xrow_encode_subscribe_response_xc(&row,
> +					  &REPLICASET_UUID,
> +					  &replicaset.vclock);
> 	/*
> 	 * Identify the message with the replica id of this
> 	 * instance, this is the only way for a replica to find
> diff --git a/src/box/xrow.c b/src/box/xrow.c
> index c4e3073be..84b3473b1 100644
> --- a/src/box/xrow.c
> +++ b/src/box/xrow.c
> @@ -1170,6 +1170,32 @@ xrow_encode_vclock(struct xrow_header *row, const struct vclock *vclock)
> 	return 0;
> }
> 
> +int
> +xrow_encode_subscribe_response(struct xrow_header *row,
> +			       const struct tt_uuid *replicaset_uuid,
> +			       const struct vclock *vclock)
> +{
> +	memset(row, 0, sizeof(*row));
> +	size_t size = XROW_BODY_LEN_MAX + UUID_STR_LEN + mp_sizeof_vclock(vclock);
> +	char *buf = (char *) region_alloc(&fiber()->gc, size);
> +	if (buf == NULL) {
> +		diag_set(OutOfMemory, size, "region_alloc", "buf");
> +		return -1;
> +	}
> +	char *data = buf;
> +	data = mp_encode_map(data, 2);
> +	data = mp_encode_uint(data, IPROTO_VCLOCK);
> +	data = mp_encode_vclock(data, vclock);
> +	data = mp_encode_uint(data, IPROTO_CLUSTER_UUID);
> +	data = xrow_encode_uuid(data, replicaset_uuid);
> +	assert(data <= buf + size);
> +	row->body[0].iov_base = buf;
> +	row->body[0].iov_len = (data - buf);
> +	row->bodycnt = 1;
> +	row->type = IPROTO_OK;
> +	return 0;
> +}
> +
> void
> xrow_encode_timestamp(struct xrow_header *row, uint32_t replica_id, double tm)
> {
> diff --git a/src/box/xrow.h b/src/box/xrow.h
> index 2654e35e6..719add4f0 100644
> --- a/src/box/xrow.h
> +++ b/src/box/xrow.h
> @@ -348,6 +348,37 @@ xrow_decode_vclock(struct xrow_header *row, struct vclock *vclock)
> 	return xrow_decode_subscribe(row, NULL, NULL, vclock, NULL);
> }
> 
> +/**
> + * Encode a response to subscribe request.
> + * @param row[out] Row to encode into.
> + * @param replicaset_uuid.
> + * @param vclock.
> + *
> + * @retval 0 Success.
> + * @retval -1 Memory error.
> + */
> +int
> +xrow_encode_subscribe_response(struct xrow_header *row,
> +			      const struct tt_uuid *replicaset_uuid,
> +			      const struct vclock *vclock);
> +
> +/**
> + * Decode a response to subscribe request.
> + * @param row Row to decode.
> + * @param[out] replicaset_uuid.
> + * @param[out] vclock.
> + *
> + * @retval 0 Success.
> + * @retval -1 Memory or format error.
> + */
> +static inline int
> +xrow_decode_subscribe_response(struct xrow_header *row,
> +			       struct tt_uuid *replicaset_uuid,
> +			       struct vclock *vclock)
> +{
> +	return xrow_decode_subscribe(row, replicaset_uuid, NULL, vclock, NULL);
> +}
> +
> /**
>  * Encode a heartbeat message.
>  * @param row[out] Row to encode into.
> @@ -759,6 +790,26 @@ xrow_decode_vclock_xc(struct xrow_header *row, struct vclock *vclock)
> 		diag_raise();
> }
> 
> +/** @copydoc xrow_encode_subscribe_response. */
> +static inline void
> +xrow_encode_subscribe_response_xc(struct xrow_header *row,
> +				  const struct tt_uuid *replicaset_uuid,
> +				  const struct vclock *vclock)
> +{
> +	if (xrow_encode_subscribe_response(row, replicaset_uuid, vclock) != 0)
> +		diag_raise();
> +}
> +
> +/** @copydoc xrow_decode_subscribe_response. */
> +static inline void
> +xrow_decode_subscribe_response_xc(struct xrow_header *row,
> +				  struct tt_uuid *replicaset_uuid,
> +				  struct vclock *vclock)
> +{
> +	if (xrow_decode_subscribe_response(row, replicaset_uuid, vclock) != 0)
> +		diag_raise();
> +}
> +
> /** @copydoc iproto_reply_ok. */
> static inline void
> iproto_reply_ok_xc(struct obuf *out, uint64_t sync, uint32_t schema_version)
> diff --git a/test/replication/misc.result b/test/replication/misc.result
> index c32681a7a..1f7e78a2f 100644
> --- a/test/replication/misc.result
> +++ b/test/replication/misc.result
> @@ -604,3 +604,74 @@ test_run:cmd("delete server replica")
> test_run:cleanup_cluster()
> ---
> ...
> +--
> +-- gh-3704 move cluster id check to replica
> +--
> +test_run:cmd("create server replica with rpl_master=default, script='replication/replica.lua'")
> +---
> +- true
> +...
> +box.schema.user.grant("guest", "replication")
> +---
> +...
> +test_run:cmd("start server replica")
> +---
> +- true
> +...
> +test_run:grep_log("replica", "REPLICASET_UUID_MISMATCH")
> +---
> +- null
> +...
> +box.info.replication[2].downstream ~= nil
> +---
> +- true
> +...
> +-- make master generate another cluster uuid and check that
> +-- replica doesn't connect
> +test_run:cmd("stop server replica")
> +---
> +- true
> +...
> +test_run:cmd("restart server default with cleanup=1")
> +box.schema.user.grant("guest", "replication")
> +---
> +...
> +test_run:cmd("start server replica")
> +---
> +- true
> +...
> +-- master believes replica is in cluster, but their UUIDS differ.
> +replica_uuid = test_run:eval("replica", "box.info.uuid")[1]
> +---
> +...
> +_ = box.space._cluster:insert{2, replica_uuid}
> +---
> +...
> +test_run:cmd("restart server replica")
> +---
> +- true
> +...
> +test_run:grep_log("replica", "REPLICASET_UUID_MISMATCH")
> +---
> +- REPLICASET_UUID_MISMATCH
> +...
> +box.info.replication[2].downstream ~= nil
> +---
> +- false
> +...
> +test_run:cmd("restart server default with cleanup=1")
> +---
> +- true
> +...
> +test_run:cmd("stop server replica")
> +---
> +- true
> +...
> +test_run:cmd("cleanup server replica")
> +---
> +- true
> +...
> +test_run:cmd("delete server replica")
> +---
> +- true
> +...
> diff --git a/test/replication/misc.test.lua b/test/replication/misc.test.lua
> index 6a8af05c3..967c4e912 100644
> --- a/test/replication/misc.test.lua
> +++ b/test/replication/misc.test.lua
> @@ -243,3 +243,29 @@ test_run:cmd("stop server replica")
> test_run:cmd("cleanup server replica")
> test_run:cmd("delete server replica")
> test_run:cleanup_cluster()
> +
> +--
> +-- gh-3704 move cluster id check to replica
> +--
> +test_run:cmd("create server replica with rpl_master=default, script='replication/replica.lua'")
> +box.schema.user.grant("guest", "replication")
> +test_run:cmd("start server replica")
> +test_run:grep_log("replica", "REPLICASET_UUID_MISMATCH")
> +box.info.replication[2].downstream ~= nil
> +-- make master generate another cluster uuid and check that
> +-- replica doesn't connect
> +test_run:cmd("stop server replica")
> +test_run:cmd("restart server default with cleanup=1")
> +box.schema.user.grant("guest", "replication")
> +test_run:cmd("start server replica")
> +-- master believes replica is in cluster, but their UUIDS differ.
> +replica_uuid = test_run:eval("replica", "box.info.uuid")[1]
> +_ = box.space._cluster:insert{2, replica_uuid}
> +test_run:cmd("restart server replica")
> +test_run:grep_log("replica", "REPLICASET_UUID_MISMATCH")
> +box.info.replication[2].downstream ~= nil
> +
> +test_run:cmd("restart server default with cleanup=1")
> +test_run:cmd("stop server replica")
> +test_run:cmd("cleanup server replica")
> +test_run:cmd("delete server replica")
> -- 
> 2.17.2 (Apple Git-113)
> 

^ permalink raw reply	[flat|nested] 5+ messages in thread

* Re: [PATCH] replication: move cluster id match check to replica
  2019-01-31 13:25 [PATCH] replication: move cluster id match check to replica Serge Petrenko
  2019-01-31 13:30 ` Serge Petrenko
@ 2019-02-06 10:48 ` Vladimir Davydov
  2019-02-06 14:51   ` Serge Petrenko
  1 sibling, 1 reply; 5+ messages in thread
From: Vladimir Davydov @ 2019-02-06 10:48 UTC (permalink / raw)
  To: Serge Petrenko; +Cc: tarantool-patches, georgy

On Thu, Jan 31, 2019 at 04:25:26PM +0300, Serge Petrenko wrote:
> On replica subscribe master checks that replica's cluster id matches
> master's one, and disallows replication in case of mismatch.
> This behaviour blocks implementation of anonymous replicas, which
> shouldn't pollute _cluster space and could accumulate changes from
> multiple clusters at once.
> So let's move the check to replica to let it decide which action to take
> in case of mismatch.
> 
> Needed for #3186
> Closes #3704
> ---
> Issue: https://github.com/tarantool/tarantool/issues/3704
> Branch: https://github.com/tarantool/tarantool/tree/sp/gh-3704-cluster-id-check-on-replica
> 
>  src/box/applier.cc             | 24 +++++++++++-
>  src/box/box.cc                 | 22 +++++------
>  src/box/xrow.c                 | 26 +++++++++++++
>  src/box/xrow.h                 | 51 ++++++++++++++++++++++++
>  test/replication/misc.result   | 71 ++++++++++++++++++++++++++++++++++
>  test/replication/misc.test.lua | 26 +++++++++++++
>  6 files changed, 206 insertions(+), 14 deletions(-)
> 
> diff --git a/src/box/applier.cc b/src/box/applier.cc
> index 21d2e6bcb..704ed6c5b 100644
> --- a/src/box/applier.cc
> +++ b/src/box/applier.cc
> @@ -391,6 +391,7 @@ applier_subscribe(struct applier *applier)
>  	struct ibuf *ibuf = &applier->ibuf;
>  	struct xrow_header row;
>  	struct vclock remote_vclock_at_subscribe;
> +	struct tt_uuid cluster_id = uuid_nil;
>  
>  	xrow_encode_subscribe_xc(&row, &REPLICASET_UUID, &INSTANCE_UUID,
>  				 &replicaset.vclock);
> @@ -408,9 +409,30 @@ applier_subscribe(struct applier *applier)
>  		/*
>  		 * In case of successful subscribe, the server
>  		 * responds with its current vclock.
> +		 *
> +		 * Tarantool 2.1.1 also sends its cluster id to
> +		 * the replica, and replica has to check whether
> +		 * its and master's cluster ids match.
>  		 */
>  		vclock_create(&remote_vclock_at_subscribe);
> -		xrow_decode_vclock_xc(&row, &remote_vclock_at_subscribe);
> +		if (applier->version_id >= version_id(2, 1, 1)) {

We already released 2.1.1. Should be 2.1.2? Anyway, do we really need to
check the version here? Let's unconditionally check replicaset_uuid in
case it's present. It shouldn't break anything, should it?

> +			xrow_decode_subscribe_response_xc(&row,
> +							  &cluster_id,
> +							  &remote_vclock_at_subscribe);
> +			/*
> +			 * If master didn't send us its cluster id
> +			 * assume that it has done all the checks.
> +			 * In this case cluster_id will remain zero.
> +			 */
> +			if (!tt_uuid_is_equal(&cluster_id, &uuid_nil) &&
> +			    !tt_uuid_is_equal(&cluster_id, &REPLICASET_UUID)) {
> +				tnt_raise(ClientError, ER_REPLICASET_UUID_MISMATCH,
> +					  tt_uuid_str(&cluster_id),
> +					  tt_uuid_str(&REPLICASET_UUID));
> +			}
> +		} else {
> +			xrow_decode_vclock_xc(&row, &remote_vclock_at_subscribe);
> +		}
>  	}
>  	/*
>  	 * Tarantool < 1.6.7:
> diff --git a/src/box/xrow.c b/src/box/xrow.c
> index c4e3073be..84b3473b1 100644
> --- a/src/box/xrow.c
> +++ b/src/box/xrow.c
> @@ -1170,6 +1170,32 @@ xrow_encode_vclock(struct xrow_header *row, const struct vclock *vclock)
>  	return 0;
>  }
>  
> +int
> +xrow_encode_subscribe_response(struct xrow_header *row,
> +			       const struct tt_uuid *replicaset_uuid,
> +			       const struct vclock *vclock)
> +{
> +	memset(row, 0, sizeof(*row));
> +	size_t size = XROW_BODY_LEN_MAX + UUID_STR_LEN + mp_sizeof_vclock(vclock);

XROW_BODY_LEN_MAX? Why is it here?

Let's estimate the response size with mp_sizeof please.

> +	char *buf = (char *) region_alloc(&fiber()->gc, size);
> +	if (buf == NULL) {
> +		diag_set(OutOfMemory, size, "region_alloc", "buf");
> +		return -1;
> +	}
> +	char *data = buf;
> +	data = mp_encode_map(data, 2);
> +	data = mp_encode_uint(data, IPROTO_VCLOCK);
> +	data = mp_encode_vclock(data, vclock);
> +	data = mp_encode_uint(data, IPROTO_CLUSTER_UUID);
> +	data = xrow_encode_uuid(data, replicaset_uuid);
> +	assert(data <= buf + size);
> +	row->body[0].iov_base = buf;
> +	row->body[0].iov_len = (data - buf);
> +	row->bodycnt = 1;
> +	row->type = IPROTO_OK;
> +	return 0;
> +}
> diff --git a/test/replication/misc.test.lua b/test/replication/misc.test.lua
> index 6a8af05c3..967c4e912 100644
> --- a/test/replication/misc.test.lua
> +++ b/test/replication/misc.test.lua
> @@ -243,3 +243,29 @@ test_run:cmd("stop server replica")
>  test_run:cmd("cleanup server replica")
>  test_run:cmd("delete server replica")
>  test_run:cleanup_cluster()
> +
> +--
> +-- gh-3704 move cluster id check to replica
> +--
> +test_run:cmd("create server replica with rpl_master=default, script='replication/replica.lua'")
> +box.schema.user.grant("guest", "replication")
> +test_run:cmd("start server replica")
> +test_run:grep_log("replica", "REPLICASET_UUID_MISMATCH")
> +box.info.replication[2].downstream ~= nil
> +-- make master generate another cluster uuid and check that
> +-- replica doesn't connect
> +test_run:cmd("stop server replica")
> +test_run:cmd("restart server default with cleanup=1")
> +box.schema.user.grant("guest", "replication")
> +test_run:cmd("start server replica")
> +-- master believes replica is in cluster, but their UUIDS differ.
> +replica_uuid = test_run:eval("replica", "box.info.uuid")[1]
> +_ = box.space._cluster:insert{2, replica_uuid}

You could instead call box.schema._replace{'cluster_id', new_uuid}.
Not sure if the test would be shorter if you'd done that though.

> +test_run:cmd("restart server replica")
> +test_run:grep_log("replica", "REPLICASET_UUID_MISMATCH")
> +box.info.replication[2].downstream ~= nil
> +
> +test_run:cmd("restart server default with cleanup=1")
> +test_run:cmd("stop server replica")
> +test_run:cmd("cleanup server replica")
> +test_run:cmd("delete server replica")

^ permalink raw reply	[flat|nested] 5+ messages in thread

* Re: [PATCH] replication: move cluster id match check to replica
  2019-02-06 10:48 ` Vladimir Davydov
@ 2019-02-06 14:51   ` Serge Petrenko
  2019-02-07 10:14     ` Vladimir Davydov
  0 siblings, 1 reply; 5+ messages in thread
From: Serge Petrenko @ 2019-02-06 14:51 UTC (permalink / raw)
  To: Vladimir Davydov; +Cc: tarantool-patches, Georgy Kirichenko

[-- Attachment #1: Type: text/plain, Size: 10161 bytes --]

Hi! Thank you for the review!

> 6 февр. 2019 г., в 13:48, Vladimir Davydov <vdavydov.dev@gmail.com> написал(а):
> 
> On Thu, Jan 31, 2019 at 04:25:26PM +0300, Serge Petrenko wrote:
>> On replica subscribe master checks that replica's cluster id matches
>> 
>> 		 * responds with its current vclock.
>> +		 *
>> +		 * Tarantool 2.1.1 also sends its cluster id to
>> +		 * the replica, and replica has to check whether
>> +		 * its and master's cluster ids match.
>> 		 */
>> 		vclock_create(&remote_vclock_at_subscribe);
>> -		xrow_decode_vclock_xc(&row, &remote_vclock_at_subscribe);
>> +		if (applier->version_id >= version_id(2, 1, 1)) {
> 
> We already released 2.1.1. Should be 2.1.2? Anyway, do we really need to
> check the version here? Let's unconditionally check replicaset_uuid in
> case it's present. It shouldn't break anything, should it?

Yes, you’re right. I removed the if-else.

> 
>> +			xrow_decode_subscribe_response_xc(&row,
>> +							  &cluster_id,
>> +							  &remote_vclock_at_subscribe);
>> +			/*
>> +			 * If master didn't send us its cluster id
>> +			 * assume that it has done all the checks.
>> +			 * In this case cluster_id will remain zero.
>> +			 */
>> +			if (!tt_uuid_is_equal(&cluster_id, &uuid_nil) &&
>> +			    !tt_uuid_is_equal(&cluster_id, &REPLICASET_UUID)) {
>> +				tnt_raise(ClientError, ER_REPLICASET_UUID_MISMATCH,
>> +					  tt_uuid_str(&cluster_id),
>> +					  tt_uuid_str(&REPLICASET_UUID));
>> +			}
>> +		} else {
>> +			xrow_decode_vclock_xc(&row, &remote_vclock_at_subscribe);
>> +		}
>> 	}
>> 	/*
>> 	 * Tarantool < 1.6.7:
>> diff --git a/src/box/xrow.c b/src/box/xrow.c
>> index c4e3073be..84b3473b1 100644
>> --- a/src/box/xrow.c
>> +++ b/src/box/xrow.c
>> @@ -1170,6 +1170,32 @@ xrow_encode_vclock(struct xrow_header *row, const struct vclock *vclock)
>> 	return 0;
>> }
>> 
>> +int
>> +xrow_encode_subscribe_response(struct xrow_header *row,
>> +			       const struct tt_uuid *replicaset_uuid,
>> +			       const struct vclock *vclock)
>> +{
>> +	memset(row, 0, sizeof(*row));
>> +	size_t size = XROW_BODY_LEN_MAX + UUID_STR_LEN + mp_sizeof_vclock(vclock);
> 
> XROW_BODY_LEN_MAX? Why is it here?
> 
> Let's estimate the response size with mp_sizeof please.

Ok, now I count it properly, sorry.

> 
>> +	char *buf = (char *) region_alloc(&fiber()->gc, size);
>> +	if (buf == NULL) {
>> +		diag_set(OutOfMemory, size, "region_alloc", "buf");
>> +		return -1;
>> +	}
>> +	char *data = buf;
>> +	data = mp_encode_map(data, 2);
>> +	data = mp_encode_uint(data, IPROTO_VCLOCK);
>> +	data = mp_encode_vclock(data, vclock);
>> +	data = mp_encode_uint(data, IPROTO_CLUSTER_UUID);
>> +	data = xrow_encode_uuid(data, replicaset_uuid);
>> +	assert(data <= buf + size);
>> +	row->body[0].iov_base = buf;
>> +	row->body[0].iov_len = (data - buf);
>> +	row->bodycnt = 1;
>> +	row->type = IPROTO_OK;
>> +	return 0;
>> +}
>> diff --git a/test/replication/misc.test.lua b/test/replication/misc.test.lua
>> index 6a8af05c3..967c4e912 100644
>> --- a/test/replication/misc.test.lua
>> +++ b/test/replication/misc.test.lua
>> @@ -243,3 +243,29 @@ test_run:cmd("stop server replica")
>> test_run:cmd("cleanup server replica")
>> test_run:cmd("delete server replica")
>> test_run:cleanup_cluster()
>> +
>> +--
>> +-- gh-3704 move cluster id check to replica
>> +--
>> +test_run:cmd("create server replica with rpl_master=default, script='replication/replica.lua'")
>> +box.schema.user.grant("guest", "replication")
>> +test_run:cmd("start server replica")
>> +test_run:grep_log("replica", "REPLICASET_UUID_MISMATCH")
>> +box.info.replication[2].downstream ~= nil
>> +-- make master generate another cluster uuid and check that
>> +-- replica doesn't connect
>> +test_run:cmd("stop server replica")
>> +test_run:cmd("restart server default with cleanup=1")
>> +box.schema.user.grant("guest", "replication")
>> +test_run:cmd("start server replica")
>> +-- master believes replica is in cluster, but their UUIDS differ.
>> +replica_uuid = test_run:eval("replica", "box.info.uuid")[1]
>> +_ = box.space._cluster:insert{2, replica_uuid}
> 
> You could instead call box.schema._replace{'cluster_id', new_uuid}.
> Not sure if the test would be shorter if you'd done that though.

Done, the test is a bit shorter, indeed.

> 
>> +test_run:cmd("restart server replica")
>> +test_run:grep_log("replica", "REPLICASET_UUID_MISMATCH")
>> +box.info.replication[2].downstream ~= nil
>> +
>> +test_run:cmd("restart server default with cleanup=1")
>> +test_run:cmd("stop server replica")
>> +test_run:cmd("cleanup server replica")
>> +test_run:cmd("delete server replica")

Here’s an incremental diff. The fixes are on the branch.

diff --git a/src/box/applier.cc b/src/box/applier.cc
index 704ed6c5b..045c69102 100644
--- a/src/box/applier.cc
+++ b/src/box/applier.cc
@@ -415,23 +415,19 @@ applier_subscribe(struct applier *applier)
 		 * its and master's cluster ids match.
 		 */
 		vclock_create(&remote_vclock_at_subscribe);
-		if (applier->version_id >= version_id(2, 1, 1)) {
-			xrow_decode_subscribe_response_xc(&row,
-							  &cluster_id,
-							  &remote_vclock_at_subscribe);
-			/*
-			 * If master didn't send us its cluster id
-			 * assume that it has done all the checks.
-			 * In this case cluster_id will remain zero.
-			 */
-			if (!tt_uuid_is_equal(&cluster_id, &uuid_nil) &&
-			    !tt_uuid_is_equal(&cluster_id, &REPLICASET_UUID)) {
-				tnt_raise(ClientError, ER_REPLICASET_UUID_MISMATCH,
-					  tt_uuid_str(&cluster_id),
-					  tt_uuid_str(&REPLICASET_UUID));
-			}
-		} else {
-			xrow_decode_vclock_xc(&row, &remote_vclock_at_subscribe);
+		xrow_decode_subscribe_response_xc(&row,
+						  &cluster_id,
+						  &remote_vclock_at_subscribe);
+		/*
+		 * If master didn't send us its cluster id
+		 * assume that it has done all the checks.
+		 * In this case cluster_id will remain zero.
+		 */
+		if (!tt_uuid_is_equal(&cluster_id, &uuid_nil) &&
+		    !tt_uuid_is_equal(&cluster_id, &REPLICASET_UUID)) {
+			tnt_raise(ClientError, ER_REPLICASET_UUID_MISMATCH,
+				  tt_uuid_str(&cluster_id),
+				  tt_uuid_str(&REPLICASET_UUID));
 		}
 	}
 	/*
diff --git a/src/box/xrow.c b/src/box/xrow.c
index 84b3473b1..ba3671fed 100644
--- a/src/box/xrow.c
+++ b/src/box/xrow.c
@@ -1176,7 +1176,10 @@ xrow_encode_subscribe_response(struct xrow_header *row,
 			       const struct vclock *vclock)
 {
 	memset(row, 0, sizeof(*row));
-	size_t size = XROW_BODY_LEN_MAX + UUID_STR_LEN + mp_sizeof_vclock(vclock);
+	size_t size = mp_sizeof_map(2) +
+		      mp_sizeof_uint(IPROTO_VCLOCK) + mp_sizeof_vclock(vclock) +
+		      mp_sizeof_uint(IPROTO_CLUSTER_UUID) +
+		      mp_sizeof_str(UUID_STR_LEN);
 	char *buf = (char *) region_alloc(&fiber()->gc, size);
 	if (buf == NULL) {
 		diag_set(OutOfMemory, size, "region_alloc", "buf");
diff --git a/test/replication/misc.result b/test/replication/misc.result
index 1f7e78a2f..2401ac5cf 100644
--- a/test/replication/misc.result
+++ b/test/replication/misc.result
@@ -622,47 +622,32 @@ test_run:grep_log("replica", "REPLICASET_UUID_MISMATCH")
 ---
 - null
 ...
-box.info.replication[2].downstream ~= nil
+box.info.replication[2].downstream.status
 ---
-- true
+- follow
 ...
--- make master generate another cluster uuid and check that
--- replica doesn't connect
+-- change master's cluster uuid and check that replica doesn't connect.
 test_run:cmd("stop server replica")
 ---
 - true
 ...
-test_run:cmd("restart server default with cleanup=1")
-box.schema.user.grant("guest", "replication")
+_ = box.space._schema:replace{'cluster', tostring(uuid.new())}
 ---
 ...
+-- master believes replica is in cluster, but their cluster UUIDS differ.
 test_run:cmd("start server replica")
 ---
 - true
 ...
--- master believes replica is in cluster, but their UUIDS differ.
-replica_uuid = test_run:eval("replica", "box.info.uuid")[1]
----
-...
-_ = box.space._cluster:insert{2, replica_uuid}
----
-...
-test_run:cmd("restart server replica")
----
-- true
-...
-test_run:grep_log("replica", "REPLICASET_UUID_MISMATCH")
+test_run:wait_log("replica", "REPLICASET_UUID_MISMATCH", nil, 1.0)
 ---
 - REPLICASET_UUID_MISMATCH
 ...
-box.info.replication[2].downstream ~= nil
+box.info.replication[2].downstream.status
 ---
-- false
+- stopped
 ...
 test_run:cmd("restart server default with cleanup=1")
----
-- true
-...
 test_run:cmd("stop server replica")
 ---
 - true
diff --git a/test/replication/misc.test.lua b/test/replication/misc.test.lua
index 967c4e912..a1ddf75a8 100644
--- a/test/replication/misc.test.lua
+++ b/test/replication/misc.test.lua
@@ -251,19 +251,14 @@ test_run:cmd("create server replica with rpl_master=default, script='replication
 box.schema.user.grant("guest", "replication")
 test_run:cmd("start server replica")
 test_run:grep_log("replica", "REPLICASET_UUID_MISMATCH")
-box.info.replication[2].downstream ~= nil
--- make master generate another cluster uuid and check that
--- replica doesn't connect
+box.info.replication[2].downstream.status
+-- change master's cluster uuid and check that replica doesn't connect.
 test_run:cmd("stop server replica")
-test_run:cmd("restart server default with cleanup=1")
-box.schema.user.grant("guest", "replication")
+_ = box.space._schema:replace{'cluster', tostring(uuid.new())}
+-- master believes replica is in cluster, but their cluster UUIDS differ.
 test_run:cmd("start server replica")
--- master believes replica is in cluster, but their UUIDS differ.
-replica_uuid = test_run:eval("replica", "box.info.uuid")[1]
-_ = box.space._cluster:insert{2, replica_uuid}
-test_run:cmd("restart server replica")
-test_run:grep_log("replica", "REPLICASET_UUID_MISMATCH")
-box.info.replication[2].downstream ~= nil
+test_run:wait_log("replica", "REPLICASET_UUID_MISMATCH", nil, 1.0)
+box.info.replication[2].downstream.status
 
 test_run:cmd("restart server default with cleanup=1")
 test_run:cmd("stop server replica")


[-- Attachment #2: Type: text/html, Size: 28512 bytes --]

^ permalink raw reply	[flat|nested] 5+ messages in thread

* Re: [PATCH] replication: move cluster id match check to replica
  2019-02-06 14:51   ` Serge Petrenko
@ 2019-02-07 10:14     ` Vladimir Davydov
  0 siblings, 0 replies; 5+ messages in thread
From: Vladimir Davydov @ 2019-02-07 10:14 UTC (permalink / raw)
  To: Serge Petrenko; +Cc: tarantool-patches, Georgy Kirichenko

Pushed to 2.1 with some minor changes:

diff --git a/src/box/applier.cc b/src/box/applier.cc
index 045c6910..512d05df 100644
--- a/src/box/applier.cc
+++ b/src/box/applier.cc
@@ -410,7 +410,7 @@ applier_subscribe(struct applier *applier)
 		 * In case of successful subscribe, the server
 		 * responds with its current vclock.
 		 *
-		 * Tarantool 2.1.1 also sends its cluster id to
+		 * Tarantool > 2.1.1 also sends its cluster id to
 		 * the replica, and replica has to check whether
 		 * its and master's cluster ids match.
 		 */
@@ -423,7 +423,7 @@ applier_subscribe(struct applier *applier)
 		 * assume that it has done all the checks.
 		 * In this case cluster_id will remain zero.
 		 */
-		if (!tt_uuid_is_equal(&cluster_id, &uuid_nil) &&
+		if (!tt_uuid_is_nil(&cluster_id) &&
 		    !tt_uuid_is_equal(&cluster_id, &REPLICASET_UUID)) {
 			tnt_raise(ClientError, ER_REPLICASET_UUID_MISMATCH,
 				  tt_uuid_str(&cluster_id),
diff --git a/src/box/box.cc b/src/box/box.cc
index 8a8c0816..e12a1cba 100644
--- a/src/box/box.cc
+++ b/src/box/box.cc
@@ -1609,11 +1609,11 @@ box_process_subscribe(struct ev_io *io, struct xrow_header *header)
 	 * the replica how many rows we have in stock for it,
 	 * and identify ourselves with our own replica id.
 	 *
-	 * Since Tarantool version 2.1.1 master doesn't check
-	 * that replica and master have the same cluster id.
-	 * It sends its cluster id to replica instead, and replica
-	 * checks that its cluster id matches master's one.
-	 * Older versions will just ignore the additional field.
+	 * Tarantool > 2.1.1 master doesn't check that replica
+	 * has the same cluster id. Instead it sends its cluster
+	 * id to replica, and replica checks that its cluster id
+	 * matches master's one. Older versions will just ignore
+	 * the additional field.
 	 */
 	struct xrow_header row;
 	xrow_encode_subscribe_response_xc(&row,
diff --git a/test/replication/misc.result b/test/replication/misc.result
index 2401ac5c..ab827c50 100644
--- a/test/replication/misc.result
+++ b/test/replication/misc.result
@@ -634,7 +634,7 @@ test_run:cmd("stop server replica")
 _ = box.space._schema:replace{'cluster', tostring(uuid.new())}
 ---
 ...
--- master believes replica is in cluster, but their cluster UUIDS differ.
+-- master believes replica is in cluster, but their cluster UUIDs differ.
 test_run:cmd("start server replica")
 ---
 - true
@@ -647,7 +647,6 @@ box.info.replication[2].downstream.status
 ---
 - stopped
 ...
-test_run:cmd("restart server default with cleanup=1")
 test_run:cmd("stop server replica")
 ---
 - true
@@ -660,3 +659,9 @@ test_run:cmd("delete server replica")
 ---
 - true
 ...
+test_run:cleanup_cluster()
+---
+...
+box.schema.user.revoke('guest', 'replication')
+---
+...
diff --git a/test/replication/misc.test.lua b/test/replication/misc.test.lua
index a1ddf75a..eda5310b 100644
--- a/test/replication/misc.test.lua
+++ b/test/replication/misc.test.lua
@@ -255,12 +255,13 @@ box.info.replication[2].downstream.status
 -- change master's cluster uuid and check that replica doesn't connect.
 test_run:cmd("stop server replica")
 _ = box.space._schema:replace{'cluster', tostring(uuid.new())}
--- master believes replica is in cluster, but their cluster UUIDS differ.
+-- master believes replica is in cluster, but their cluster UUIDs differ.
 test_run:cmd("start server replica")
 test_run:wait_log("replica", "REPLICASET_UUID_MISMATCH", nil, 1.0)
 box.info.replication[2].downstream.status
 
-test_run:cmd("restart server default with cleanup=1")
 test_run:cmd("stop server replica")
 test_run:cmd("cleanup server replica")
 test_run:cmd("delete server replica")
+test_run:cleanup_cluster()
+box.schema.user.revoke('guest', 'replication')

^ permalink raw reply	[flat|nested] 5+ messages in thread

end of thread, other threads:[~2019-02-07 10:14 UTC | newest]

Thread overview: 5+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2019-01-31 13:25 [PATCH] replication: move cluster id match check to replica Serge Petrenko
2019-01-31 13:30 ` Serge Petrenko
2019-02-06 10:48 ` Vladimir Davydov
2019-02-06 14:51   ` Serge Petrenko
2019-02-07 10:14     ` Vladimir Davydov

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox