Tarantool development patches archive
* [Tarantool-patches] [RFC v6 0/3] limbo: implement packets filtering
@ 2021-07-16 21:19 Cyrill Gorcunov via Tarantool-patches
  2021-07-16 21:19 ` [Tarantool-patches] [RFC v6 1/3] limbo: gather promote tracking into a separate structure Cyrill Gorcunov via Tarantool-patches
                   ` (2 more replies)
  0 siblings, 3 replies; 5+ messages in thread
From: Cyrill Gorcunov via Tarantool-patches @ 2021-07-16 21:19 UTC (permalink / raw)
  To: tml; +Cc: Vladislav Shpilevoy

Guys, please take a look. I skipped a few preparatory commits to avoid
spamming the list (since this is still an RFC) and there are no tests
yet, but I want to show an early draft anyway.

Comments are highly appreciated. Note that even the existing tests are
not passing yet, so the filtering is obviously too eager; I'll address
this.

branch gorcunov/gh-6036-rollback-confirm-06-notest

v6:
 - use txn_limbo_terms name for structure
 - rebase on fresh sp/gh-6034-empty-limbo-transition branch
 - rework filtering chains

Cyrill Gorcunov (3):
  limbo: gather promote tracking into a separate structure
  limbo: order access to the limbo terms
  limbo: filter incoming synchro requests

 src/box/applier.cc     |  16 ++-
 src/box/box.cc         |  18 +--
 src/box/memtx_engine.c |   3 +-
 src/box/txn_limbo.c    | 244 +++++++++++++++++++++++++++++++++++++----
 src/box/txn_limbo.h    | 126 +++++++++++++++++----
 5 files changed, 354 insertions(+), 53 deletions(-)

-- 
2.31.1


* [Tarantool-patches] [RFC v6 1/3] limbo: gather promote tracking into a separate structure
  2021-07-16 21:19 [Tarantool-patches] [RFC v6 0/3] limbo: implement packets filtering Cyrill Gorcunov via Tarantool-patches
@ 2021-07-16 21:19 ` Cyrill Gorcunov via Tarantool-patches
  2021-07-16 21:19 ` [Tarantool-patches] [RFC v6 2/3] limbo: order access to the limbo terms Cyrill Gorcunov via Tarantool-patches
  2021-07-16 21:19 ` [Tarantool-patches] [RFC v6 3/3] limbo: filter incoming synchro requests Cyrill Gorcunov via Tarantool-patches
  2 siblings, 0 replies; 5+ messages in thread
From: Cyrill Gorcunov via Tarantool-patches @ 2021-07-16 21:19 UTC (permalink / raw)
  To: tml; +Cc: Vladislav Shpilevoy

This is needed to introduce ordered modifications of promote-related
data in the next patch.

Part-of #6036

Signed-off-by: Cyrill Gorcunov <gorcunov@gmail.com>
---
 src/box/box.cc      | 12 +++++++----
 src/box/txn_limbo.c | 24 ++++++++++++++--------
 src/box/txn_limbo.h | 49 ++++++++++++++++++++++++++++-----------------
 3 files changed, 55 insertions(+), 30 deletions(-)

diff --git a/src/box/box.cc b/src/box/box.cc
index f68fffcab..64db2680a 100644
--- a/src/box/box.cc
+++ b/src/box/box.cc
@@ -1573,7 +1573,8 @@ box_run_elections(void)
 static int
 box_check_promote_term_changed(uint64_t promote_term)
 {
-	if (txn_limbo.promote_greatest_term != promote_term) {
+	const struct txn_limbo_terms *tr = &txn_limbo.terms;
+	if (tr->terms_max != promote_term) {
 		diag_set(ClientError, ER_INTERFERING_PROMOTE,
 			 txn_limbo.owner_id);
 		return -1;
@@ -1585,7 +1586,8 @@ box_check_promote_term_changed(uint64_t promote_term)
 static int
 box_trigger_elections(void)
 {
-	uint64_t promote_term = txn_limbo.promote_greatest_term;
+	const struct txn_limbo_terms *tr = &txn_limbo.terms;
+	uint64_t promote_term = tr->terms_max;
 	raft_new_term(box_raft());
 	if (box_raft_wait_term_persisted() < 0)
 		return -1;
@@ -1596,7 +1598,8 @@ box_trigger_elections(void)
 static int
 box_try_wait_confirm(double timeout)
 {
-	uint64_t promote_term = txn_limbo.promote_greatest_term;
+	const struct txn_limbo_terms *tr = &txn_limbo.terms;
+	uint64_t promote_term = tr->terms_max;
 	txn_limbo_wait_empty(&txn_limbo, timeout);
 	return box_check_promote_term_changed(promote_term);
 }
@@ -1612,7 +1615,8 @@ box_wait_limbo_acked(void)
 	if (txn_limbo_is_empty(&txn_limbo))
 		return txn_limbo.confirmed_lsn;
 
-	uint64_t promote_term = txn_limbo.promote_greatest_term;
+	const struct txn_limbo_terms *tr = &txn_limbo.terms;
+	uint64_t promote_term = tr->terms_max;
 	int quorum = replication_synchro_quorum;
 	struct txn_limbo_entry *last_entry;
 	last_entry = txn_limbo_last_synchro_entry(&txn_limbo);
diff --git a/src/box/txn_limbo.c b/src/box/txn_limbo.c
index 570f77c46..53c86f34e 100644
--- a/src/box/txn_limbo.c
+++ b/src/box/txn_limbo.c
@@ -37,6 +37,13 @@
 
 struct txn_limbo txn_limbo;
 
+static void
+txn_limbo_terms_create(struct txn_limbo_terms *tr)
+{
+	vclock_create(&tr->terms_map);
+	tr->terms_max = 0;
+}
+
 static inline void
 txn_limbo_create(struct txn_limbo *limbo)
 {
@@ -45,8 +52,7 @@ txn_limbo_create(struct txn_limbo *limbo)
 	limbo->owner_id = REPLICA_ID_NIL;
 	fiber_cond_create(&limbo->wait_cond);
 	vclock_create(&limbo->vclock);
-	vclock_create(&limbo->promote_term_map);
-	limbo->promote_greatest_term = 0;
+	txn_limbo_terms_create(&limbo->terms);
 	limbo->confirmed_lsn = 0;
 	limbo->rollback_count = 0;
 	limbo->is_in_rollback = false;
@@ -305,10 +311,11 @@ void
 txn_limbo_checkpoint(const struct txn_limbo *limbo,
 		     struct synchro_request *req)
 {
+	const struct txn_limbo_terms *tr = &limbo->terms;
 	req->type = IPROTO_PROMOTE;
 	req->replica_id = limbo->owner_id;
 	req->lsn = limbo->confirmed_lsn;
-	req->term = limbo->promote_greatest_term;
+	req->term = tr->terms_max;
 }
 
 static void
@@ -726,20 +733,21 @@ txn_limbo_wait_empty(struct txn_limbo *limbo, double timeout)
 void
 txn_limbo_process(struct txn_limbo *limbo, const struct synchro_request *req)
 {
+	struct txn_limbo_terms *tr = &limbo->terms;
 	uint64_t term = req->term;
 	uint32_t origin = req->origin_id;
 	if (txn_limbo_replica_term(limbo, origin) < term) {
-		vclock_follow(&limbo->promote_term_map, origin, term);
-		if (term > limbo->promote_greatest_term)
-			limbo->promote_greatest_term = term;
+		vclock_follow(&tr->terms_map, origin, term);
+		if (term > tr->terms_max)
+			tr->terms_max = term;
 	} else if (iproto_type_is_promote_request(req->type) &&
-		   limbo->promote_greatest_term > 1) {
+		   tr->terms_max > 1) {
 		/* PROMOTE for outdated term. Ignore. */
 		say_info("RAFT: ignoring %s request from instance "
 			 "id %u for term %llu. Greatest term seen "
 			 "before (%llu) is bigger.",
 			 iproto_type_name(req->type), origin, (long long)term,
-			 (long long)limbo->promote_greatest_term);
+			 (long long)tr->terms_max);
 		return;
 	}
 
diff --git a/src/box/txn_limbo.h b/src/box/txn_limbo.h
index 53e52f676..dc980bf7c 100644
--- a/src/box/txn_limbo.h
+++ b/src/box/txn_limbo.h
@@ -75,6 +75,31 @@ txn_limbo_entry_is_complete(const struct txn_limbo_entry *e)
 	return e->is_commit || e->is_rollback;
 }
 
+/**
+ * Keep state of promote requests to handle split-brain
+ * situation and other errors.
+ */
+struct txn_limbo_terms {
+	/**
+	 * Latest terms received with PROMOTE entries from remote instances.
+	 * Limbo uses them to filter out the transactions coming not from the
+	 * limbo owner, but so outdated that they are rolled back everywhere
+	 * except outdated nodes.
+	 */
+	struct vclock terms_map;
+	/**
+	 * The biggest PROMOTE term seen by the instance and persisted in WAL.
+	 * It is related to raft term, but not the same. Synchronous replication
+	 * represented by the limbo is interested only in the won elections
+	 * ended with PROMOTE request.
+	 * It means the limbo's term might be smaller than the raft term, while
+	 * there are ongoing elections, or the leader is already known and this
+	 * instance hasn't read its PROMOTE request yet. During other times the
+	 * limbo and raft are in sync and the terms are the same.
+	 */
+	uint64_t terms_max;
+};
+
 /**
  * Limbo is a place where transactions are stored, which are
  * finished, but not committed nor rolled back. These are
@@ -130,23 +155,9 @@ struct txn_limbo {
 	 */
 	struct vclock vclock;
 	/**
-	 * Latest terms received with PROMOTE entries from remote instances.
-	 * Limbo uses them to filter out the transactions coming not from the
-	 * limbo owner, but so outdated that they are rolled back everywhere
-	 * except outdated nodes.
-	 */
-	struct vclock promote_term_map;
-	/**
-	 * The biggest PROMOTE term seen by the instance and persisted in WAL.
-	 * It is related to raft term, but not the same. Synchronous replication
-	 * represented by the limbo is interested only in the won elections
-	 * ended with PROMOTE request.
-	 * It means the limbo's term might be smaller than the raft term, while
-	 * there are ongoing elections, or the leader is already known and this
-	 * instance hasn't read its PROMOTE request yet. During other times the
-	 * limbo and raft are in sync and the terms are the same.
+	 * Track promote requests.
 	 */
-	uint64_t promote_greatest_term;
+	struct txn_limbo_terms terms;
 	/**
 	 * Maximal LSN gathered quorum and either already confirmed in WAL, or
 	 * whose confirmation is in progress right now. Any attempt to confirm
@@ -218,7 +229,8 @@ txn_limbo_last_entry(struct txn_limbo *limbo)
 static inline uint64_t
 txn_limbo_replica_term(const struct txn_limbo *limbo, uint32_t replica_id)
 {
-	return vclock_get(&limbo->promote_term_map, replica_id);
+	const struct txn_limbo_terms *tr = &limbo->terms;
+	return vclock_get(&tr->terms_map, replica_id);
 }
 
 /**
@@ -229,8 +241,9 @@ static inline bool
 txn_limbo_is_replica_outdated(const struct txn_limbo *limbo,
 			      uint32_t replica_id)
 {
+	const struct txn_limbo_terms *tr = &limbo->terms;
 	return txn_limbo_replica_term(limbo, replica_id) <
-	       limbo->promote_greatest_term;
+	       tr->terms_max;
 }
 
 /**
-- 
2.31.1


* [Tarantool-patches] [RFC v6 2/3] limbo: order access to the limbo terms
  2021-07-16 21:19 [Tarantool-patches] [RFC v6 0/3] limbo: implement packets filtering Cyrill Gorcunov via Tarantool-patches
  2021-07-16 21:19 ` [Tarantool-patches] [RFC v6 1/3] limbo: gather promote tracking into a separate structure Cyrill Gorcunov via Tarantool-patches
@ 2021-07-16 21:19 ` Cyrill Gorcunov via Tarantool-patches
  2021-07-16 21:19 ` [Tarantool-patches] [RFC v6 3/3] limbo: filter incoming synchro requests Cyrill Gorcunov via Tarantool-patches
  2 siblings, 0 replies; 5+ messages in thread
From: Cyrill Gorcunov via Tarantool-patches @ 2021-07-16 21:19 UTC (permalink / raw)
  To: tml; +Cc: Vladislav Shpilevoy

Limbo term tracking is shared between appliers: while one applier is
waiting for a write to complete inside the journal_write() routine,
another one may need to read the term value to figure out whether a
promote request is valid to apply. Due to cooperative multitasking such
access to the terms is not consistent, so we need to make sure that
other fibers read only up-to-date terms (i.e. terms already written to
the WAL).

For this we use a latch: while one fiber holds the terms lock for an
update, other readers wait until the operation is complete.

For example, here is a call graph of two appliers:

applier 1
---------
applier_apply_tx
  (promote term = 3
   current max term = 2)
  applier_synchro_filter_tx
  apply_synchro_row
    journal_write
      (sleeping)

At this moment another applier comes in with obsolete data and term 2:

                              applier 2
                              ---------
                              applier_apply_tx
                                (term 2)
                                applier_synchro_filter_tx
                                  txn_limbo_is_replica_outdated -> false
                                journal_write (sleep)

applier 1
---------
journal wakes up
  apply_synchro_row_cb
    set max term to 3

So applier 2 did not notice that term 3 had already been seen and wrote
obsolete data. With locking, applier 2 will wait until applier 1 has
finished its write.
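
The intended call sequence is roughly the following (a condensed sketch
of what apply_synchro_row() and its journal write callback do together
after this patch; the journal entry setup and error logging are
stripped, and the function name is made up for illustration):

static int
apply_synchro_sketch(struct txn_limbo *limbo,
		     const struct synchro_request *req,
		     struct journal_entry *entry)
{
	/* Take the terms latch before the yielding WAL write. */
	txn_limbo_terms_lock(limbo);
	/*
	 * Any concurrent reader going through txn_limbo_term() or
	 * txn_limbo_is_replica_outdated() now blocks on the same
	 * latch instead of observing a term not yet in the WAL.
	 */
	if (journal_write(entry) != 0 || entry->res < 0) {
		txn_limbo_terms_unlock(limbo);
		return -1;
	}
	/* In the real code this runs in the journal write callback. */
	txn_limbo_process_locked(limbo, req);
	txn_limbo_terms_unlock(limbo);
	return 0;
}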

Part-of #6036

Signed-off-by: Cyrill Gorcunov <gorcunov@gmail.com>
---
 src/box/applier.cc  | 10 ++++--
 src/box/box.cc      | 16 ++++------
 src/box/txn_limbo.c | 17 +++++++++--
 src/box/txn_limbo.h | 74 ++++++++++++++++++++++++++++++++++++++++++---
 4 files changed, 97 insertions(+), 20 deletions(-)

diff --git a/src/box/applier.cc b/src/box/applier.cc
index 92ec088ea..765ffc670 100644
--- a/src/box/applier.cc
+++ b/src/box/applier.cc
@@ -854,7 +854,7 @@ apply_synchro_row_cb(struct journal_entry *entry)
 		applier_rollback_by_wal_io(entry->res);
 	} else {
 		replica_txn_wal_write_cb(synchro_entry->rcb);
-		txn_limbo_process(&txn_limbo, synchro_entry->req);
+		txn_limbo_process_locked(&txn_limbo, synchro_entry->req);
 		trigger_run(&replicaset.applier.on_wal_write, NULL);
 	}
 	fiber_wakeup(synchro_entry->owner);
@@ -870,6 +870,7 @@ apply_synchro_row(uint32_t replica_id, struct xrow_header *row)
 	if (xrow_decode_synchro(row, &req) != 0)
 		goto err;
 
+	txn_limbo_terms_lock(&txn_limbo);
 	struct replica_cb_data rcb_data;
 	struct synchro_entry entry;
 	/*
@@ -907,12 +908,15 @@ apply_synchro_row(uint32_t replica_id, struct xrow_header *row)
 	 * transactions side, including the async ones.
 	 */
 	if (journal_write(&entry.base) != 0)
-		goto err;
+		goto err_unlock;
 	if (entry.base.res < 0) {
 		diag_set_journal_res(entry.base.res);
-		goto err;
+		goto err_unlock;
 	}
+	txn_limbo_terms_unlock(&txn_limbo);
 	return 0;
+err_unlock:
+	txn_limbo_terms_unlock(&txn_limbo);
 err:
 	diag_log();
 	return -1;
diff --git a/src/box/box.cc b/src/box/box.cc
index 64db2680a..e590df425 100644
--- a/src/box/box.cc
+++ b/src/box/box.cc
@@ -1573,8 +1573,7 @@ box_run_elections(void)
 static int
 box_check_promote_term_changed(uint64_t promote_term)
 {
-	const struct txn_limbo_terms *tr = &txn_limbo.terms;
-	if (tr->terms_max != promote_term) {
+	if (txn_limbo_terms_max_raw(&txn_limbo) != promote_term) {
 		diag_set(ClientError, ER_INTERFERING_PROMOTE,
 			 txn_limbo.owner_id);
 		return -1;
@@ -1586,8 +1585,7 @@ box_check_promote_term_changed(uint64_t promote_term)
 static int
 box_trigger_elections(void)
 {
-	const struct txn_limbo_terms *tr = &txn_limbo.terms;
-	uint64_t promote_term = tr->terms_max;
+	uint64_t promote_term = txn_limbo_terms_max_raw(&txn_limbo);
 	raft_new_term(box_raft());
 	if (box_raft_wait_term_persisted() < 0)
 		return -1;
@@ -1598,8 +1596,7 @@ box_trigger_elections(void)
 static int
 box_try_wait_confirm(double timeout)
 {
-	const struct txn_limbo_terms *tr = &txn_limbo.terms;
-	uint64_t promote_term = tr->terms_max;
+	uint64_t promote_term = txn_limbo_terms_max_raw(&txn_limbo);
 	txn_limbo_wait_empty(&txn_limbo, timeout);
 	return box_check_promote_term_changed(promote_term);
 }
@@ -1615,8 +1612,7 @@ box_wait_limbo_acked(void)
 	if (txn_limbo_is_empty(&txn_limbo))
 		return txn_limbo.confirmed_lsn;
 
-	const struct txn_limbo_terms *tr = &txn_limbo.terms;
-	uint64_t promote_term = tr->terms_max;
+	uint64_t promote_term = txn_limbo_terms_max_raw(&txn_limbo);
 	int quorum = replication_synchro_quorum;
 	struct txn_limbo_entry *last_entry;
 	last_entry = txn_limbo_last_synchro_entry(&txn_limbo);
@@ -1753,7 +1749,7 @@ box_promote(void)
 	 * Currently active leader (the instance that is seen as leader by both
 	 * raft and txn_limbo) can't issue another PROMOTE.
 	 */
-	bool is_leader = txn_limbo_replica_term(&txn_limbo, instance_id) ==
+	bool is_leader = txn_limbo_term(&txn_limbo, instance_id) ==
 			 box_raft()->term && txn_limbo.owner_id == instance_id;
 	if (box_election_mode != ELECTION_MODE_OFF)
 		is_leader = is_leader && box_raft()->state == RAFT_STATE_LEADER;
@@ -1802,7 +1798,7 @@ box_demote(void)
 		return 0;
 
 	/* Currently active leader is the only one who can issue a DEMOTE. */
-	bool is_leader = txn_limbo_replica_term(&txn_limbo, instance_id) ==
+	bool is_leader = txn_limbo_term(&txn_limbo, instance_id) ==
 			 box_raft()->term && txn_limbo.owner_id == instance_id;
 	if (box_election_mode != ELECTION_MODE_OFF)
 		is_leader = is_leader && box_raft()->state == RAFT_STATE_LEADER;
diff --git a/src/box/txn_limbo.c b/src/box/txn_limbo.c
index 53c86f34e..437cf199b 100644
--- a/src/box/txn_limbo.c
+++ b/src/box/txn_limbo.c
@@ -40,6 +40,7 @@ struct txn_limbo txn_limbo;
 static void
 txn_limbo_terms_create(struct txn_limbo_terms *tr)
 {
+	latch_create(&tr->latch);
 	vclock_create(&tr->terms_map);
 	tr->terms_max = 0;
 }
@@ -731,12 +732,14 @@ txn_limbo_wait_empty(struct txn_limbo *limbo, double timeout)
 }
 
 void
-txn_limbo_process(struct txn_limbo *limbo, const struct synchro_request *req)
+txn_limbo_process_locked(struct txn_limbo *limbo,
+			 const struct synchro_request *req)
 {
 	struct txn_limbo_terms *tr = &limbo->terms;
 	uint64_t term = req->term;
 	uint32_t origin = req->origin_id;
-	if (txn_limbo_replica_term(limbo, origin) < term) {
+
+	if (txn_limbo_term_locked(limbo, origin) < term) {
 		vclock_follow(&tr->terms_map, origin, term);
 		if (term > tr->terms_max)
 			tr->terms_max = term;
@@ -794,6 +797,15 @@ txn_limbo_process(struct txn_limbo *limbo, const struct synchro_request *req)
 	return;
 }
 
+void
+txn_limbo_process(struct txn_limbo *limbo,
+		  const struct synchro_request *req)
+{
+	txn_limbo_terms_lock(limbo);
+	txn_limbo_process_locked(limbo, req);
+	txn_limbo_terms_unlock(limbo);
+}
+
 void
 txn_limbo_on_parameters_change(struct txn_limbo *limbo)
 {
diff --git a/src/box/txn_limbo.h b/src/box/txn_limbo.h
index dc980bf7c..45687381f 100644
--- a/src/box/txn_limbo.h
+++ b/src/box/txn_limbo.h
@@ -31,6 +31,7 @@
  */
 #include "small/rlist.h"
 #include "vclock/vclock.h"
+#include "latch.h"
 
 #include <stdint.h>
 
@@ -80,6 +81,10 @@ txn_limbo_entry_is_complete(const struct txn_limbo_entry *e)
  * situation and other errors.
  */
 struct txn_limbo_terms {
+	/**
+	 * To order access to the promote data.
+	 */
+	struct latch latch;
 	/**
 	 * Latest terms received with PROMOTE entries from remote instances.
 	 * Limbo uses them to filter out the transactions coming not from the
@@ -222,15 +227,66 @@ txn_limbo_last_entry(struct txn_limbo *limbo)
 				in_queue);
 }
 
+/** Lock promote data. */
+static inline void
+txn_limbo_terms_lock(struct txn_limbo *limbo)
+{
+	struct txn_limbo_terms *tr = &limbo->terms;
+	latch_lock(&tr->latch);
+}
+
+/** Unlock promote data. */
+static inline void
+txn_limbo_terms_unlock(struct txn_limbo *limbo)
+{
+	struct txn_limbo_terms *tr = &limbo->terms;
+	latch_unlock(&tr->latch);
+}
+
+/** Test if promote data is locked. */
+static inline bool
+txn_limbo_terms_is_locked(const struct txn_limbo *limbo)
+{
+	const struct txn_limbo_terms *tr = &limbo->terms;
+	return latch_is_locked(&tr->latch);
+}
+
+/** Fetch replica's term with lock taken. */
+static inline uint64_t
+txn_limbo_term_locked(struct txn_limbo *limbo, uint32_t replica_id)
+{
+	const struct txn_limbo_terms *tr = &limbo->terms;
+	panic_on(!txn_limbo_terms_is_locked(limbo),
+		 "limbo: unlocked term read for replica %u",
+		 replica_id);
+	return vclock_get(&tr->terms_map, replica_id);
+}
+
 /**
  * Return the latest term as seen in PROMOTE requests from instance with id
  * @a replica_id.
  */
 static inline uint64_t
-txn_limbo_replica_term(const struct txn_limbo *limbo, uint32_t replica_id)
+txn_limbo_term(struct txn_limbo *limbo, uint32_t replica_id)
+{
+	txn_limbo_terms_lock(limbo);
+	uint64_t v = txn_limbo_term_locked(limbo, replica_id);
+	txn_limbo_terms_unlock(limbo);
+	return v;
+}
+
+/**
+ * Fiber-preemption-unsafe read of @a terms_max (no lock taken).
+ *
+ * Use it if you're interested in the current value only and
+ * accept that the value may be updated as soon as a yield
+ * happens after the read.
+ */
+static inline uint64_t
+txn_limbo_terms_max_raw(struct txn_limbo *limbo)
 {
 	const struct txn_limbo_terms *tr = &limbo->terms;
-	return vclock_get(&tr->terms_map, replica_id);
+	return tr->terms_max;
 }
 
 /**
@@ -238,12 +294,15 @@ txn_limbo_replica_term(const struct txn_limbo *limbo, uint32_t replica_id)
  * data from it. The check is only valid when elections are enabled.
  */
 static inline bool
-txn_limbo_is_replica_outdated(const struct txn_limbo *limbo,
+txn_limbo_is_replica_outdated(struct txn_limbo *limbo,
 			      uint32_t replica_id)
 {
 	const struct txn_limbo_terms *tr = &limbo->terms;
-	return txn_limbo_replica_term(limbo, replica_id) <
-	       tr->terms_max;
+	txn_limbo_terms_lock(limbo);
+	bool res = txn_limbo_term_locked(limbo, replica_id) <
+		tr->terms_max;
+	txn_limbo_terms_unlock(limbo);
+	return res;
 }
 
 /**
@@ -315,6 +374,11 @@ txn_limbo_wait_complete(struct txn_limbo *limbo, struct txn_limbo_entry *entry);
 
 /** Execute a synchronous replication request. */
 void
+txn_limbo_process_locked(struct txn_limbo *limbo,
+			 const struct synchro_request *req);
+
+/** Lock limbo terms and execute a synchronous replication request. */
+void
 txn_limbo_process(struct txn_limbo *limbo, const struct synchro_request *req);
 
 /**
-- 
2.31.1


* [Tarantool-patches] [RFC v6 3/3] limbo: filter incoming synchro requests
  2021-07-16 21:19 [Tarantool-patches] [RFC v6 0/3] limbo: implement packets filtering Cyrill Gorcunov via Tarantool-patches
  2021-07-16 21:19 ` [Tarantool-patches] [RFC v6 1/3] limbo: gather promote tracking into a separate structure Cyrill Gorcunov via Tarantool-patches
  2021-07-16 21:19 ` [Tarantool-patches] [RFC v6 2/3] limbo: order access to the limbo terms Cyrill Gorcunov via Tarantool-patches
@ 2021-07-16 21:19 ` Cyrill Gorcunov via Tarantool-patches
  2021-07-19 17:42   ` Cyrill Gorcunov via Tarantool-patches
  2 siblings, 1 reply; 5+ messages in thread
From: Cyrill Gorcunov via Tarantool-patches @ 2021-07-16 21:19 UTC (permalink / raw)
  To: tml; +Cc: Vladislav Shpilevoy

When we receive synchro requests we can't just apply them blindly,
because in the worst case they may come from a split-brain configuration
(where a cluster has split into several subclusters, each one has
elected its own leader, and the subclusters then try to merge back into
the original cluster). We need to do our best to detect such
configurations and force these nodes to rejoin from scratch for the
sake of data consistency.

Thus, when processing a request, we first pass it to a packet filter
which validates its contents and refuses to apply the request if the
checks fail.
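
For review convenience, the dispatch part of the filter boils down to
roughly the following (a condensed restatement of
txn_limbo_filter_locked() from the diff below, with the mask loop
rewritten as a for loop; the real checks live in
filter_confirm_rollback() and filter_promote()):

	/* Build a bitmask of chains the request has to pass. */
	unsigned int mask = 0;
	switch (req->type) {
	case IPROTO_CONFIRM:
		mask |= 1u << FILTER_CONFIRM;
		break;
	case IPROTO_ROLLBACK:
		mask |= 1u << FILTER_ROLLBACK;
		break;
	case IPROTO_PROMOTE:
		mask |= 1u << FILTER_PROMOTE;
		break;
	case IPROTO_DEMOTE:
		/* Nothing to check yet. */
		break;
	default:
		panic("limbo: unexpected request %u", req->type);
	}
	/* Run every selected chain; the first failing one rejects. */
	for (unsigned int pos = 0; mask != 0; pos++, mask >>= 1) {
		if ((mask & 1) != 0 && filter_req[pos](limbo, req) != 0)
			return -1;
	}
	return 0;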

Closes #6036

Signed-off-by: Cyrill Gorcunov <gorcunov@gmail.com>
---
 src/box/applier.cc     |   6 +-
 src/box/box.cc         |   6 +-
 src/box/memtx_engine.c |   3 +-
 src/box/txn_limbo.c    | 209 ++++++++++++++++++++++++++++++++++++++---
 src/box/txn_limbo.h    |   9 +-
 5 files changed, 216 insertions(+), 17 deletions(-)

diff --git a/src/box/applier.cc b/src/box/applier.cc
index 765ffc670..f07d4c7b0 100644
--- a/src/box/applier.cc
+++ b/src/box/applier.cc
@@ -458,7 +458,8 @@ applier_wait_snapshot(struct applier *applier)
 				struct synchro_request req;
 				if (xrow_decode_synchro(&row, &req) != 0)
 					diag_raise();
-				txn_limbo_process(&txn_limbo, &req);
+				if (txn_limbo_process(&txn_limbo, &req) != 0)
+					diag_raise();
 			} else if (iproto_type_is_raft_request(row.type)) {
 				struct raft_request req;
 				if (xrow_decode_raft(&row, &req, NULL) != 0)
@@ -871,6 +872,9 @@ apply_synchro_row(uint32_t replica_id, struct xrow_header *row)
 		goto err;
 
 	txn_limbo_terms_lock(&txn_limbo);
+	if (txn_limbo_filter_locked(&txn_limbo, &req) != 0)
+		goto err_unlock;
+
 	struct replica_cb_data rcb_data;
 	struct synchro_entry entry;
 	/*
diff --git a/src/box/box.cc b/src/box/box.cc
index e590df425..6bef10219 100644
--- a/src/box/box.cc
+++ b/src/box/box.cc
@@ -1675,7 +1675,8 @@ box_issue_promote(uint32_t prev_leader_id, int64_t promote_lsn)
 		.lsn = promote_lsn,
 		.term = box_raft()->term,
 	};
-	txn_limbo_process(&txn_limbo, &req);
+	if (txn_limbo_process(&txn_limbo, &req) != 0)
+		diag_raise();
 	assert(txn_limbo_is_empty(&txn_limbo));
 }
 
@@ -1694,7 +1695,8 @@ box_issue_demote(uint32_t prev_leader_id, int64_t promote_lsn)
 		.lsn = promote_lsn,
 		.term = box_raft()->term,
 	};
-	txn_limbo_process(&txn_limbo, &req);
+	if (txn_limbo_process(&txn_limbo, &req) != 0)
+		diag_raise();
 	assert(txn_limbo_is_empty(&txn_limbo));
 }
 
diff --git a/src/box/memtx_engine.c b/src/box/memtx_engine.c
index 0b06e5e63..4aed24fe3 100644
--- a/src/box/memtx_engine.c
+++ b/src/box/memtx_engine.c
@@ -238,7 +238,8 @@ memtx_engine_recover_synchro(const struct xrow_header *row)
 	 * because all its rows have a zero replica_id.
 	 */
 	req.origin_id = req.replica_id;
-	txn_limbo_process(&txn_limbo, &req);
+	if (txn_limbo_process(&txn_limbo, &req) != 0)
+		return -1;
 	return 0;
 }
 
diff --git a/src/box/txn_limbo.c b/src/box/txn_limbo.c
index 437cf199b..8a34f3151 100644
--- a/src/box/txn_limbo.c
+++ b/src/box/txn_limbo.c
@@ -731,27 +731,211 @@ txn_limbo_wait_empty(struct txn_limbo *limbo, double timeout)
 	return 0;
 }
 
+enum filter_chain {
+	FILTER_CONFIRM,
+	FILTER_ROLLBACK,
+	FILTER_PROMOTE,
+	FILTER_MAX,
+};
+
+/**
+ * Filter CONFIRM and ROLLBACK packets.
+ */
+static int
+filter_confirm_rollback(struct txn_limbo *limbo,
+			const struct synchro_request *req)
+{
+	/*
+	 * When the limbo is empty we have nothing to
+	 * confirm or roll back, so if such a request
+	 * comes in it means a split brain has happened.
+	 */
+	if (!txn_limbo_is_empty(limbo))
+		return 0;
+
+	say_info("RAFT: rejecting %s request from "
+		 "instance id %u for term %llu. "
+		 "Empty limbo detected.",
+		 iproto_type_name(req->type),
+		 req->origin_id,
+		 (long long)req->term);
+
+	diag_set(ClientError, ER_UNSUPPORTED,
+		 "Replication",
+		 "confirm/rollback with empty limbo");
+	return -1;
+}
+
+/**
+ * Filter PROMOTE packets.
+ */
+static int
+filter_promote(struct txn_limbo *limbo, const struct synchro_request *req)
+{
+	struct txn_limbo_terms *tr = &limbo->terms;
+	int64_t promote_lsn = req->lsn;
+
+	/*
+	 * If the term has already been seen it means the
+	 * request comes from a node which didn't notice the
+	 * new elections and thus has been living in its own
+	 * subcluster, so its data is no longer consistent.
+	 */
+	if (tr->terms_max > 1 && tr->terms_max > req->term) {
+		say_info("RAFT: rejecting %s request from "
+			 "instance id %u for term %llu. "
+			 "Max term seen is %llu.",
+			 iproto_type_name(req->type),
+			 req->origin_id,
+			 (long long)req->term,
+			 (long long)tr->terms_max);
+
+		diag_set(ClientError, ER_UNSUPPORTED,
+			 "Replication", "obsolete terms");
+		return -1;
+	}
+
+	/*
+	 * Either the limbo is empty or the new promote will
+	 * roll back all waiting transactions, which
+	 * is fine.
+	 */
+	if (limbo->confirmed_lsn == promote_lsn)
+		return 0;
+
+	/*
+	 * Explicit split brain situation. Promote
+	 * comes in with an old LSN which we've already
+	 * processed.
+	 */
+	if (limbo->confirmed_lsn > promote_lsn) {
+		say_info("RAFT: rejecting %s request from "
+			 "instance id %u for term %llu. "
+			 "confirmed_lsn %lld > promote_lsn %lld.",
+			 iproto_type_name(req->type),
+			 req->origin_id, (long long)req->term,
+			 (long long)limbo->confirmed_lsn,
+			 (long long)promote_lsn);
+
+		diag_set(ClientError, ER_UNSUPPORTED,
+			 "Replication",
+			 "backward promote LSN (split brain)");
+		return -1;
+	}
+
+	/*
+	 * The last case requires a few subcases.
+	 */
+	assert(limbo->confirmed_lsn < promote_lsn);
+
+	if (txn_limbo_is_empty(limbo)) {
+		/*
+		 * Transactions are already rolled back
+		 * since the limbo is empty.
+		 */
+		say_info("RAFT: rejecting %s request from "
+			 "instance id %u for term %llu. "
+			 "confirmed_lsn %lld < promote_lsn %lld "
+			 "and empty limbo.",
+			 iproto_type_name(req->type),
+			 req->origin_id, (long long)req->term,
+			 (long long)limbo->confirmed_lsn,
+			 (long long)promote_lsn);
+
+		diag_set(ClientError, ER_UNSUPPORTED,
+			 "Replication",
+			 "forward promote LSN "
+			 "(empty limbo, split brain)");
+		return -1;
+	} else {
+		/*
+		 * Some entries are present in the limbo,
+		 * and if the first entry's LSN is greater than
+		 * the requested one then the old data was either
+		 * committed or rolled back, so we can't continue.
+		 */
+		struct txn_limbo_entry *first;
+
+		first = txn_limbo_first_entry(limbo);
+		if (first->lsn > promote_lsn) {
+			say_info("RAFT: rejecting %s request from "
+				 "instance id %u for term %llu. "
+				 "confirmed_lsn %lld < promote_lsn %lld "
+				 "and limbo first lsn %lld.",
+				 iproto_type_name(req->type),
+				 req->origin_id, (long long)req->term,
+				 (long long)limbo->confirmed_lsn,
+				 (long long)promote_lsn,
+				 (long long)first->lsn);
+
+			diag_set(ClientError, ER_UNSUPPORTED,
+				 "Replication",
+				 "promote LSN confilict "
+				 "(limbo LSN ahead, split brain)");
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+static int (*filter_req[FILTER_MAX])
+(struct txn_limbo *limbo, const struct synchro_request *req) = {
+	[FILTER_CONFIRM]	= filter_confirm_rollback,
+	[FILTER_ROLLBACK]	= filter_confirm_rollback,
+	[FILTER_PROMOTE]	= filter_promote,
+};
+
+int
+txn_limbo_filter_locked(struct txn_limbo *limbo,
+			const struct synchro_request *req)
+{
+	unsigned int mask = 0;
+	unsigned int pos = 0;
+
+	switch (req->type) {
+	case IPROTO_CONFIRM:
+		mask |= (1u << FILTER_CONFIRM);
+		break;
+	case IPROTO_ROLLBACK:
+		mask |= (1u << FILTER_ROLLBACK);
+		break;
+	case IPROTO_PROMOTE:
+		mask |= (1u << FILTER_PROMOTE);
+		break;
+	case IPROTO_DEMOTE:
+		/* Do nothing for now. */
+		break;
+	default:
+		panic("limbo: unexpected request %u",
+		      req->type);
+	}
+
+	while (mask != 0) {
+		if ((mask & 1) != 0) {
+			assert(pos < lengthof(filter_req));
+			if (filter_req[pos](limbo, req) != 0)
+				return -1;
+		}
+		pos++;
+		mask >>= 1;
+	};
+
+	return 0;
+}
+
 void
 txn_limbo_process_locked(struct txn_limbo *limbo,
 			 const struct synchro_request *req)
 {
 	struct txn_limbo_terms *tr = &limbo->terms;
-	uint64_t term = req->term;
 	uint32_t origin = req->origin_id;
+	uint64_t term = req->term;
 
 	if (txn_limbo_term_locked(limbo, origin) < term) {
 		vclock_follow(&tr->terms_map, origin, term);
 		if (term > tr->terms_max)
 			tr->terms_max = term;
-	} else if (iproto_type_is_promote_request(req->type) &&
-		   tr->terms_max > 1) {
-		/* PROMOTE for outdated term. Ignore. */
-		say_info("RAFT: ignoring %s request from instance "
-			 "id %u for term %llu. Greatest term seen "
-			 "before (%llu) is bigger.",
-			 iproto_type_name(req->type), origin, (long long)term,
-			 (long long)tr->terms_max);
-		return;
 	}
 
 	int64_t lsn = req->lsn;
@@ -794,13 +978,17 @@ txn_limbo_process_locked(struct txn_limbo *limbo,
 	default:
 		unreachable();
 	}
-	return;
 }
 
-void
+int
 txn_limbo_process(struct txn_limbo *limbo,
 		  const struct synchro_request *req)
 {
 	txn_limbo_terms_lock(limbo);
+	if (txn_limbo_filter_locked(limbo, req) != 0) {
+		txn_limbo_terms_unlock(limbo);
+		return -1;
+	}
 	txn_limbo_process_locked(limbo, req);
 	txn_limbo_terms_unlock(limbo);
+	return 0;
diff --git a/src/box/txn_limbo.h b/src/box/txn_limbo.h
index 45687381f..e89cf6e79 100644
--- a/src/box/txn_limbo.h
+++ b/src/box/txn_limbo.h
@@ -372,13 +372,20 @@ txn_limbo_ack(struct txn_limbo *limbo, uint32_t replica_id, int64_t lsn);
 int
 txn_limbo_wait_complete(struct txn_limbo *limbo, struct txn_limbo_entry *entry);
 
+/**
+ * Verify if the request is valid for processing.
+ */
+int
+txn_limbo_filter_locked(struct txn_limbo *limbo,
+			const struct synchro_request *req);
+
 /** Execute a synchronous replication request. */
 void
 txn_limbo_process_locked(struct txn_limbo *limbo,
 			 const struct synchro_request *req);
 
 /** Lock limbo terms and execute a synchronous replication request. */
-void
+int
 txn_limbo_process(struct txn_limbo *limbo, const struct synchro_request *req);
 
 /**
-- 
2.31.1


* Re: [Tarantool-patches] [RFC v6 3/3] limbo: filter incoming synchro requests
  2021-07-16 21:19 ` [Tarantool-patches] [RFC v6 3/3] limbo: filter incoming synchro requests Cyrill Gorcunov via Tarantool-patches
@ 2021-07-19 17:42   ` Cyrill Gorcunov via Tarantool-patches
  0 siblings, 0 replies; 5+ messages in thread
From: Cyrill Gorcunov via Tarantool-patches @ 2021-07-19 17:42 UTC (permalink / raw)
  To: tml; +Cc: Vladislav Shpilevoy

Guys, here is a slightly updated version which passes the current
replication tests.
---
From: Cyrill Gorcunov <gorcunov@gmail.com>
Date: Sat, 17 Jul 2021 00:14:47 +0300
Subject: [PATCH] limbo: filter incoming synchro requests

When we receive synchro requests we can't just apply them blindly,
because in the worst case they may come from a split-brain configuration
(where a cluster has split into several subclusters, each one has
elected its own leader, and the subclusters then try to merge back into
the original cluster). We need to do our best to detect such
configurations and force these nodes to rejoin from scratch for the
sake of data consistency.

Thus, when processing a request, we first pass it to a packet filter
which validates its contents and refuses to apply the request if the
checks fail.

Depending on the request type, each packet traverses the appropriate
chain(s); a condensed sketch of the PROMOTE checks follows the list.

FILTER_IN
 - Common chain for any synchro packet. We verify
   that if replica_id is nil then it must be a
   PROMOTE request with lsn 0 to migrate the limbo owner

FILTER_CONFIRM
FILTER_ROLLBACK
 - Neither confirm nor rollback requests may come
   while the limbo is empty, since that means the
   synchro queue has already been processed and the
   peer didn't notice it

FILTER_PROMOTE
 - A promote request must come with a new term only,
   otherwise it means the peer didn't notice the election

 - If the limbo's confirmed_lsn is equal to the promote LSN
   then it is a valid request to process

 - If the limbo's confirmed_lsn is bigger than the requested
   one then it is valid in one case only -- limbo owner
   migration, so the queue must be empty

 - If the limbo's confirmed_lsn is less than the promote LSN:
   - If the queue is empty then the transactions have already
     been rolled back and the request is invalid
   - If the queue is not empty and its first entry's LSN is
     greater than the promote LSN then the old data has
     already been either committed or rolled back and the
     request is invalid

FILTER_DEMOTE
 - NOP, reserved for future use
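
Condensed into code-like form, the PROMOTE rules above look roughly as
follows (a sketch only; accept() and reject() are hypothetical
shorthands for "return 0" and "diag_set(...); return -1" in the real
filter_promote() below):

	/* Sketch of filter_promote(); accept()/reject() are shorthand. */
	if (limbo->terms.terms_max > 1 && limbo->terms.terms_max > req->term)
		return reject("obsolete term");
	if (limbo->confirmed_lsn == 0 || limbo->confirmed_lsn == req->lsn)
		return accept();
	if (limbo->confirmed_lsn > req->lsn) {
		if (txn_limbo_is_empty(limbo))
			return accept();	/* limbo owner migration */
		return reject("backward promote LSN (split brain)");
	}
	/* limbo->confirmed_lsn < req->lsn */
	if (txn_limbo_is_empty(limbo))
		return reject("already rolled back (split brain)");
	if (txn_limbo_first_entry(limbo)->lsn > req->lsn)
		return reject("limbo first LSN ahead (split brain)");
	return accept();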

Closes #6036

Signed-off-by: Cyrill Gorcunov <gorcunov@gmail.com>
---
 src/box/applier.cc     |   6 +-
 src/box/box.cc         |   6 +-
 src/box/memtx_engine.c |   3 +-
 src/box/txn_limbo.c    | 289 ++++++++++++++++++++++++++++++++++++++---
 src/box/txn_limbo.h    |   9 +-
 5 files changed, 289 insertions(+), 24 deletions(-)

diff --git a/src/box/applier.cc b/src/box/applier.cc
index b5c3a7b67..0f356018b 100644
--- a/src/box/applier.cc
+++ b/src/box/applier.cc
@@ -458,7 +458,8 @@ applier_wait_snapshot(struct applier *applier)
 				struct synchro_request req;
 				if (xrow_decode_synchro(&row, &req) != 0)
 					diag_raise();
-				txn_limbo_process(&txn_limbo, &req);
+				if (txn_limbo_process(&txn_limbo, &req) != 0)
+					diag_raise();
 			} else if (iproto_type_is_raft_request(row.type)) {
 				struct raft_request req;
 				if (xrow_decode_raft(&row, &req, NULL) != 0)
@@ -873,6 +874,9 @@ apply_synchro_row(uint32_t replica_id, struct xrow_header *row)
 		goto err;
 
 	txn_limbo_terms_lock(&txn_limbo);
+	if (txn_limbo_filter_locked(&txn_limbo, &req) != 0)
+		goto err_unlock;
+
 	struct replica_cb_data rcb_data;
 	struct synchro_entry entry;
 	/*
diff --git a/src/box/box.cc b/src/box/box.cc
index e590df425..6bef10219 100644
--- a/src/box/box.cc
+++ b/src/box/box.cc
@@ -1675,7 +1675,8 @@ box_issue_promote(uint32_t prev_leader_id, int64_t promote_lsn)
 		.lsn = promote_lsn,
 		.term = box_raft()->term,
 	};
-	txn_limbo_process(&txn_limbo, &req);
+	if (txn_limbo_process(&txn_limbo, &req) != 0)
+		diag_raise();
 	assert(txn_limbo_is_empty(&txn_limbo));
 }
 
@@ -1694,7 +1695,8 @@ box_issue_demote(uint32_t prev_leader_id, int64_t promote_lsn)
 		.lsn = promote_lsn,
 		.term = box_raft()->term,
 	};
-	txn_limbo_process(&txn_limbo, &req);
+	if (txn_limbo_process(&txn_limbo, &req) != 0)
+		diag_raise();
 	assert(txn_limbo_is_empty(&txn_limbo));
 }
 
diff --git a/src/box/memtx_engine.c b/src/box/memtx_engine.c
index 0b06e5e63..4aed24fe3 100644
--- a/src/box/memtx_engine.c
+++ b/src/box/memtx_engine.c
@@ -238,7 +238,8 @@ memtx_engine_recover_synchro(const struct xrow_header *row)
 	 * because all its rows have a zero replica_id.
 	 */
 	req.origin_id = req.replica_id;
-	txn_limbo_process(&txn_limbo, &req);
+	if (txn_limbo_process(&txn_limbo, &req) != 0)
+		return -1;
 	return 0;
 }
 
diff --git a/src/box/txn_limbo.c b/src/box/txn_limbo.c
index 437cf199b..b043124a5 100644
--- a/src/box/txn_limbo.c
+++ b/src/box/txn_limbo.c
@@ -731,37 +731,287 @@ txn_limbo_wait_empty(struct txn_limbo *limbo, double timeout)
 	return 0;
 }
 
+enum filter_chain {
+	FILTER_IN,
+	FILTER_CONFIRM,
+	FILTER_ROLLBACK,
+	FILTER_PROMOTE,
+	FILTER_DEMOTE,
+	FILTER_MAX,
+};
+
+/**
+ * Common chain for any incoming packet.
+ */
+static int
+filter_in(struct txn_limbo *limbo, const struct synchro_request *req)
+{
+	(void)limbo;
+
+	if (req->replica_id == REPLICA_ID_NIL) {
+		/*
+		 * The limbo was empty on the instance issuing
+		 * the request. This means this instance must
+		 * empty its limbo as well.
+		 */
+		if (req->lsn != 0 ||
+		    !iproto_type_is_promote_request(req->type)) {
+			say_info("RAFT: rejecting %s request from "
+				 "instance id %u for term %llu. "
+				 "req->replica_id = 0 but lsn %lld.",
+				 iproto_type_name(req->type),
+				 req->origin_id, (long long)req->term,
+				 (long long)req->lsn);
+
+			diag_set(ClientError, ER_UNSUPPORTED,
+				 "Replication",
+				 "empty replica_id with nonzero LSN");
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * Filter CONFIRM and ROLLBACK packets.
+ */
+static int
+filter_confirm_rollback(struct txn_limbo *limbo,
+			const struct synchro_request *req)
+{
+	/*
+	 * When the limbo is empty we have nothing to
+	 * confirm or roll back, so if such a request
+	 * comes in it means a split brain has happened.
+	 */
+	if (!txn_limbo_is_empty(limbo))
+		return 0;
+
+	say_info("RAFT: rejecting %s request from "
+		 "instance id %u for term %llu. "
+		 "Empty limbo detected.",
+		 iproto_type_name(req->type),
+		 req->origin_id,
+		 (long long)req->term);
+
+	diag_set(ClientError, ER_UNSUPPORTED,
+		 "Replication",
+		 "confirm/rollback with empty limbo");
+	return -1;
+}
+
+/**
+ * Filter PROMOTE packets.
+ */
+static int
+filter_promote(struct txn_limbo *limbo, const struct synchro_request *req)
+{
+	struct txn_limbo_terms *tr = &limbo->terms;
+	int64_t promote_lsn = req->lsn;
+
+	/*
+	 * If the term has already been seen it means the
+	 * request comes from a node which didn't notice the
+	 * new elections and thus has been living in its own
+	 * subcluster, so its data is no longer consistent.
+	 */
+	if (tr->terms_max > 1 && tr->terms_max > req->term) {
+		say_info("RAFT: rejecting %s request from "
+			 "instance id %u for term %llu. "
+			 "Max term seen is %llu.",
+			 iproto_type_name(req->type),
+			 req->origin_id,
+			 (long long)req->term,
+			 (long long)tr->terms_max);
+
+		diag_set(ClientError, ER_UNSUPPORTED,
+			 "Replication", "obsolete terms");
+		return -1;
+	}
+
+	/*
+	 * Either the limbo is empty or the new promote will
+	 * roll back all waiting transactions, which
+	 * is fine.
+	 */
+	if (limbo->confirmed_lsn == 0 ||
+	    limbo->confirmed_lsn == promote_lsn)
+		return 0;
+
+	/*
+	 * Explicit split brain situation. Promote
+	 * comes in with an old LSN which we've already
+	 * processed.
+	 */
+	if (limbo->confirmed_lsn > promote_lsn) {
+		/*
+		 * If limbo is empty we're migrating
+		 * the owner.
+		 */
+		if (txn_limbo_is_empty(limbo))
+			return 0;
+
+		say_info("RAFT: rejecting %s request from "
+			 "instance id %u for term %llu. "
+			 "confirmed_lsn %lld > promote_lsn %lld.",
+			 iproto_type_name(req->type),
+			 req->origin_id, (long long)req->term,
+			 (long long)limbo->confirmed_lsn,
+			 (long long)promote_lsn);
+
+		diag_set(ClientError, ER_UNSUPPORTED,
+			 "Replication",
+			 "backward promote LSN (split brain)");
+		return -1;
+	}
+
+	/*
+	 * The last case requires a few subcases.
+	 */
+	assert(limbo->confirmed_lsn < promote_lsn);
+
+	if (txn_limbo_is_empty(limbo)) {
+		/*
+		 * Transactions are already rolled back
+		 * since the limbo is empty.
+		 */
+		say_info("RAFT: rejecting %s request from "
+			 "instance id %u for term %llu. "
+			 "confirmed_lsn %lld < promote_lsn %lld "
+			 "and empty limbo.",
+			 iproto_type_name(req->type),
+			 req->origin_id, (long long)req->term,
+			 (long long)limbo->confirmed_lsn,
+			 (long long)promote_lsn);
+
+		diag_set(ClientError, ER_UNSUPPORTED,
+			 "Replication",
+			 "forward promote LSN "
+			 "(empty limbo, split brain)");
+		return -1;
+	} else {
+		/*
+		 * Some entries are present in the limbo,
+		 * and if the first entry's LSN is greater than
+		 * the requested one then the old data was either
+		 * committed or rolled back, so we can't continue.
+		 */
+		struct txn_limbo_entry *first;
+
+		first = txn_limbo_first_entry(limbo);
+		if (first->lsn > promote_lsn) {
+			say_info("RAFT: rejecting %s request from "
+				 "instance id %u for term %llu. "
+				 "confirmed_lsn %lld < promote_lsn %lld "
+				 "and limbo first lsn %lld.",
+				 iproto_type_name(req->type),
+				 req->origin_id, (long long)req->term,
+				 (long long)limbo->confirmed_lsn,
+				 (long long)promote_lsn,
+				 (long long)first->lsn);
+
+			diag_set(ClientError, ER_UNSUPPORTED,
+				 "Replication",
+				 "promote LSN confilict "
+				 "(limbo LSN ahead, split brain)");
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * Filter DEMOTE packets.
+ */
+static int
+filter_demote(struct txn_limbo *limbo, const struct synchro_request *req)
+{
+	(void)limbo;
+	(void)req;
+	return 0;
+}
+
+static int (*filter_req[FILTER_MAX])
+(struct txn_limbo *limbo, const struct synchro_request *req) = {
+	[FILTER_IN]		= filter_in,
+	[FILTER_CONFIRM]	= filter_confirm_rollback,
+	[FILTER_ROLLBACK]	= filter_confirm_rollback,
+	[FILTER_PROMOTE]	= filter_promote,
+	[FILTER_DEMOTE]		= filter_demote,
+};
+
+int
+txn_limbo_filter_locked(struct txn_limbo *limbo,
+			const struct synchro_request *req)
+{
+	unsigned int mask = (1u << FILTER_IN);
+	unsigned int pos = 0;
+
+	say_info("limbo: filter %s replica_id %u origin_id %u "
+		 "term %lld lsn %lld, queue owner_id %u len %lld "
+		 "confirmed_lsn %lld",
+		 iproto_type_name(req->type),
+		 req->replica_id, req->origin_id,
+		 (long long)req->term, (long long)req->lsn,
+		 limbo->owner_id, (long long)limbo->len,
+		 (long long)limbo->confirmed_lsn);
+
+	switch (req->type) {
+	case IPROTO_CONFIRM:
+		mask |= (1u << FILTER_CONFIRM);
+		break;
+	case IPROTO_ROLLBACK:
+		mask |= (1u << FILTER_ROLLBACK);
+		break;
+	case IPROTO_PROMOTE:
+		mask |= (1u << FILTER_PROMOTE);
+		break;
+	case IPROTO_DEMOTE:
+		mask |= (1u << FILTER_DEMOTE);
+		break;
+	default:
+		say_info("RAFT: rejecting unexpected %d "
+			 "request from instance id %u "
+			 "for term %llu.",
+			 req->type, req->origin_id,
+			 (long long)req->term);
+		diag_set(ClientError, ER_UNSUPPORTED,
+			 "Replication",
+			 "unexpected request type");
+		return -1;
+	}
+
+	while (mask != 0) {
+		if ((mask & 1) != 0) {
+			assert(pos < lengthof(filter_req));
+			if (filter_req[pos](limbo, req) != 0)
+				return -1;
+		}
+		pos++;
+		mask >>= 1;
+	};
+
+	return 0;
+}
+
 void
 txn_limbo_process_locked(struct txn_limbo *limbo,
 			 const struct synchro_request *req)
 {
 	struct txn_limbo_terms *tr = &limbo->terms;
-	uint64_t term = req->term;
 	uint32_t origin = req->origin_id;
+	uint64_t term = req->term;
 
 	if (txn_limbo_term_locked(limbo, origin) < term) {
 		vclock_follow(&tr->terms_map, origin, term);
 		if (term > tr->terms_max)
 			tr->terms_max = term;
-	} else if (iproto_type_is_promote_request(req->type) &&
-		   tr->terms_max > 1) {
-		/* PROMOTE for outdated term. Ignore. */
-		say_info("RAFT: ignoring %s request from instance "
-			 "id %u for term %llu. Greatest term seen "
-			 "before (%llu) is bigger.",
-			 iproto_type_name(req->type), origin, (long long)term,
-			 (long long)tr->terms_max);
-		return;
 	}
 
 	int64_t lsn = req->lsn;
-	if (req->replica_id == REPLICA_ID_NIL) {
-		/*
-		 * The limbo was empty on the instance issuing the request.
-		 * This means this instance must empty its limbo as well.
-		 */
-		assert(lsn == 0 && iproto_type_is_promote_request(req->type));
-	} else if (req->replica_id != limbo->owner_id) {
+	if (req->replica_id != limbo->owner_id) {
 		/*
 		 * Ignore CONFIRM/ROLLBACK messages for a foreign master.
 		 * These are most likely outdated messages for already confirmed
@@ -794,13 +1044,17 @@ txn_limbo_process_locked(struct txn_limbo *limbo,
 	default:
 		unreachable();
 	}
-	return;
 }
 
-void
+int
 txn_limbo_process(struct txn_limbo *limbo,
 		  const struct synchro_request *req)
 {
 	txn_limbo_terms_lock(limbo);
+	if (txn_limbo_filter_locked(limbo, req) != 0) {
+		txn_limbo_terms_unlock(limbo);
+		return -1;
+	}
 	txn_limbo_process_locked(limbo, req);
 	txn_limbo_terms_unlock(limbo);
+	return 0;
diff --git a/src/box/txn_limbo.h b/src/box/txn_limbo.h
index 45687381f..e89cf6e79 100644
--- a/src/box/txn_limbo.h
+++ b/src/box/txn_limbo.h
@@ -372,13 +372,20 @@ txn_limbo_ack(struct txn_limbo *limbo, uint32_t replica_id, int64_t lsn);
 int
 txn_limbo_wait_complete(struct txn_limbo *limbo, struct txn_limbo_entry *entry);
 
+/**
+ * Verify if the request is valid for processing.
+ */
+int
+txn_limbo_filter_locked(struct txn_limbo *limbo,
+			const struct synchro_request *req);
+
 /** Execute a synchronous replication request. */
 void
 txn_limbo_process_locked(struct txn_limbo *limbo,
 			 const struct synchro_request *req);
 
 /** Lock limbo terms and execute a synchronous replication request. */
-void
+int
 txn_limbo_process(struct txn_limbo *limbo, const struct synchro_request *req);
 
 /**
-- 
2.31.1

