[Tarantool-patches] [PATCH v3] replication: fix join vclock obtainment in relay_initial_join
Ilya Kosarev
i.kosarev at tarantool.org
Tue Oct 29 17:23:56 MSK 2019
join_vclock test could fail under heavy load because the vclock could
advance ahead of the actual WAL state. In order to fix this we updated
wal_sync so that it can now return the up-to-date vclock of the flushed state.
It is also better to get the max index and the index count in a single request
in the join_vclock test. With the fixes mentioned above it is not fragile anymore.
Closes #4160
---
https://github.com/tarantool/tarantool/tree/i.kosarev/gh-4160-fix-join-vclock
https://github.com/tarantool/tarantool/issues/4160
src/box/relay.cc | 7 +++----
src/box/vinyl.c | 4 ++--
src/box/wal.c | 23 +++++++++++++++++------
src/box/wal.h | 2 +-
test/replication/join_vclock.result | 5 +----
test/replication/join_vclock.test.lua | 3 +--
test/replication/suite.ini | 1 -
7 files changed, 25 insertions(+), 20 deletions(-)
diff --git a/src/box/relay.cc b/src/box/relay.cc
index e849fcf4f..b9ed27503 100644
--- a/src/box/relay.cc
+++ b/src/box/relay.cc
@@ -307,13 +307,12 @@ relay_initial_join(int fd, uint64_t sync, struct vclock *vclock)
/*
* Sync WAL to make sure that all changes visible from
- * the frozen read view are successfully committed.
+ * the frozen read view are successfully committed and
+ * obtain corresponding vclock.
*/
- if (wal_sync() != 0)
+ if (wal_sync(vclock) != 0)
diag_raise();
- vclock_copy(vclock, &replicaset.vclock);
-
/* Respond to the JOIN request with the current vclock. */
struct xrow_header row;
xrow_encode_vclock_xc(&row, vclock);
diff --git a/src/box/vinyl.c b/src/box/vinyl.c
index a2bbaa529..e160f3764 100644
--- a/src/box/vinyl.c
+++ b/src/box/vinyl.c
@@ -1087,7 +1087,7 @@ vinyl_space_check_format(struct space *space, struct tuple_format *format)
*/
int rc;
if (need_wal_sync) {
- rc = wal_sync();
+ rc = wal_sync(NULL);
if (rc != 0)
goto out;
}
@@ -4168,7 +4168,7 @@ vinyl_space_build_index(struct space *src_space, struct index *new_index,
*/
int rc;
if (need_wal_sync) {
- rc = wal_sync();
+ rc = wal_sync(NULL);
if (rc != 0)
goto out;
}
diff --git a/src/box/wal.c b/src/box/wal.c
index 5e2c13e0e..eabc73fff 100644
--- a/src/box/wal.c
+++ b/src/box/wal.c
@@ -519,21 +519,28 @@ wal_free(void)
wal_writer_destroy(writer);
}
+struct wal_vclock_msg {
+ struct cbus_call_msg base;
+ struct vclock *vclock;
+};
+
static int
-wal_sync_f(struct cbus_call_msg *msg)
+wal_sync_f(struct cbus_call_msg *data)
{
- (void)msg;
+ struct wal_vclock_msg *msg = (struct wal_vclock_msg *) data;
struct wal_writer *writer = &wal_writer_singleton;
if (writer->in_rollback.route != NULL) {
/* We're rolling back a failed write. */
diag_set(ClientError, ER_WAL_IO);
return -1;
}
+ if (msg->vclock != NULL)
+ vclock_copy(msg->vclock, &writer->vclock);
return 0;
}
int
-wal_sync(void)
+wal_sync(struct vclock *vclock)
{
ERROR_INJECT(ERRINJ_WAL_SYNC, {
diag_set(ClientError, ER_INJECTION, "wal sync");
@@ -541,17 +548,21 @@ wal_sync(void)
});
struct wal_writer *writer = &wal_writer_singleton;
- if (writer->wal_mode == WAL_NONE)
+ if (writer->wal_mode == WAL_NONE) {
+ if (vclock != NULL)
+ vclock_copy(vclock, &writer->vclock);
return 0;
+ }
if (!stailq_empty(&writer->rollback)) {
/* We're rolling back a failed write. */
diag_set(ClientError, ER_WAL_IO);
return -1;
}
bool cancellable = fiber_set_cancellable(false);
- struct cbus_call_msg msg;
+ struct wal_vclock_msg msg;
+ msg.vclock = vclock;
int rc = cbus_call(&writer->wal_pipe, &writer->tx_prio_pipe,
- &msg, wal_sync_f, NULL, TIMEOUT_INFINITY);
+ &msg.base, wal_sync_f, NULL, TIMEOUT_INFINITY);
fiber_set_cancellable(cancellable);
return rc;
}
diff --git a/src/box/wal.h b/src/box/wal.h
index b76b0a41f..d82934ffc 100644
--- a/src/box/wal.h
+++ b/src/box/wal.h
@@ -184,7 +184,7 @@ wal_mode();
* to disk. Returns 0 on success, -1 if write failed.
*/
int
-wal_sync(void);
+wal_sync(struct vclock *vclock);
struct wal_checkpoint {
struct cbus_call_msg base;
diff --git a/test/replication/join_vclock.result b/test/replication/join_vclock.result
index a9781073d..d6d9af783 100644
--- a/test/replication/join_vclock.result
+++ b/test/replication/join_vclock.result
@@ -67,10 +67,7 @@ test_run:cmd("switch replica1")
---
- true
...
-cnt = box.space.test.index[0]:count()
----
-...
-box.space.test.index.primary:max()[1] == cnt - 1
+box.space.test.index.primary:max()[1] == box.space.test.index.primary:count() - 1
---
- true
...
diff --git a/test/replication/join_vclock.test.lua b/test/replication/join_vclock.test.lua
index 0b60dffc2..a813ba31f 100644
--- a/test/replication/join_vclock.test.lua
+++ b/test/replication/join_vclock.test.lua
@@ -26,8 +26,7 @@ ch:get()
errinj.set("ERRINJ_RELAY_FINAL_SLEEP", false)
test_run:cmd("switch replica1")
-cnt = box.space.test.index[0]:count()
-box.space.test.index.primary:max()[1] == cnt - 1
+box.space.test.index.primary:max()[1] == box.space.test.index.primary:count() - 1
test_run:cmd("switch default")
replica_set.drop_all(test_run)
diff --git a/test/replication/suite.ini b/test/replication/suite.ini
index 384dac677..ed1de3140 100644
--- a/test/replication/suite.ini
+++ b/test/replication/suite.ini
@@ -12,7 +12,6 @@ long_run = prune.test.lua
is_parallel = True
pretest_clean = True
fragile = errinj.test.lua ; gh-3870
- join_vclock.test.lua ; gh-4160
long_row_timeout.test.lua ; gh-4351
skip_conflict_row.test.lua ; gh-4457
sync.test.lua ; gh-3835 gh-3877
--
2.17.1
More information about the Tarantool-patches
mailing list