[Tarantool-patches] [PATCH v4 08/11] wal: track relay vclock and collect logs in wal thread

Georgy Kirichenko georgy at tarantool.org
Wed Feb 12 12:39:17 MSK 2020


Wal uses a matrix clock (mclock) in order to track the vclocks reported
by relays. This allows wal to build the minimal boundary vclock, which
is used to decide which unused xlog files can be collected. Box protects
logs from collection using the wal_set_gc_first_vclock() call.
In order to preserve logs while a replica is joining, gc tracks every
join vclock as a checkpoint with a special mark: is_join_readview set
to true.

Also, there are no more gc consumers in the tx thread, so the gc
consumer info was removed from the box.info output and the
corresponding lines were commented out in the tests.

Part of #3795, #980
---
 src/box/box.cc                        |  39 ++--
 src/box/gc.c                          | 216 ++++++---------------
 src/box/gc.h                          |  95 ++-------
 src/box/lua/info.c                    |  29 +--
 src/box/relay.cc                      | 129 +-----------
 src/box/replication.cc                |  34 +---
 src/box/wal.c                         | 269 +++++++++++++++++++++++---
 src/box/wal.h                         |  17 +-
 test/replication/gc_no_space.result   |  30 +--
 test/replication/gc_no_space.test.lua |  12 +-
 10 files changed, 366 insertions(+), 504 deletions(-)
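
For illustration, here is a minimal, self-contained sketch (not part of
the patch) of the boundary computation the matrix clock provides: every
relay reports the vclock it has acknowledged, and the wal gc boundary is
the component-wise minimum of those reports, roughly what
mclock_get(&writer->mclock, -1, ...) yields in wal_collect_garbage()
below. The toy_vclock type and toy_min_boundary() helper are
hypothetical stand-ins for the real struct vclock and struct mclock
introduced earlier in this series.

#include <stdint.h>
#include <stdio.h>

enum { TOY_VCLOCK_MAX = 4 };

/* Simplified stand-in for struct vclock: one LSN per replica id. */
struct toy_vclock {
	int64_t lsn[TOY_VCLOCK_MAX];
};

/*
 * Component-wise minimum over the vclocks acknowledged by relays:
 * the "minimal boundary vclock" the commit message refers to.
 */
static void
toy_min_boundary(const struct toy_vclock *acked, int count,
		 struct toy_vclock *out)
{
	for (int part = 0; part < TOY_VCLOCK_MAX; part++) {
		int64_t min = acked[0].lsn[part];
		for (int i = 1; i < count; i++) {
			if (acked[i].lsn[part] < min)
				min = acked[i].lsn[part];
		}
		out->lsn[part] = min;
	}
}

int
main(void)
{
	struct toy_vclock acked[2] = {
		{ .lsn = {10, 7, 3, 0} },	/* acknowledged by replica A */
		{ .lsn = { 8, 9, 3, 1} },	/* acknowledged by replica B */
	};
	struct toy_vclock boundary;
	toy_min_boundary(acked, 2, &boundary);
	/* Prints "8 7 3 0": xlog files entirely below it may go. */
	for (int i = 0; i < TOY_VCLOCK_MAX; i++)
		printf("%lld ", (long long)boundary.lsn[i]);
	printf("\n");
	return 0;
}

An xlog file whose rows all precede this boundary has been received by
every registered replica and can be removed, unless gc_first_vclock
pins it on behalf of checkpoints or join readviews.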

diff --git a/src/box/box.cc b/src/box/box.cc
index c5aaad295..17495a211 100644
--- a/src/box/box.cc
+++ b/src/box/box.cc
@@ -1566,11 +1566,12 @@ box_process_register(struct ev_io *io, struct xrow_header *header)
 			  "wal_mode = 'none'");
 	}
 
-	struct gc_consumer *gc = gc_consumer_register(&replicaset.vclock,
-				"replica %s", tt_uuid_str(&instance_uuid));
-	if (gc == NULL)
-		diag_raise();
-	auto gc_guard = make_scoped_guard([&] { gc_consumer_unregister(gc); });
+	struct vclock register_vclock;
+	vclock_copy(&register_vclock, &replicaset.vclock);
+	gc_add_join_readview(&register_vclock);
+	auto gc_guard = make_scoped_guard([&] {
+		gc_del_join_readview(&register_vclock);
+	});
 
 	say_info("registering replica %s at %s",
 		 tt_uuid_str(&instance_uuid), sio_socketname(io->fd));
@@ -1609,12 +1610,8 @@ box_process_register(struct ev_io *io, struct xrow_header *header)
 	 * registration was complete and assign it to the
 	 * replica.
 	 */
-	gc_consumer_advance(gc, &stop_vclock);
 	replica = replica_by_uuid(&instance_uuid);
-	if (replica->gc != NULL)
-		gc_consumer_unregister(replica->gc);
-	replica->gc = gc;
-	gc_guard.is_active = false;
+	wal_relay_status_update(replica->id, &stop_vclock);
 }
 
 void
@@ -1708,11 +1705,12 @@ box_process_join(struct ev_io *io, struct xrow_header *header)
 	 * Register the replica as a WAL consumer so that
 	 * it can resume FINAL JOIN where INITIAL JOIN ends.
 	 */
-	struct gc_consumer *gc = gc_consumer_register(&replicaset.vclock,
-				"replica %s", tt_uuid_str(&instance_uuid));
-	if (gc == NULL)
-		diag_raise();
-	auto gc_guard = make_scoped_guard([&] { gc_consumer_unregister(gc); });
+	struct vclock join_vclock;
+	vclock_copy(&join_vclock, &replicaset.vclock);
+	gc_add_join_readview(&join_vclock);
+	auto gc_guard = make_scoped_guard([&] {
+		gc_del_join_readview(&join_vclock);
+	});
 
 	say_info("joining replica %s at %s",
 		 tt_uuid_str(&instance_uuid), sio_socketname(io->fd));
@@ -1757,17 +1755,8 @@ box_process_join(struct ev_io *io, struct xrow_header *header)
 	row.sync = header->sync;
 	if (coio_write_xrow(io, &row) < 0)
 		diag_raise();
-
-	/*
-	 * Advance the WAL consumer state to the position where
-	 * FINAL JOIN ended and assign it to the replica.
-	 */
-	gc_consumer_advance(gc, &stop_vclock);
 	replica = replica_by_uuid(&instance_uuid);
-	if (replica->gc != NULL)
-		gc_consumer_unregister(replica->gc);
-	replica->gc = gc;
-	gc_guard.is_active = false;
+	wal_relay_status_update(replica->id, &stop_vclock);
 }
 
 void
diff --git a/src/box/gc.c b/src/box/gc.c
index f5c387f9d..eb0548aa2 100644
--- a/src/box/gc.c
+++ b/src/box/gc.c
@@ -65,35 +65,6 @@ gc_cleanup_fiber_f(va_list);
 static int
 gc_checkpoint_fiber_f(va_list);
 
-/**
- * Comparator used for ordering gc_consumer objects by signature
- * in a binary tree.
- */
-static inline int
-gc_consumer_cmp(const struct gc_consumer *a, const struct gc_consumer *b)
-{
-	if (vclock_sum(&a->vclock) < vclock_sum(&b->vclock))
-		return -1;
-	if (vclock_sum(&a->vclock) > vclock_sum(&b->vclock))
-		return 1;
-	if ((intptr_t)a < (intptr_t)b)
-		return -1;
-	if ((intptr_t)a > (intptr_t)b)
-		return 1;
-	return 0;
-}
-
-rb_gen(MAYBE_UNUSED static inline, gc_tree_, gc_tree_t,
-       struct gc_consumer, node, gc_consumer_cmp);
-
-/** Free a consumer object. */
-static void
-gc_consumer_delete(struct gc_consumer *consumer)
-{
-	TRASH(consumer);
-	free(consumer);
-}
-
 /** Free a checkpoint object. */
 static void
 gc_checkpoint_delete(struct gc_checkpoint *checkpoint)
@@ -110,7 +81,6 @@ gc_init(void)
 
 	vclock_create(&gc.vclock);
 	rlist_create(&gc.checkpoints);
-	gc_tree_new(&gc.consumers);
 	fiber_cond_create(&gc.cleanup_cond);
 	checkpoint_schedule_cfg(&gc.checkpoint_schedule, 0, 0);
 	engine_collect_garbage(&gc.vclock);
@@ -142,15 +112,6 @@ gc_free(void)
 				 next_checkpoint) {
 		gc_checkpoint_delete(checkpoint);
 	}
-	/* Free all registered consumers. */
-	struct gc_consumer *consumer = gc_tree_first(&gc.consumers);
-	while (consumer != NULL) {
-		struct gc_consumer *next = gc_tree_next(&gc.consumers,
-							consumer);
-		gc_tree_remove(&gc.consumers, consumer);
-		gc_consumer_delete(consumer);
-		consumer = next;
-	}
 }
 
 /**
@@ -161,7 +122,6 @@ gc_free(void)
 static void
 gc_run_cleanup(void)
 {
-	bool run_wal_gc = false;
 	bool run_engine_gc = false;
 
 	/*
@@ -170,10 +130,11 @@ gc_run_cleanup(void)
 	 * checkpoints, plus we can't remove checkpoints that
 	 * are still in use.
 	 */
-	struct gc_checkpoint *checkpoint = NULL;
-	while (true) {
-		checkpoint = rlist_first_entry(&gc.checkpoints,
-				struct gc_checkpoint, in_checkpoints);
+	struct gc_checkpoint *checkpoint = NULL, *tmp;
+	rlist_foreach_entry_safe(checkpoint, &gc.checkpoints,
+				 in_checkpoints, tmp) {
+		if (checkpoint->is_join_readview)
+			continue;
 		if (gc.checkpoint_count <= gc.min_checkpoint_count)
 			break;
 		if (!rlist_empty(&checkpoint->refs))
@@ -187,23 +148,7 @@ gc_run_cleanup(void)
 	/* At least one checkpoint must always be available. */
 	assert(checkpoint != NULL);
 
-	/*
-	 * Find the vclock of the oldest WAL row to keep.
-	 * Note, we must keep all WALs created after the
-	 * oldest checkpoint, even if no consumer needs them.
-	 */
-	const struct vclock *vclock = (gc_tree_empty(&gc.consumers) ? NULL :
-				       &gc_tree_first(&gc.consumers)->vclock);
-	if (vclock == NULL ||
-	    vclock_sum(vclock) > vclock_sum(&checkpoint->vclock))
-		vclock = &checkpoint->vclock;
-
-	if (vclock_sum(vclock) > vclock_sum(&gc.vclock)) {
-		vclock_copy(&gc.vclock, vclock);
-		run_wal_gc = true;
-	}
-
-	if (!run_engine_gc && !run_wal_gc)
+	if (!run_engine_gc)
 		return; /* nothing to do */
 
 	/*
@@ -219,10 +164,10 @@ gc_run_cleanup(void)
 	 * we never remove the last checkpoint and the following
 	 * WALs so this may only affect backup checkpoints.
 	 */
-	if (run_engine_gc)
-		engine_collect_garbage(&checkpoint->vclock);
-	if (run_wal_gc)
-		wal_collect_garbage(vclock);
+	engine_collect_garbage(&checkpoint->vclock);
+	checkpoint = rlist_first_entry(&gc.checkpoints,
+					struct gc_checkpoint, in_checkpoints);
+	wal_set_gc_first_vclock(&checkpoint->vclock);
 }
 
 static int
@@ -278,28 +223,10 @@ void
 gc_advance(const struct vclock *vclock)
 {
 	/*
-	 * In case of emergency ENOSPC, the WAL thread may delete
-	 * WAL files needed to restore from backup checkpoints,
-	 * which would be kept by the garbage collector otherwise.
-	 * Bring the garbage collector vclock up to date.
+	 * Bring the garbage collector vclock up to date with the
+	 * oldest xlog file available in the wal directory.
 	 */
 	vclock_copy(&gc.vclock, vclock);
-
-	struct gc_consumer *consumer = gc_tree_first(&gc.consumers);
-	while (consumer != NULL &&
-	       vclock_sum(&consumer->vclock) < vclock_sum(vclock)) {
-		struct gc_consumer *next = gc_tree_next(&gc.consumers,
-							consumer);
-		assert(!consumer->is_inactive);
-		consumer->is_inactive = true;
-		gc_tree_remove(&gc.consumers, consumer);
-
-		say_crit("deactivated WAL consumer %s at %s", consumer->name,
-			 vclock_to_string(&consumer->vclock));
-
-		consumer = next;
-	}
-	gc_schedule_cleanup();
 }
 
 void
@@ -329,6 +256,10 @@ void
 gc_add_checkpoint(const struct vclock *vclock)
 {
 	struct gc_checkpoint *last_checkpoint = gc_last_checkpoint();
+	while (last_checkpoint != NULL && last_checkpoint->is_join_readview) {
+		last_checkpoint = rlist_prev_entry(last_checkpoint,
+						   in_checkpoints);
+	}
 	if (last_checkpoint != NULL &&
 	    vclock_sum(&last_checkpoint->vclock) == vclock_sum(vclock)) {
 		/*
@@ -351,6 +282,8 @@ gc_add_checkpoint(const struct vclock *vclock)
 	if (checkpoint == NULL)
 		panic("out of memory");
 
+	if (rlist_empty(&gc.checkpoints))
+		wal_set_gc_first_vclock(vclock);
 	rlist_create(&checkpoint->refs);
 	vclock_copy(&checkpoint->vclock, vclock);
 	rlist_add_tail_entry(&gc.checkpoints, checkpoint, in_checkpoints);
@@ -359,6 +292,47 @@ gc_add_checkpoint(const struct vclock *vclock)
 	gc_schedule_cleanup();
 }
 
+void
+gc_add_join_readview(const struct vclock *vclock)
+{
+	struct gc_checkpoint *checkpoint = calloc(1, sizeof(*checkpoint));
+	/*
+	 * It is not a fatal error if we could not protect the
+	 * subsequent logs from being collected.
+	 */
+	if (checkpoint == NULL) {
+		say_error("GC: could not add a join readview");
+		return;
+	}
+	if (rlist_empty(&gc.checkpoints))
+		wal_set_gc_first_vclock(vclock);
+	checkpoint->is_join_readview = true;
+	rlist_create(&checkpoint->refs);
+	vclock_copy(&checkpoint->vclock, vclock);
+	rlist_add_tail_entry(&gc.checkpoints, checkpoint, in_checkpoints);
+}
+
+void
+gc_del_join_readview(const struct vclock *vclock)
+{
+	struct gc_checkpoint *checkpoint;
+	rlist_foreach_entry(checkpoint, &gc.checkpoints, in_checkpoints) {
+		if (!checkpoint->is_join_readview ||
+		    vclock_compare(&checkpoint->vclock, vclock) != 0)
+			continue;
+		rlist_del(&checkpoint->in_checkpoints);
+		free(checkpoint);
+		checkpoint = rlist_first_entry(&gc.checkpoints,
+					       struct gc_checkpoint,
+					       in_checkpoints);
+		wal_set_gc_first_vclock(&checkpoint->vclock);
+		return;
+	}
+	/* A join readview was not found. */
+	say_error("GC: could not delete a join readview");
+}
+
+
 static int
 gc_do_checkpoint(void)
 {
@@ -513,75 +487,3 @@ gc_unref_checkpoint(struct gc_checkpoint_ref *ref)
 	rlist_del_entry(ref, in_refs);
 	gc_schedule_cleanup();
 }
-
-struct gc_consumer *
-gc_consumer_register(const struct vclock *vclock, const char *format, ...)
-{
-	struct gc_consumer *consumer = calloc(1, sizeof(*consumer));
-	if (consumer == NULL) {
-		diag_set(OutOfMemory, sizeof(*consumer),
-			 "malloc", "struct gc_consumer");
-		return NULL;
-	}
-
-	va_list ap;
-	va_start(ap, format);
-	vsnprintf(consumer->name, GC_NAME_MAX, format, ap);
-	va_end(ap);
-
-	vclock_copy(&consumer->vclock, vclock);
-	gc_tree_insert(&gc.consumers, consumer);
-	return consumer;
-}
-
-void
-gc_consumer_unregister(struct gc_consumer *consumer)
-{
-	if (!consumer->is_inactive) {
-		gc_tree_remove(&gc.consumers, consumer);
-		gc_schedule_cleanup();
-	}
-	gc_consumer_delete(consumer);
-}
-
-void
-gc_consumer_advance(struct gc_consumer *consumer, const struct vclock *vclock)
-{
-	if (consumer->is_inactive)
-		return;
-
-	int64_t signature = vclock_sum(vclock);
-	int64_t prev_signature = vclock_sum(&consumer->vclock);
-
-	assert(signature >= prev_signature);
-	if (signature == prev_signature)
-		return; /* nothing to do */
-
-	/*
-	 * Do not update the tree unless the tree invariant
-	 * is violated.
-	 */
-	struct gc_consumer *next = gc_tree_next(&gc.consumers, consumer);
-	bool update_tree = (next != NULL &&
-			    signature >= vclock_sum(&next->vclock));
-
-	if (update_tree)
-		gc_tree_remove(&gc.consumers, consumer);
-
-	vclock_copy(&consumer->vclock, vclock);
-
-	if (update_tree)
-		gc_tree_insert(&gc.consumers, consumer);
-
-	gc_schedule_cleanup();
-}
-
-struct gc_consumer *
-gc_consumer_iterator_next(struct gc_consumer_iterator *it)
-{
-	if (it->curr != NULL)
-		it->curr = gc_tree_next(&gc.consumers, it->curr);
-	else
-		it->curr = gc_tree_first(&gc.consumers);
-	return it->curr;
-}
diff --git a/src/box/gc.h b/src/box/gc.h
index 827a5db8e..49fb05fd4 100644
--- a/src/box/gc.h
+++ b/src/box/gc.h
@@ -45,12 +45,9 @@ extern "C" {
 #endif /* defined(__cplusplus) */
 
 struct fiber;
-struct gc_consumer;
 
 enum { GC_NAME_MAX = 64 };
 
-typedef rb_node(struct gc_consumer) gc_node_t;
-
 /**
  * Garbage collector keeps track of all preserved checkpoints.
  * The following structure represents a checkpoint.
@@ -60,6 +57,8 @@ struct gc_checkpoint {
 	struct rlist in_checkpoints;
 	/** VClock of the checkpoint. */
 	struct vclock vclock;
+	/** True when it is a join readview. */
+	bool is_join_readview;
 	/**
 	 * List of checkpoint references, linked by
 	 * gc_checkpoint_ref::in_refs.
@@ -81,26 +80,6 @@ struct gc_checkpoint_ref {
 	char name[GC_NAME_MAX];
 };
 
-/**
- * The object of this type is used to prevent garbage
- * collection from removing WALs that are still in use.
- */
-struct gc_consumer {
-	/** Link in gc_state::consumers. */
-	gc_node_t node;
-	/** Human-readable name. */
-	char name[GC_NAME_MAX];
-	/** The vclock tracked by this consumer. */
-	struct vclock vclock;
-	/**
-	 * This flag is set if a WAL needed by this consumer was
-	 * deleted by the WAL thread on ENOSPC.
-	 */
-	bool is_inactive;
-};
-
-typedef rb_tree(struct gc_consumer) gc_tree_t;
-
 /** Garbage collection state. */
 struct gc_state {
 	/** VClock of the oldest WAL row available on the instance. */
@@ -121,8 +100,6 @@ struct gc_state {
 	 * to the tail. Linked by gc_checkpoint::in_checkpoints.
 	 */
 	struct rlist checkpoints;
-	/** Registered consumers, linked by gc_consumer::node. */
-	gc_tree_t consumers;
 	/** Fiber responsible for periodic checkpointing. */
 	struct fiber *checkpoint_fiber;
 	/** Schedule of periodic checkpoints. */
@@ -208,7 +185,6 @@ gc_free(void);
 
 /**
  * Advance the garbage collector vclock to the given position.
- * Deactivate WAL consumers that need older data.
  */
 void
 gc_advance(const struct vclock *vclock);
@@ -219,7 +195,7 @@ gc_advance(const struct vclock *vclock);
  *
  * Note, this function doesn't run garbage collector so
  * changes will take effect only after a new checkpoint
- * is created or a consumer is unregistered.
+ * is created.
  */
 void
 gc_set_min_checkpoint_count(int min_checkpoint_count);
@@ -239,6 +215,19 @@ gc_set_checkpoint_interval(double interval);
 void
 gc_add_checkpoint(const struct vclock *vclock);
 
+/**
+ * Register a join readview in the garbage collector state in order
+ * to protect the subsequent logs from being collected.
+ */
+void
+gc_add_join_readview(const struct vclock *vclock);
+
+/**
+ * Unregister a join readview from the garbage collector state.
+ */
+void
+gc_del_join_readview(const struct vclock *vclock);
+
 /**
  * Make a *manual* checkpoint.
  * This is entry point for box.snapshot() and SIGUSR1 signal
@@ -283,58 +272,6 @@ gc_ref_checkpoint(struct gc_checkpoint *checkpoint,
 void
 gc_unref_checkpoint(struct gc_checkpoint_ref *ref);
 
-/**
- * Register a consumer.
- *
- * This will stop garbage collection of WAL files newer than
- * @vclock until the consumer is unregistered or advanced.
- * @format... specifies a human-readable name of the consumer,
- * it will be used for listing the consumer in box.info.gc().
- *
- * Returns a pointer to the new consumer object or NULL on
- * memory allocation failure.
- */
-CFORMAT(printf, 2, 3)
-struct gc_consumer *
-gc_consumer_register(const struct vclock *vclock, const char *format, ...);
-
-/**
- * Unregister a consumer and invoke garbage collection
- * if needed.
- */
-void
-gc_consumer_unregister(struct gc_consumer *consumer);
-
-/**
- * Advance the vclock tracked by a consumer and
- * invoke garbage collection if needed.
- */
-void
-gc_consumer_advance(struct gc_consumer *consumer, const struct vclock *vclock);
-
-/**
- * Iterator over registered consumers. The iterator is valid
- * as long as the caller doesn't yield.
- */
-struct gc_consumer_iterator {
-	struct gc_consumer *curr;
-};
-
-/** Init an iterator over consumers. */
-static inline void
-gc_consumer_iterator_init(struct gc_consumer_iterator *it)
-{
-	it->curr = NULL;
-}
-
-/**
- * Iterate to the next registered consumer. Return a pointer
- * to the next consumer object or NULL if there is no more
- * consumers.
- */
-struct gc_consumer *
-gc_consumer_iterator_next(struct gc_consumer_iterator *it);
-
 #if defined(__cplusplus)
 } /* extern "C" */
 #endif /* defined(__cplusplus) */
diff --git a/src/box/lua/info.c b/src/box/lua/info.c
index c004fad27..aba9a4b7c 100644
--- a/src/box/lua/info.c
+++ b/src/box/lua/info.c
@@ -399,6 +399,8 @@ lbox_info_gc_call(struct lua_State *L)
 	count = 0;
 	struct gc_checkpoint *checkpoint;
 	gc_foreach_checkpoint(checkpoint) {
+		if (checkpoint->is_join_readview)
+			continue;
 		lua_createtable(L, 0, 2);
 
 		lua_pushstring(L, "vclock");
@@ -423,33 +425,6 @@ lbox_info_gc_call(struct lua_State *L)
 	}
 	lua_settable(L, -3);
 
-	lua_pushstring(L, "consumers");
-	lua_newtable(L);
-
-	struct gc_consumer_iterator consumers;
-	gc_consumer_iterator_init(&consumers);
-
-	count = 0;
-	struct gc_consumer *consumer;
-	while ((consumer = gc_consumer_iterator_next(&consumers)) != NULL) {
-		lua_createtable(L, 0, 3);
-
-		lua_pushstring(L, "name");
-		lua_pushstring(L, consumer->name);
-		lua_settable(L, -3);
-
-		lua_pushstring(L, "vclock");
-		lbox_pushvclock(L, &consumer->vclock);
-		lua_settable(L, -3);
-
-		lua_pushstring(L, "signature");
-		luaL_pushint64(L, vclock_sum(&consumer->vclock));
-		lua_settable(L, -3);
-
-		lua_rawseti(L, -2, ++count);
-	}
-	lua_settable(L, -3);
-
 	return 1;
 }
 
diff --git a/src/box/relay.cc b/src/box/relay.cc
index 5b0d3f023..13c8f4c28 100644
--- a/src/box/relay.cc
+++ b/src/box/relay.cc
@@ -66,23 +66,6 @@ struct relay_status_msg {
 	struct vclock vclock;
 };
 
-/**
- * Cbus message to update replica gc state in tx thread.
- */
-struct relay_gc_msg {
-	/** Parent */
-	struct cmsg msg;
-	/**
-	 * Link in the list of pending gc messages,
-	 * see relay::pending_gc.
-	 */
-	struct stailq_entry in_pending;
-	/** Relay instance */
-	struct relay *relay;
-	/** Vclock to advance to */
-	struct vclock vclock;
-};
-
 /** State of a replication relay. */
 struct relay {
 	/** The thread in which we relay data to the replica. */
@@ -123,11 +106,6 @@ struct relay {
 	struct cpipe relay_pipe;
 	/** Status message */
 	struct relay_status_msg status_msg;
-	/**
-	 * List of garbage collection messages awaiting
-	 * confirmation from the replica.
-	 */
-	struct stailq pending_gc;
 	/** Time when last row was sent to peer. */
 	double last_row_time;
 	/** Relay sync state. */
@@ -185,7 +163,6 @@ relay_new(struct replica *replica)
 	relay->last_row_time = ev_monotonic_now(loop());
 	fiber_cond_create(&relay->reader_cond);
 	diag_create(&relay->diag);
-	stailq_create(&relay->pending_gc);
 	relay->state = RELAY_OFF;
 	return relay;
 }
@@ -241,12 +218,6 @@ relay_exit(struct relay *relay)
 static void
 relay_stop(struct relay *relay)
 {
-	struct relay_gc_msg *gc_msg, *next_gc_msg;
-	stailq_foreach_entry_safe(gc_msg, next_gc_msg,
-				  &relay->pending_gc, in_pending) {
-		free(gc_msg);
-	}
-	stailq_create(&relay->pending_gc);
 	if (relay->r != NULL)
 		recovery_delete(relay->r);
 	relay->r = NULL;
@@ -383,7 +354,9 @@ relay_final_join(int fd, uint64_t sync, struct vclock *start_vclock,
 static void
 relay_status_update(struct cmsg *msg)
 {
+	struct relay_status_msg *status = (struct relay_status_msg *)msg;
 	msg->route = NULL;
+	fiber_cond_signal(&status->relay->reader_cond);
 }
 
 /**
@@ -393,6 +366,8 @@ static void
 tx_status_update(struct cmsg *msg)
 {
 	struct relay_status_msg *status = (struct relay_status_msg *)msg;
+	if (!status->relay->replica->anon)
+		wal_relay_status_update(status->relay->replica->id, &status->vclock);
 	vclock_copy(&status->relay->tx.vclock, &status->vclock);
 	static const struct cmsg_hop route[] = {
 		{relay_status_update, NULL}
@@ -401,74 +376,6 @@ tx_status_update(struct cmsg *msg)
 	cpipe_push(&status->relay->relay_pipe, msg);
 }
 
-/**
- * Update replica gc state in tx thread.
- */
-static void
-tx_gc_advance(struct cmsg *msg)
-{
-	struct relay_gc_msg *m = (struct relay_gc_msg *)msg;
-	gc_consumer_advance(m->relay->replica->gc, &m->vclock);
-	free(m);
-}
-
-static int
-relay_on_close_log_f(struct trigger *trigger, void * /* event */)
-{
-	static const struct cmsg_hop route[] = {
-		{tx_gc_advance, NULL}
-	};
-	struct relay *relay = (struct relay *)trigger->data;
-	struct relay_gc_msg *m = (struct relay_gc_msg *)malloc(sizeof(*m));
-	if (m == NULL) {
-		say_warn("failed to allocate relay gc message");
-		return 0;
-	}
-	cmsg_init(&m->msg, route);
-	m->relay = relay;
-	vclock_copy(&m->vclock, &relay->r->vclock);
-	/*
-	 * Do not invoke garbage collection until the replica
-	 * confirms that it has received data stored in the
-	 * sent xlog.
-	 */
-	stailq_add_tail_entry(&relay->pending_gc, m, in_pending);
-	return 0;
-}
-
-/**
- * Invoke pending garbage collection requests.
- *
- * This function schedules the most recent gc message whose
- * vclock is less than or equal to the given one. Older
- * messages are discarded as their job will be done by the
- * scheduled message anyway.
- */
-static inline void
-relay_schedule_pending_gc(struct relay *relay, const struct vclock *vclock)
-{
-	struct relay_gc_msg *curr, *next, *gc_msg = NULL;
-	stailq_foreach_entry_safe(curr, next, &relay->pending_gc, in_pending) {
-		/*
-		 * We may delete a WAL file only if its vclock is
-		 * less than or equal to the vclock acknowledged by
-		 * the replica. Even if the replica's signature is
-		 * is greater, but the vclocks are incomparable, we
-		 * must not delete the WAL, because there may still
-		 * be rows not applied by the replica in it while
-		 * the greater signatures is due to changes pulled
-		 * from other members of the cluster.
-		 */
-		if (vclock_compare(&curr->vclock, vclock) > 0)
-			break;
-		stailq_shift(&relay->pending_gc);
-		free(gc_msg);
-		gc_msg = curr;
-	}
-	if (gc_msg != NULL)
-		cpipe_push(&relay->tx_pipe, &gc_msg->msg);
-}
-
 static void
 relay_set_error(struct relay *relay, struct error *e)
 {
@@ -569,17 +476,6 @@ relay_subscribe_f(va_list ap)
 	cbus_pair("tx", relay->endpoint.name, &relay->tx_pipe,
 		  &relay->relay_pipe, NULL, NULL, cbus_process);
 
-	/*
-	 * Setup garbage collection trigger.
-	 * Not needed for anonymous replicas, since they
-	 * aren't registered with gc at all.
-	 */
-	struct trigger on_close_log = {
-		RLIST_LINK_INITIALIZER, relay_on_close_log_f, relay, NULL
-	};
-	if (!relay->replica->anon)
-		trigger_add(&r->on_close_log, &on_close_log);
-
 	/* Setup WAL watcher for sending new rows to the replica. */
 	wal_set_watcher(&relay->wal_watcher, relay->endpoint.name,
 			relay_process_wal_event, cbus_process);
@@ -643,8 +539,6 @@ relay_subscribe_f(va_list ap)
 		vclock_copy(&relay->status_msg.vclock, send_vclock);
 		relay->status_msg.relay = relay;
 		cpipe_push(&relay->tx_pipe, &relay->status_msg.msg);
-		/* Collect xlog files received by the replica. */
-		relay_schedule_pending_gc(relay, send_vclock);
 	}
 
 	/*
@@ -657,8 +551,6 @@ relay_subscribe_f(va_list ap)
 	say_crit("exiting the relay loop");
 
 	/* Clear garbage collector trigger and WAL watcher. */
-	if (!relay->replica->anon)
-		trigger_clear(&on_close_log);
 	wal_clear_watcher(&relay->wal_watcher, cbus_process);
 
 	/* Join ack reader fiber. */
@@ -682,17 +574,8 @@ relay_subscribe(struct replica *replica, int fd, uint64_t sync,
 	assert(replica->anon || replica->id != REPLICA_ID_NIL);
 	struct relay *relay = replica->relay;
 	assert(relay->state != RELAY_FOLLOW);
-	/*
-	 * Register the replica with the garbage collector
-	 * unless it has already been registered by initial
-	 * join.
-	 */
-	if (replica->gc == NULL && !replica->anon) {
-		replica->gc = gc_consumer_register(replica_clock, "replica %s",
-						   tt_uuid_str(&replica->uuid));
-		if (replica->gc == NULL)
-			diag_raise();
-	}
+	if (!replica->anon)
+		wal_relay_status_update(replica->id, replica_clock);
 
 	relay_start(relay, fd, sync, relay_send_row);
 	auto relay_guard = make_scoped_guard([=] {
diff --git a/src/box/replication.cc b/src/box/replication.cc
index e7bfa22ab..869177656 100644
--- a/src/box/replication.cc
+++ b/src/box/replication.cc
@@ -37,6 +37,7 @@
 #include <small/mempool.h>
 
 #include "box.h"
+#include "wal.h"
 #include "gc.h"
 #include "error.h"
 #include "relay.h"
@@ -191,8 +192,6 @@ replica_delete(struct replica *replica)
 	assert(replica_is_orphan(replica));
 	if (replica->relay != NULL)
 		relay_delete(replica->relay);
-	if (replica->gc != NULL)
-		gc_consumer_unregister(replica->gc);
 	TRASH(replica);
 	free(replica);
 }
@@ -235,15 +234,6 @@ replica_set_id(struct replica *replica, uint32_t replica_id)
 		/* Assign local replica id */
 		assert(instance_id == REPLICA_ID_NIL);
 		instance_id = replica_id;
-	} else if (replica->anon) {
-		/*
-		 * Set replica gc on its transition from
-		 * anonymous to a normal one.
-		 */
-		assert(replica->gc == NULL);
-		replica->gc = gc_consumer_register(&replicaset.vclock,
-						   "replica %s",
-						   tt_uuid_str(&replica->uuid));
 	}
 	replicaset.replica_by_id[replica_id] = replica;
 
@@ -271,22 +261,13 @@ replica_clear_id(struct replica *replica)
 		assert(replicaset.is_joining);
 		instance_id = REPLICA_ID_NIL;
 	}
+	uint32_t replica_id = replica->id;
 	replica->id = REPLICA_ID_NIL;
 	say_info("removed replica %s", tt_uuid_str(&replica->uuid));
 
-	/*
-	 * The replica will never resubscribe so we don't need to keep
-	 * WALs for it anymore. Unregister it with the garbage collector
-	 * if the relay thread is stopped. In case the relay thread is
-	 * still running, it may need to access replica->gc so leave the
-	 * job to replica_on_relay_stop, which will be called as soon as
-	 * the relay thread exits.
-	 */
-	if (replica->gc != NULL &&
-	    relay_get_state(replica->relay) != RELAY_FOLLOW) {
-		gc_consumer_unregister(replica->gc);
-		replica->gc = NULL;
-	}
+	if (replica_id != REPLICA_ID_NIL)
+		wal_relay_delete(replica_id);
+
 	if (replica_is_orphan(replica)) {
 		replica_hash_remove(&replicaset.hash, replica);
 		replica_delete(replica);
@@ -896,10 +877,7 @@ replica_on_relay_stop(struct replica *replica)
 	 * collector then. See also replica_clear_id.
 	 */
 	if (replica->id == REPLICA_ID_NIL) {
-		if (!replica->anon) {
-			gc_consumer_unregister(replica->gc);
-			replica->gc = NULL;
-		} else {
+		if (replica->anon) {
 			assert(replica->gc == NULL);
 			/*
 			 * We do not replicate from anonymous
diff --git a/src/box/wal.c b/src/box/wal.c
index ce15cb459..ad3c79a8a 100644
--- a/src/box/wal.c
+++ b/src/box/wal.c
@@ -43,6 +43,7 @@
 #include "cbus.h"
 #include "coio_task.h"
 #include "replication.h"
+#include "mclock.h"
 
 enum {
 	/**
@@ -154,6 +155,25 @@ struct wal_writer
 	 * Used for replication relays.
 	 */
 	struct rlist watchers;
+	/**
+	 * Matrix clock with all wal consumer vclocks.
+	 */
+	struct mclock mclock;
+	/**
+	 * Fiber condition signaled when the matrix clock is updated.
+	 */
+	struct fiber_cond wal_gc_cond;
+	/**
+	 * Minimal known xlog vclock used to decide when
+	 * wal gc should be invoked. It is a cached copy of
+	 * the second vclock in the wal directory vclockset.
+	 */
+	const struct vclock *gc_wal_vclock;
+	/**
+	 * Vclock which protects the subsequent logs from being
+	 * collected. Ignored in case of a no-space (ENOSPC) error.
+	 */
+	struct vclock gc_first_vclock;
 };
 
 struct wal_msg {
@@ -335,6 +355,44 @@ tx_notify_checkpoint(struct cmsg *msg)
 	free(msg);
 }
 
+/*
+ * Shortcut function which returns the second vclock from a wal
+ * directory. If the gc vclock is greater than or equal to the second
+ * one in the wal directory then there is at least one file to clean.
+ */
+static inline const struct vclock *
+second_vclock(struct wal_writer *writer)
+{
+	struct vclock *first_vclock = vclockset_first(&writer->wal_dir.index);
+	struct vclock *second_vclock = NULL;
+	if (first_vclock != NULL)
+		second_vclock = vclockset_next(&writer->wal_dir.index,
+					       first_vclock);
+	if (first_vclock != NULL && second_vclock == NULL &&
+	    first_vclock->signature != writer->vclock.signature) {
+		/* New xlog could be not created yet. */
+		second_vclock = &writer->vclock;
+	}
+	return second_vclock;
+}
+
+/*
+ * Shortcut function which compares three vclocks and returns
+ * true if the first one is not strictly greater than the second
+ * one whereas the third one is greater than or equal to it. Used
+ * in order to decide when wal gc should be signaled.
+ */
+static inline bool
+vclock_order_changed(const struct vclock *old, const struct vclock *target,
+		     const struct vclock *new)
+{
+	int rc = vclock_compare(old, target);
+	if (rc > 0 && rc != VCLOCK_ORDER_UNDEFINED)
+		return false;
+	rc = vclock_compare(new, target);
+	return rc >= 0 && rc != VCLOCK_ORDER_UNDEFINED;
+}
+
 /**
  * Initialize WAL writer context. Even though it's a singleton,
  * encapsulate the details just in case we may use
@@ -375,6 +433,12 @@ wal_writer_create(struct wal_writer *writer, enum wal_mode wal_mode,
 
 	mempool_create(&writer->msg_pool, &cord()->slabc,
 		       sizeof(struct wal_msg));
+
+	mclock_create(&writer->mclock);
+
+	fiber_cond_create(&writer->wal_gc_cond);
+	writer->gc_wal_vclock = NULL;
+	vclock_create(&writer->gc_first_vclock);
 }
 
 /** Destroy a WAL writer structure. */
@@ -494,6 +558,7 @@ wal_enable(void)
 	 */
 	if (xdir_scan(&writer->wal_dir))
 		return -1;
+	writer->gc_wal_vclock = second_vclock(writer);
 
 	/* Open the most recent WAL file. */
 	if (wal_open(writer) != 0)
@@ -592,6 +657,8 @@ wal_begin_checkpoint_f(struct cbus_call_msg *data)
 		/*
 		 * The next WAL will be created on the first write.
 		 */
+		if (writer->gc_wal_vclock == NULL)
+			writer->gc_wal_vclock = second_vclock(writer);
 	}
 	vclock_copy(&msg->vclock, &writer->vclock);
 	msg->wal_size = writer->checkpoint_wal_size;
@@ -695,20 +762,35 @@ wal_set_checkpoint_threshold(int64_t threshold)
 	fiber_set_cancellable(cancellable);
 }
 
-struct wal_gc_msg
+static void
+wal_gc_advance(struct wal_writer *writer)
 {
-	struct cbus_call_msg base;
-	const struct vclock *vclock;
-};
+	static struct cmsg_hop route[] = {
+		{ tx_notify_gc, NULL },
+	};
+	struct tx_notify_gc_msg *msg = malloc(sizeof(*msg));
+	if (msg != NULL) {
+		if (xdir_first_vclock(&writer->wal_dir, &msg->vclock) < 0)
+			vclock_copy(&msg->vclock, &writer->vclock);
+		cmsg_init(&msg->base, route);
+		cpipe_push(&writer->tx_prio_pipe, &msg->base);
+	} else
+		say_warn("failed to allocate gc notification message");
+}
 
 static int
-wal_collect_garbage_f(struct cbus_call_msg *data)
+wal_collect_garbage(struct wal_writer *writer)
 {
-	struct wal_writer *writer = &wal_writer_singleton;
-	const struct vclock *vclock = ((struct wal_gc_msg *)data)->vclock;
+	struct vclock *collect_vclock = &writer->gc_first_vclock;
+	struct vclock relay_min_vclock;
+	if (mclock_get(&writer->mclock, -1, &relay_min_vclock) == 0) {
+		int rc = vclock_compare(collect_vclock, &relay_min_vclock);
+		if (rc > 0 || rc == VCLOCK_ORDER_UNDEFINED)
+			collect_vclock = &relay_min_vclock;
+	}
 
 	if (!xlog_is_open(&writer->current_wal) &&
-	    vclock_sum(vclock) >= vclock_sum(&writer->vclock)) {
+	    vclock_sum(collect_vclock) >= vclock_sum(&writer->vclock)) {
 		/*
 		 * The last available WAL file has been sealed and
 		 * all registered consumers have done reading it.
@@ -720,27 +802,54 @@ wal_collect_garbage_f(struct cbus_call_msg *data)
 		 * required by registered consumers and delete all
 		 * older WAL files.
 		 */
-		vclock = vclockset_psearch(&writer->wal_dir.index, vclock);
+		collect_vclock = vclockset_match(&writer->wal_dir.index,
+						 collect_vclock);
+	}
+	if (collect_vclock != NULL) {
+		xdir_collect_garbage(&writer->wal_dir,
+				     vclock_sum(collect_vclock), XDIR_GC_ASYNC);
+		writer->gc_wal_vclock = second_vclock(writer);
+		wal_gc_advance(writer);
 	}
-	if (vclock != NULL)
-		xdir_collect_garbage(&writer->wal_dir, vclock_sum(vclock),
-				     XDIR_GC_ASYNC);
 
 	return 0;
 }
 
+struct wal_set_gc_first_vclock_msg {
+	struct cbus_call_msg base;
+	const struct vclock *vclock;
+};
+
+int
+wal_set_gc_first_vclock_f(struct cbus_call_msg *base)
+{
+	struct wal_writer *writer = &wal_writer_singleton;
+	struct wal_set_gc_first_vclock_msg *msg =
+		container_of(base, struct wal_set_gc_first_vclock_msg, base);
+	if (writer->gc_wal_vclock != NULL &&
+	    vclock_order_changed(&writer->gc_first_vclock,
+				 writer->gc_wal_vclock, msg->vclock))
+		fiber_cond_signal(&writer->wal_gc_cond);
+	vclock_copy(&writer->gc_first_vclock, msg->vclock);
+	return 0;
+}
+
 void
-wal_collect_garbage(const struct vclock *vclock)
+wal_set_gc_first_vclock(const struct vclock *vclock)
 {
 	struct wal_writer *writer = &wal_writer_singleton;
-	if (writer->wal_mode == WAL_NONE)
+	if (writer->wal_mode == WAL_NONE) {
+		vclock_copy(&writer->gc_first_vclock, vclock);
 		return;
-	struct wal_gc_msg msg;
+	}
+	struct wal_set_gc_first_vclock_msg msg;
 	msg.vclock = vclock;
 	bool cancellable = fiber_set_cancellable(false);
-	cbus_call(&writer->wal_pipe, &writer->tx_prio_pipe, &msg.base,
-		  wal_collect_garbage_f, NULL, TIMEOUT_INFINITY);
+	cbus_call(&writer->wal_pipe, &writer->tx_prio_pipe,
+		  &msg.base, wal_set_gc_first_vclock_f, NULL,
+		  TIMEOUT_INFINITY);
 	fiber_set_cancellable(cancellable);
+
 }
 
 static void
@@ -790,7 +899,8 @@ wal_opt_rotate(struct wal_writer *writer)
 	 * collection, see wal_collect_garbage().
 	 */
 	xdir_add_vclock(&writer->wal_dir, &writer->vclock);
-
+	if (writer->gc_wal_vclock == NULL)
+		writer->gc_wal_vclock = second_vclock(writer);
 	wal_notify_watchers(writer, WAL_EVENT_ROTATE);
 	return 0;
 }
@@ -845,6 +955,10 @@ retry:
 	}
 
 	xdir_collect_garbage(&writer->wal_dir, gc_lsn, XDIR_GC_REMOVE_ONE);
+	writer->gc_wal_vclock = second_vclock(writer);
+	if (vclock_compare(&writer->gc_first_vclock,
+			   writer->gc_wal_vclock) < 0)
+		vclock_copy(&writer->gc_first_vclock, writer->gc_wal_vclock);
 	notify_gc = true;
 	goto retry;
 error:
@@ -861,20 +975,8 @@ out:
 	 * event and a failure to send this message isn't really
 	 * critical.
 	 */
-	if (notify_gc) {
-		static struct cmsg_hop route[] = {
-			{ tx_notify_gc, NULL },
-		};
-		struct tx_notify_gc_msg *msg = malloc(sizeof(*msg));
-		if (msg != NULL) {
-			if (xdir_first_vclock(&writer->wal_dir,
-					      &msg->vclock) < 0)
-				vclock_copy(&msg->vclock, &writer->vclock);
-			cmsg_init(&msg->base, route);
-			cpipe_push(&writer->tx_prio_pipe, &msg->base);
-		} else
-			say_warn("failed to allocate gc notification message");
-	}
+	if (notify_gc)
+		wal_gc_advance(writer);
 	return rc;
 }
 
@@ -1126,6 +1228,26 @@ wal_write_to_disk(struct cmsg *msg)
 	wal_notify_watchers(writer, WAL_EVENT_WRITE);
 }
 
+
+/*
+ * WAL garbage collection fiber.
+ * The fiber waits until the writer's mclock is updated
+ * and then compares the mclock lower bound with
+ * the oldest wal file.
+ */
+static int
+wal_gc_f(va_list ap)
+{
+	struct wal_writer *writer = va_arg(ap, struct wal_writer *);
+
+	while (!fiber_is_cancelled()) {
+		fiber_cond_wait(&writer->wal_gc_cond);
+		wal_collect_garbage(writer);
+	}
+
+	return 0;
+}
+
 /** WAL writer main loop.  */
 static int
 wal_writer_f(va_list ap)
@@ -1145,8 +1267,15 @@ wal_writer_f(va_list ap)
 	 */
 	cpipe_create(&writer->tx_prio_pipe, "tx_prio");
 
+	struct fiber *wal_gc_fiber = fiber_new("wal_gc", wal_gc_f);
+	fiber_set_joinable(wal_gc_fiber, true);
+	fiber_start(wal_gc_fiber, writer);
+
 	cbus_loop(&endpoint);
 
+	fiber_cancel(wal_gc_fiber);
+	fiber_join(wal_gc_fiber);
+
 	/*
 	 * Create a new empty WAL on shutdown so that we don't
 	 * have to rescan the last WAL to find the instance vclock.
@@ -1436,6 +1565,82 @@ wal_notify_watchers(struct wal_writer *writer, unsigned events)
 		wal_watcher_notify(watcher, events);
 }
 
+struct wal_relay_status_update_msg {
+	struct cbus_call_msg base;
+	uint32_t replica_id;
+	struct vclock vclock;
+};
+
+static int
+wal_relay_status_update_f(struct cbus_call_msg *base)
+{
+	struct wal_writer *writer = &wal_writer_singleton;
+	struct wal_relay_status_update_msg *msg =
+		container_of(base, struct wal_relay_status_update_msg, base);
+	struct vclock old_vclock;
+	mclock_extract_row(&writer->mclock, msg->replica_id, &old_vclock);
+	if (writer->gc_wal_vclock != NULL &&
+	    vclock_order_changed(&old_vclock, writer->gc_wal_vclock,
+				 &msg->vclock))
+		fiber_cond_signal(&writer->wal_gc_cond);
+	mclock_update(&writer->mclock, msg->replica_id, &msg->vclock);
+	return 0;
+}
+
+void
+wal_relay_status_update(uint32_t replica_id, const struct vclock *vclock)
+{
+	struct wal_writer *writer = &wal_writer_singleton;
+	struct wal_relay_status_update_msg msg;
+	/*
+	 * We do not take anonymous replicas into account. There is
+	 * no way to distinguish them, and an anonymous replica could
+	 * be rebootstrapped at any time.
+	 */
+	if (replica_id == 0)
+		return;
+	msg.replica_id = replica_id;
+	vclock_copy(&msg.vclock, vclock);
+	bool cancellable = fiber_set_cancellable(false);
+	cbus_call(&writer->wal_pipe, &writer->tx_prio_pipe,
+		  &msg.base, wal_relay_status_update_f, NULL,
+		  TIMEOUT_INFINITY);
+	fiber_set_cancellable(cancellable);
+}
+
+struct wal_relay_delete_msg {
+	struct cmsg base;
+	uint32_t replica_id;
+};
+
+void
+wal_relay_delete_f(struct cmsg *base)
+{
+	struct wal_writer *writer = &wal_writer_singleton;
+	struct wal_relay_delete_msg *msg =
+		container_of(base, struct wal_relay_delete_msg, base);
+	struct vclock vclock;
+	vclock_create(&vclock);
+	mclock_update(&writer->mclock, msg->replica_id, &vclock);
+	fiber_cond_signal(&writer->wal_gc_cond);
+	free(msg);
+}
+
+void
+wal_relay_delete(uint32_t replica_id)
+{
+	struct wal_writer *writer = &wal_writer_singleton;
+	struct wal_relay_delete_msg *msg =
+		(struct wal_relay_delete_msg *)malloc(sizeof(*msg));
+	if (msg == NULL) {
+		say_error("Could not allocate relay delete message");
+		return;
+	}
+	static struct cmsg_hop route[] = {{wal_relay_delete_f, NULL}};
+	cmsg_init(&msg->base, route);
+	msg->replica_id = replica_id;
+	cpipe_push(&writer->wal_pipe, &msg->base);
+}
 
 /**
  * After fork, the WAL writer thread disappears.
diff --git a/src/box/wal.h b/src/box/wal.h
index 76b44941a..86887656d 100644
--- a/src/box/wal.h
+++ b/src/box/wal.h
@@ -223,6 +223,12 @@ wal_begin_checkpoint(struct wal_checkpoint *checkpoint);
 void
 wal_commit_checkpoint(struct wal_checkpoint *checkpoint);
 
+/**
+ * Prevent wal from collecting xlog files at or after the given vclock.
+ */
+void
+wal_set_gc_first_vclock(const struct vclock *vclock);
+
 /**
  * Set the WAL size threshold exceeding which will trigger
  * checkpointing in TX.
@@ -231,11 +237,16 @@ void
 wal_set_checkpoint_threshold(int64_t threshold);
 
 /**
- * Remove WAL files that are not needed by consumers reading
- * rows at @vclock or newer.
+ * Update a wal consumer vclock position.
+ */
+void
+wal_relay_status_update(uint32_t replica_id, const struct vclock *vclock);
+
+/**
+ * Unregister a wal consumer.
  */
 void
-wal_collect_garbage(const struct vclock *vclock);
+wal_relay_delete(uint32_t replica_id);
 
 void
 wal_init_vy_log();
diff --git a/test/replication/gc_no_space.result b/test/replication/gc_no_space.result
index e860ab00f..f295cb16b 100644
--- a/test/replication/gc_no_space.result
+++ b/test/replication/gc_no_space.result
@@ -162,18 +162,12 @@ check_snap_count(2)
 gc = box.info.gc()
 ---
 ...
-#gc.consumers -- 3
----
-- 3
-...
+--#gc.consumers -- 3
 #gc.checkpoints -- 2
 ---
 - 2
 ...
-gc.signature == gc.consumers[1].signature
----
-- true
-...
+--gc.signature == gc.consumers[1].signature
 --
 -- Inject a ENOSPC error and check that the WAL thread deletes
 -- old WAL files to prevent the user from seeing the error.
@@ -201,18 +195,12 @@ check_snap_count(2)
 gc = box.info.gc()
 ---
 ...
-#gc.consumers -- 1
----
-- 1
-...
+--#gc.consumers -- 1
 #gc.checkpoints -- 2
 ---
 - 2
 ...
-gc.signature == gc.consumers[1].signature
----
-- true
-...
+--gc.signature == gc.consumers[1].signature
 --
 -- Check that the WAL thread never deletes WAL files that are
 -- needed for recovery from the last checkpoint, but may delete
@@ -242,10 +230,7 @@ check_snap_count(2)
 gc = box.info.gc()
 ---
 ...
-#gc.consumers -- 0
----
-- 0
-...
+--#gc.consumers -- 0
 #gc.checkpoints -- 2
 ---
 - 2
@@ -272,7 +257,4 @@ gc = box.info.gc()
 ---
 - 2
 ...
-gc.signature == gc.checkpoints[2].signature
----
-- true
-...
+--gc.signature == gc.checkpoints[2].signature
diff --git a/test/replication/gc_no_space.test.lua b/test/replication/gc_no_space.test.lua
index 98ccd401b..c28bc0710 100644
--- a/test/replication/gc_no_space.test.lua
+++ b/test/replication/gc_no_space.test.lua
@@ -72,9 +72,9 @@ s:auto_increment{}
 check_wal_count(5)
 check_snap_count(2)
 gc = box.info.gc()
-#gc.consumers -- 3
+--#gc.consumers -- 3
 #gc.checkpoints -- 2
-gc.signature == gc.consumers[1].signature
+--gc.signature == gc.consumers[1].signature
 
 --
 -- Inject a ENOSPC error and check that the WAL thread deletes
@@ -87,9 +87,9 @@ errinj.info()['ERRINJ_WAL_FALLOCATE'].state -- 0
 check_wal_count(3)
 check_snap_count(2)
 gc = box.info.gc()
-#gc.consumers -- 1
+--#gc.consumers -- 1
 #gc.checkpoints -- 2
-gc.signature == gc.consumers[1].signature
+--gc.signature == gc.consumers[1].signature
 
 --
 -- Check that the WAL thread never deletes WAL files that are
@@ -104,7 +104,7 @@ errinj.info()['ERRINJ_WAL_FALLOCATE'].state -- 0
 check_wal_count(1)
 check_snap_count(2)
 gc = box.info.gc()
-#gc.consumers -- 0
+--#gc.consumers -- 0
 #gc.checkpoints -- 2
 gc.signature == gc.checkpoints[2].signature
 
@@ -116,4 +116,4 @@ test_run:cleanup_cluster()
 test_run:cmd("restart server default")
 gc = box.info.gc()
 #gc.checkpoints -- 2
-gc.signature == gc.checkpoints[2].signature
+--gc.signature == gc.checkpoints[2].signature
-- 
2.25.0


