[PATCH 5/5] wal: delete old wal files when running out of disk space

Vladimir Davydov vdavydov.dev at gmail.com
Sun Oct 7 23:27:18 MSK 2018


Now, if the WAL thread fails to preallocate the disk space needed to
commit a transaction, it deletes old WAL files until either the
allocation succeeds or only the files required for local recovery from
the oldest checkpoint remain. After deleting a file, it notifies the
garbage collector via the WAL watcher interface, and the garbage
collector then deactivates the consumers that would need the deleted
files.
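
In essence, the WAL-side change turns wal_fallocate() into a retry
loop. A condensed sketch of the code added in the diff below (the
one-time warning and the diag bookkeeping are trimmed here):

  static int
  wal_fallocate(struct wal_writer *writer, size_t len)
  {
      while (xlog_fallocate(&writer->current_wal, len) < 0) {
          if (errno != ENOSPC) {
              diag_log();
              return -1;
          }
          /*
           * Delete one old WAL file that is not needed to recover
           * from the oldest checkpoint. If nothing can be deleted,
           * give up and report the error to the caller.
           */
          if (xdir_collect_garbage(&writer->wal_dir,
                                   writer->checkpoint_lsn, 1, false) <= 0) {
              diag_log();
              return -1;
          }
          /* Let the TX thread know that a WAL file was removed. */
          wal_notify_watchers(writer, WAL_EVENT_GC);
      }
      return 0;
  }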

The user doesn't see an ENOSPC error if the WAL thread successfully
allocates disk space after deleting old files. Here's what is printed
to the log when this happens:

  wal/101/main C> ran out of disk space, try to delete old WAL files
  wal/101/main I> removed /home/vlad/src/tarantool/test/var/001_replication/master/00000000000000000005.xlog
  wal/101/main I> removed /home/vlad/src/tarantool/test/var/001_replication/master/00000000000000000006.xlog
  wal/101/main I> removed /home/vlad/src/tarantool/test/var/001_replication/master/00000000000000000007.xlog
  main/105/main C> deactivated WAL consumer replica 82d0fa3f-6881-4bc5-a2c0-a0f5dcf80120 at {1: 5}
  main/105/main C> deactivated WAL consumer replica 98dce0a8-1213-4824-b31e-c7e3c4eaf437 at {1: 7}
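
On the TX side, the garbage collector's new WAL watcher callback
(gc_process_wal_event() in the diff) boils down to the following,
with logging omitted:

  static void
  gc_process_wal_event(struct wal_watcher_msg *msg)
  {
      if ((msg->events & WAL_EVENT_GC) == 0)
          return;
      /* Deactivate consumers that need the files deleted by WAL. */
      struct gc_consumer *consumer = gc_tree_first(&gc.consumers);
      while (consumer != NULL &&
             vclock_sum(&consumer->vclock) < msg->gc_lsn) {
          struct gc_consumer *next = gc_tree_next(&gc.consumers, consumer);
          consumer->is_inactive = true;
          gc_tree_remove(&gc.consumers, consumer);
          consumer = next;
      }
      /* Rerun garbage collection without the stale consumers. */
      gc_run();
  }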

Closes #3397
---
 src/box/box.cc                        |   9 +-
 src/box/gc.c                          |  67 +++++++++-
 src/box/gc.h                          |  31 +++++
 src/box/wal.c                         |  74 ++++++++---
 src/box/wal.h                         |  15 ++-
 src/box/xlog.c                        |   8 ++
 src/errinj.h                          |   1 +
 test/box/errinj.result                |   2 +
 test/replication/gc_no_space.result   | 234 ++++++++++++++++++++++++++++++++++
 test/replication/gc_no_space.test.lua | 103 +++++++++++++++
 test/replication/suite.ini            |   2 +-
 11 files changed, 514 insertions(+), 32 deletions(-)
 create mode 100644 test/replication/gc_no_space.result
 create mode 100644 test/replication/gc_no_space.test.lua

diff --git a/src/box/box.cc b/src/box/box.cc
index 7e32b9fc..409897f6 100644
--- a/src/box/box.cc
+++ b/src/box/box.cc
@@ -2093,14 +2093,19 @@ box_cfg_xc(void)
 		}
 	}
 
+	struct gc_checkpoint *first_checkpoint = gc_first_checkpoint();
+	assert(first_checkpoint != NULL);
+
 	/* Start WAL writer */
 	int64_t wal_max_rows = box_check_wal_max_rows(cfg_geti64("rows_per_wal"));
 	int64_t wal_max_size = box_check_wal_max_size(cfg_geti64("wal_max_size"));
 	enum wal_mode wal_mode = box_check_wal_mode(cfg_gets("wal_mode"));
-	if (wal_init(wal_mode, cfg_gets("wal_dir"), &INSTANCE_UUID,
-		      &replicaset.vclock, wal_max_rows, wal_max_size)) {
+	if (wal_init(wal_mode, cfg_gets("wal_dir"), wal_max_rows,
+		     wal_max_size, &INSTANCE_UUID, &replicaset.vclock,
+		     vclock_sum(&first_checkpoint->vclock))) {
 		diag_raise();
 	}
+	gc_set_wal_watcher();
 
 	rmean_cleanup(rmean_box);
 
diff --git a/src/box/gc.c b/src/box/gc.c
index becb5d09..c078a419 100644
--- a/src/box/gc.c
+++ b/src/box/gc.c
@@ -48,6 +48,7 @@
 #include "say.h"
 #include "latch.h"
 #include "vclock.h"
+#include "cbus.h"
 #include "engine.h"		/* engine_collect_garbage() */
 #include "wal.h"		/* wal_collect_garbage() */
 
@@ -102,9 +103,24 @@ gc_init(void)
 	latch_create(&gc.latch);
 }
 
+static void
+gc_process_wal_event(struct wal_watcher_msg *);
+
+void
+gc_set_wal_watcher(void)
+{
+	wal_set_watcher(&gc.wal_watcher, "tx", gc_process_wal_event,
+			cbus_process);
+}
+
 void
 gc_free(void)
 {
+	/*
+	 * Can't clear the WAL watcher as the event loop isn't
+	 * running when this function is called.
+	 */
+
 	/* Free checkpoints. */
 	struct gc_checkpoint *checkpoint, *next_checkpoint;
 	rlist_foreach_entry_safe(checkpoint, &gc.checkpoints, in_checkpoints,
@@ -175,6 +191,9 @@ gc_run(void)
 	if (!run_engine_gc && !run_wal_gc)
 		return; /* nothing to do */
 
+	int64_t wal_lsn = vclock_sum(vclock);
+	int64_t checkpoint_lsn = vclock_sum(&checkpoint->vclock);
+
 	/*
 	 * Engine callbacks may sleep, because they use coio for
 	 * removing files. Make sure we won't try to remove the
@@ -191,12 +210,45 @@ gc_run(void)
 	 */
 	int rc = 0;
 	if (run_engine_gc)
-		rc = engine_collect_garbage(vclock_sum(&checkpoint->vclock));
-	if (run_wal_gc && rc == 0)
-		wal_collect_garbage(vclock_sum(vclock));
+		rc = engine_collect_garbage(checkpoint_lsn);
+	/*
+	 * Run wal_collect_garbage() even if we don't need to
+	 * delete any WAL files to apprise the WAL thread of
+	 * the oldest checkpoint signature.
+	 */
+	if (rc == 0)
+		wal_collect_garbage(wal_lsn, checkpoint_lsn);
 	latch_unlock(&gc.latch);
 }
 
+/**
+ * Deactivate consumers that need files deleted by the WAL thread.
+ */
+static void
+gc_process_wal_event(struct wal_watcher_msg *msg)
+{
+	if ((msg->events & WAL_EVENT_GC) == 0)
+		return;
+
+	struct gc_consumer *consumer = gc_tree_first(&gc.consumers);
+	while (consumer != NULL &&
+	       vclock_sum(&consumer->vclock) < msg->gc_lsn) {
+		struct gc_consumer *next = gc_tree_next(&gc.consumers,
+							consumer);
+		assert(!consumer->is_inactive);
+		consumer->is_inactive = true;
+		gc_tree_remove(&gc.consumers, consumer);
+
+		char *vclock_str = vclock_to_string(&consumer->vclock);
+		say_crit("deactivated WAL consumer %s at %s",
+			 consumer->name, vclock_str);
+		free(vclock_str);
+
+		consumer = next;
+	}
+	gc_run();
+}
+
 void
 gc_set_min_checkpoint_count(int min_checkpoint_count)
 {
@@ -279,14 +331,19 @@ gc_consumer_register(const struct vclock *vclock, const char *format, ...)
 void
 gc_consumer_unregister(struct gc_consumer *consumer)
 {
-	gc_tree_remove(&gc.consumers, consumer);
+	if (!consumer->is_inactive) {
+		gc_tree_remove(&gc.consumers, consumer);
+		gc_run();
+	}
 	gc_consumer_delete(consumer);
-	gc_run();
 }
 
 void
 gc_consumer_advance(struct gc_consumer *consumer, const struct vclock *vclock)
 {
+	if (consumer->is_inactive)
+		return;
+
 	int64_t signature = vclock_sum(vclock);
 	int64_t prev_signature = vclock_sum(&consumer->vclock);
 
diff --git a/src/box/gc.h b/src/box/gc.h
index a5392cef..e1241baa 100644
--- a/src/box/gc.h
+++ b/src/box/gc.h
@@ -36,6 +36,7 @@
 
 #include "vclock.h"
 #include "latch.h"
+#include "wal.h"
 #include "trivia/util.h"
 
 #if defined(__cplusplus)
@@ -89,6 +90,11 @@ struct gc_consumer {
 	char name[GC_NAME_MAX];
 	/** The vclock tracked by this consumer. */
 	struct vclock vclock;
+	/**
+	 * This flag is set if a WAL needed by this consumer was
+	 * deleted by the WAL thread on ENOSPC.
+	 */
+	bool is_inactive;
 };
 
 typedef rb_tree(struct gc_consumer) gc_tree_t;
@@ -120,6 +126,11 @@ struct gc_state {
 	 * garbage collection callbacks.
 	 */
 	struct latch latch;
+	/**
+	 * WAL event watcher. Needed to shoot off stale consumers
+	 * when a WAL file is deleted due to ENOSPC.
+	 */
+	struct wal_watcher wal_watcher;
 };
 extern struct gc_state gc;
 
@@ -145,6 +156,20 @@ extern struct gc_state gc;
 	rlist_foreach_entry(ref, &(checkpoint)->refs, in_refs)
 
 /**
+ * Return the first (oldest) checkpoint known to the garbage
+ * collector. If there's no checkpoint, return NULL.
+ */
+static inline struct gc_checkpoint *
+gc_first_checkpoint(void)
+{
+	if (rlist_empty(&gc.checkpoints))
+		return NULL;
+
+	return rlist_first_entry(&gc.checkpoints, struct gc_checkpoint,
+				 in_checkpoints);
+}
+
+/**
  * Return the last (newest) checkpoint known to the garbage
  * collector. If there's no checkpoint, return NULL.
  */
@@ -165,6 +190,12 @@ void
 gc_init(void);
 
 /**
+ * Set WAL watcher. Called after WAL is initialized.
+ */
+void
+gc_set_wal_watcher(void);
+
+/**
  * Destroy the garbage collection state.
  */
 void
diff --git a/src/box/wal.c b/src/box/wal.c
index 20b85f43..6e7a6b3f 100644
--- a/src/box/wal.c
+++ b/src/box/wal.c
@@ -111,6 +111,12 @@ struct wal_writer
 	 * with this LSN and LSN becomes "real".
 	 */
 	struct vclock vclock;
+	/**
+	 * Signature of the oldest checkpoint available on the instance.
+	 * The WAL writer must not delete WAL files that are needed to
+	 * recover from it even if it is running out of disk space.
+	 */
+	int64_t checkpoint_lsn;
 	/** The current WAL file. */
 	struct xlog current_wal;
 	/**
@@ -282,9 +288,9 @@ tx_schedule_rollback(struct cmsg *msg)
  */
 static void
 wal_writer_create(struct wal_writer *writer, enum wal_mode wal_mode,
-		  const char *wal_dirname, const struct tt_uuid *instance_uuid,
-		  struct vclock *vclock, int64_t wal_max_rows,
-		  int64_t wal_max_size)
+		  const char *wal_dirname, int64_t wal_max_rows,
+		  int64_t wal_max_size, const struct tt_uuid *instance_uuid,
+		  const struct vclock *vclock, int64_t checkpoint_lsn)
 {
 	writer->wal_mode = wal_mode;
 	writer->wal_max_rows = wal_max_rows;
@@ -304,6 +310,7 @@ wal_writer_create(struct wal_writer *writer, enum wal_mode wal_mode,
 	vclock_create(&writer->vclock);
 	vclock_copy(&writer->vclock, vclock);
 
+	writer->checkpoint_lsn = checkpoint_lsn;
 	rlist_create(&writer->watchers);
 }
 
@@ -407,16 +414,16 @@ wal_open(struct wal_writer *writer)
  *        mode are closed. WAL thread has been started.
  */
 int
-wal_init(enum wal_mode wal_mode, const char *wal_dirname,
-	 const struct tt_uuid *instance_uuid, struct vclock *vclock,
-	 int64_t wal_max_rows, int64_t wal_max_size)
+wal_init(enum wal_mode wal_mode, const char *wal_dirname, int64_t wal_max_rows,
+	 int64_t wal_max_size, const struct tt_uuid *instance_uuid,
+	 const struct vclock *vclock, int64_t first_checkpoint_lsn)
 {
 	assert(wal_max_rows > 1);
 
 	struct wal_writer *writer = &wal_writer_singleton;
-
-	wal_writer_create(writer, wal_mode, wal_dirname, instance_uuid,
-			  vclock, wal_max_rows, wal_max_size);
+	wal_writer_create(writer, wal_mode, wal_dirname, wal_max_rows,
+			  wal_max_size, instance_uuid, vclock,
+			  first_checkpoint_lsn);
 
 	/*
 	 * Scan the WAL directory to build an index of all
@@ -534,27 +541,30 @@ wal_checkpoint(struct vclock *vclock, bool rotate)
 struct wal_gc_msg
 {
 	struct cbus_call_msg base;
-	int64_t lsn;
+	int64_t wal_lsn;
+	int64_t checkpoint_lsn;
 };
 
 static int
 wal_collect_garbage_f(struct cbus_call_msg *data)
 {
 	struct wal_writer *writer = &wal_writer_singleton;
-	int64_t lsn = ((struct wal_gc_msg *)data)->lsn;
-	xdir_collect_garbage(&writer->wal_dir, lsn, -1, false);
+	struct wal_gc_msg *msg = (struct wal_gc_msg *)data;
+	writer->checkpoint_lsn = msg->checkpoint_lsn;
+	xdir_collect_garbage(&writer->wal_dir, msg->wal_lsn, -1, false);
 	wal_notify_watchers(writer, WAL_EVENT_GC);
 	return 0;
 }
 
 void
-wal_collect_garbage(int64_t lsn)
+wal_collect_garbage(int64_t wal_lsn, int64_t checkpoint_lsn)
 {
 	struct wal_writer *writer = &wal_writer_singleton;
 	if (writer->wal_mode == WAL_NONE)
 		return;
 	struct wal_gc_msg msg;
-	msg.lsn = lsn;
+	msg.wal_lsn = wal_lsn;
+	msg.checkpoint_lsn = checkpoint_lsn;
 	bool cancellable = fiber_set_cancellable(false);
 	cbus_call(&wal_thread.wal_pipe, &wal_thread.tx_prio_pipe, &msg.base,
 		  wal_collect_garbage_f, NULL, TIMEOUT_INFINITY);
@@ -614,15 +624,43 @@ wal_opt_rotate(struct wal_writer *writer)
 /**
  * Make sure there's enough disk space to write @len bytes
  * of data to the current WAL.
+ *
+ * If fallocate() fails with ENOSPC, delete old WAL files
+ * that are not needed for recovery and retry.
  */
 static int
 wal_fallocate(struct wal_writer *writer, size_t len)
 {
-	if (xlog_fallocate(&writer->current_wal, len) < 0) {
-		diag_log();
-		return -1;
+	bool warn_no_space = true;
+retry:
+	if (xlog_fallocate(&writer->current_wal, len) >= 0) {
+		diag_clear(diag_get());
+		return 0;
 	}
-	return 0;
+	if (errno != ENOSPC)
+		goto error;
+
+	if (warn_no_space) {
+		say_crit("ran out of disk space, try to delete old WAL files");
+		warn_no_space = false;
+	}
+
+	/* Keep the original error. */
+	struct diag diag;
+	diag_create(&diag);
+	diag_move(diag_get(), &diag);
+	int rc = xdir_collect_garbage(&writer->wal_dir, writer->checkpoint_lsn,
+				      1, false);
+	diag_move(&diag, diag_get());
+	diag_destroy(&diag);
+	if (rc <= 0)
+		goto error;
+
+	wal_notify_watchers(writer, WAL_EVENT_GC);
+	goto retry;
+error:
+	diag_log();
+	return -1;
 }
 
 static void
diff --git a/src/box/wal.h b/src/box/wal.h
index 4867ec3b..6d5ee0a6 100644
--- a/src/box/wal.h
+++ b/src/box/wal.h
@@ -56,9 +56,9 @@ void
 wal_thread_start();
 
 int
-wal_init(enum wal_mode wal_mode, const char *wal_dirname,
-	 const struct tt_uuid *instance_uuid, struct vclock *vclock,
-	 int64_t wal_max_rows, int64_t wal_max_size);
+wal_init(enum wal_mode wal_mode, const char *wal_dirname, int64_t wal_max_rows,
+	 int64_t wal_max_size, const struct tt_uuid *instance_uuid,
+	 const struct vclock *vclock, int64_t first_checkpoint_lsn);
 
 void
 wal_thread_stop();
@@ -165,11 +165,14 @@ int
 wal_checkpoint(struct vclock *vclock, bool rotate);
 
 /**
- * Remove WAL files that are not needed to recover
- * from snapshot with @lsn or newer.
+ * Remove all WAL files whose signature is less than @wal_lsn.
+ * Update the oldest checkpoint signature with @checkpoint_lsn.
+ * The WAL thread will delete WAL files that are not needed to
+ * recover from the oldest checkpoint if it runs out of disk
+ * space.
  */
 void
-wal_collect_garbage(int64_t lsn);
+wal_collect_garbage(int64_t wal_lsn, int64_t checkpoint_lsn);
 
 void
 wal_init_vy_log();
diff --git a/src/box/xlog.c b/src/box/xlog.c
index 1a6ead7f..bc558593 100644
--- a/src/box/xlog.c
+++ b/src/box/xlog.c
@@ -1027,6 +1027,14 @@ xlog_write_error(struct xlog *log)
 ssize_t
 xlog_fallocate(struct xlog *log, size_t len)
 {
+	struct errinj *inj = errinj(ERRINJ_XLOG_FALLOCATE, ERRINJ_INT);
+	if (inj != NULL && inj->iparam > 0) {
+		inj->iparam--;
+		diag_set(ClientError, ER_INJECTION, "xlog fallocate");
+		errno = ENOSPC;
+		return -1;
+	}
+
 #ifdef HAVE_POSIX_FALLOCATE
 	if (log->alloc_len > len)
 		return log->alloc_len;
diff --git a/src/errinj.h b/src/errinj.h
index 84a1fbb5..19304f8e 100644
--- a/src/errinj.h
+++ b/src/errinj.h
@@ -102,6 +102,7 @@ struct errinj {
 	_(ERRINJ_XLOG_GARBAGE, ERRINJ_BOOL, {.bparam = false}) \
 	_(ERRINJ_XLOG_META, ERRINJ_BOOL, {.bparam = false}) \
 	_(ERRINJ_XLOG_READ, ERRINJ_INT, {.iparam = -1}) \
+	_(ERRINJ_XLOG_FALLOCATE, ERRINJ_INT, {.iparam = 0}) \
 	_(ERRINJ_VYRUN_INDEX_GARBAGE, ERRINJ_BOOL, {.bparam = false}) \
 	_(ERRINJ_VYRUN_DATA_READ, ERRINJ_BOOL, {.bparam = false}) \
 	_(ERRINJ_BUILD_INDEX, ERRINJ_INT, {.iparam = -1}) \
diff --git a/test/box/errinj.result b/test/box/errinj.result
index c4a1326c..f7140143 100644
--- a/test/box/errinj.result
+++ b/test/box/errinj.result
@@ -24,6 +24,8 @@ errinj.info()
     state: 0
   ERRINJ_VY_SCHED_TIMEOUT:
     state: 0
+  ERRINJ_XLOG_FALLOCATE:
+    state: 0
   ERRINJ_WAL_WRITE_PARTIAL:
     state: -1
   ERRINJ_VY_GC:
diff --git a/test/replication/gc_no_space.result b/test/replication/gc_no_space.result
new file mode 100644
index 00000000..a84ae2db
--- /dev/null
+++ b/test/replication/gc_no_space.result
@@ -0,0 +1,234 @@
+--
+-- This test checks that when the WAL thread runs out of disk
+-- space, it automatically deletes old WAL files and notifies
+-- the TX thread so that the latter can shoot off WAL consumers
+-- that need them. See gh-3397.
+--
+test_run = require('test_run').new()
+---
+...
+engine = test_run:get_cfg('engine')
+---
+...
+fio = require('fio')
+---
+...
+errinj = box.error.injection
+---
+...
+test_run:cmd("setopt delimiter ';'")
+---
+- true
+...
+function check_file_count(dir, glob, count)
+    local files = fio.glob(fio.pathjoin(dir, glob))
+    if #files == count then
+        return true
+    end
+    return false, files
+end;
+---
+...
+function check_wal_count(count)
+    return check_file_count(box.cfg.wal_dir, '*.xlog', count)
+end;
+---
+...
+function check_snap_count(count)
+    return check_file_count(box.cfg.memtx_dir, '*.snap', count)
+end;
+---
+...
+test_run:cmd("setopt delimiter ''");
+---
+- true
+...
+default_checkpoint_count = box.cfg.checkpoint_count
+---
+...
+box.cfg{checkpoint_count = 2}
+---
+...
+test_run:cleanup_cluster()
+---
+...
+box.schema.user.grant('guest', 'replication')
+---
+...
+s = box.schema.space.create('test', {engine = engine})
+---
+...
+_ = s:create_index('pk')
+---
+...
+box.snapshot()
+---
+- ok
+...
+--
+-- Create a few dead replicas to pin WAL files.
+--
+test_run:cmd("create server replica with rpl_master=default, script='replication/replica.lua'")
+---
+- true
+...
+test_run:cmd("start server replica")
+---
+- true
+...
+test_run:cmd("stop server replica")
+---
+- true
+...
+test_run:cmd("cleanup server replica")
+---
+- true
+...
+s:auto_increment{}
+---
+- [1]
+...
+box.snapshot()
+---
+- ok
+...
+test_run:cmd("start server replica")
+---
+- true
+...
+test_run:cmd("stop server replica")
+---
+- true
+...
+test_run:cmd("cleanup server replica")
+---
+- true
+...
+s:auto_increment{}
+---
+- [2]
+...
+box.snapshot()
+---
+- ok
+...
+test_run:cmd("start server replica")
+---
+- true
+...
+test_run:cmd("stop server replica")
+---
+- true
+...
+test_run:cmd("cleanup server replica")
+---
+- true
+...
+test_run:cmd("delete server replica")
+---
+- true
+...
+--
+-- Make a few checkpoints and check that old WAL files are not
+-- deleted.
+--
+s:auto_increment{}
+---
+- [3]
+...
+box.snapshot()
+---
+- ok
+...
+s:auto_increment{}
+---
+- [4]
+...
+box.snapshot()
+---
+- ok
+...
+s:auto_increment{}
+---
+- [5]
+...
+check_wal_count(7)
+---
+- true
+...
+check_snap_count(2)
+---
+- true
+...
+#box.info.gc().consumers -- 3
+---
+- 3
+...
+--
+-- Inject an ENOSPC error and check that the WAL thread deletes
+-- old WAL files to prevent the user from seeing the error.
+--
+errinj.set('ERRINJ_XLOG_FALLOCATE', 3)
+---
+- ok
+...
+s:auto_increment{} -- success
+---
+- [6]
+...
+errinj.info()['ERRINJ_XLOG_FALLOCATE'].state -- 0
+---
+- 0
+...
+check_wal_count(3)
+---
+- true
+...
+check_snap_count(2)
+---
+- true
+...
+#box.info.gc().consumers -- 1
+---
+- 1
+...
+--
+-- Check that the WAL thread never deletes WAL files that are
+-- needed for recovery from a checkpoint.
+--
+errinj.set('ERRINJ_XLOG_FALLOCATE', 2)
+---
+- ok
+...
+s:auto_increment{} -- failure
+---
+- error: Failed to write to disk
+...
+errinj.info()['ERRINJ_XLOG_FALLOCATE'].state -- 0
+---
+- 0
+...
+check_wal_count(2)
+---
+- true
+...
+check_snap_count(2)
+---
+- true
+...
+#box.info.gc().consumers -- 0
+---
+- 0
+...
+s:drop()
+---
+...
+box.schema.user.revoke('guest', 'replication')
+---
+...
+test_run:cleanup_cluster()
+---
+...
+box.cfg{checkpoint_count = default_checkpoint_count}
+---
+...
diff --git a/test/replication/gc_no_space.test.lua b/test/replication/gc_no_space.test.lua
new file mode 100644
index 00000000..32ad18f0
--- /dev/null
+++ b/test/replication/gc_no_space.test.lua
@@ -0,0 +1,103 @@
+--
+-- This test checks that when the WAL thread runs out of disk
+-- space, it automatically deletes old WAL files and notifies
+-- the TX thread so that the latter can shoot off WAL consumers
+-- that need them. See gh-3397.
+--
+test_run = require('test_run').new()
+engine = test_run:get_cfg('engine')
+
+fio = require('fio')
+errinj = box.error.injection
+
+test_run:cmd("setopt delimiter ';'")
+function check_file_count(dir, glob, count)
+    local files = fio.glob(fio.pathjoin(dir, glob))
+    if #files == count then
+        return true
+    end
+    return false, files
+end;
+function check_wal_count(count)
+    return check_file_count(box.cfg.wal_dir, '*.xlog', count)
+end;
+function check_snap_count(count)
+    return check_file_count(box.cfg.memtx_dir, '*.snap', count)
+end;
+test_run:cmd("setopt delimiter ''");
+
+default_checkpoint_count = box.cfg.checkpoint_count
+box.cfg{checkpoint_count = 2}
+
+test_run:cleanup_cluster()
+box.schema.user.grant('guest', 'replication')
+s = box.schema.space.create('test', {engine = engine})
+_ = s:create_index('pk')
+box.snapshot()
+
+--
+-- Create a few dead replicas to pin WAL files.
+--
+test_run:cmd("create server replica with rpl_master=default, script='replication/replica.lua'")
+test_run:cmd("start server replica")
+test_run:cmd("stop server replica")
+test_run:cmd("cleanup server replica")
+
+s:auto_increment{}
+box.snapshot()
+
+test_run:cmd("start server replica")
+test_run:cmd("stop server replica")
+test_run:cmd("cleanup server replica")
+
+s:auto_increment{}
+box.snapshot()
+
+test_run:cmd("start server replica")
+test_run:cmd("stop server replica")
+test_run:cmd("cleanup server replica")
+test_run:cmd("delete server replica")
+
+--
+-- Make a few checkpoints and check that old WAL files are not
+-- deleted.
+--
+s:auto_increment{}
+box.snapshot()
+s:auto_increment{}
+box.snapshot()
+s:auto_increment{}
+
+check_wal_count(7)
+check_snap_count(2)
+#box.info.gc().consumers -- 3
+
+--
+-- Inject an ENOSPC error and check that the WAL thread deletes
+-- old WAL files to prevent the user from seeing the error.
+--
+errinj.set('ERRINJ_XLOG_FALLOCATE', 3)
+s:auto_increment{} -- success
+errinj.info()['ERRINJ_XLOG_FALLOCATE'].state -- 0
+
+check_wal_count(3)
+check_snap_count(2)
+#box.info.gc().consumers -- 1
+
+--
+-- Check that the WAL thread never deletes WAL files that are
+-- needed for recovery from a checkpoint.
+--
+errinj.set('ERRINJ_XLOG_FALLOCATE', 2)
+s:auto_increment{} -- failure
+errinj.info()['ERRINJ_XLOG_FALLOCATE'].state -- 0
+
+check_wal_count(2)
+check_snap_count(2)
+#box.info.gc().consumers -- 0
+
+s:drop()
+box.schema.user.revoke('guest', 'replication')
+test_run:cleanup_cluster()
+
+box.cfg{checkpoint_count = default_checkpoint_count}
diff --git a/test/replication/suite.ini b/test/replication/suite.ini
index f4abc7af..569c9048 100644
--- a/test/replication/suite.ini
+++ b/test/replication/suite.ini
@@ -3,7 +3,7 @@ core = tarantool
 script =  master.lua
 description = tarantool/box, replication
 disabled = consistent.test.lua
-release_disabled = catch.test.lua errinj.test.lua gc.test.lua before_replace.test.lua quorum.test.lua recover_missing_xlog.test.lua sync.test.lua
+release_disabled = catch.test.lua errinj.test.lua gc.test.lua gc_no_space.test.lua before_replace.test.lua quorum.test.lua recover_missing_xlog.test.lua sync.test.lua
 config = suite.cfg
 lua_libs = lua/fast_replica.lua lua/rlimit.lua
 long_run = prune.test.lua
-- 
2.11.0



