[PATCH v2 7/8] vinyl: factor out helpers for accounting dump/compaction

Vladimir Davydov vdavydov.dev at gmail.com
Sun Sep 16 20:06:50 MSK 2018


Factor out the dump/compaction accounting helpers so that we can easily
extend them to account the stats not only per LSM tree, but also
globally, in vy_lsm_env.
---
 src/box/vy_lsm.c       | 20 ++++++++++++++++++++
 src/box/vy_lsm.h       | 16 ++++++++++++++++
 src/box/vy_scheduler.c | 29 +++++++++++++++--------------
 3 files changed, 51 insertions(+), 14 deletions(-)

diff --git a/src/box/vy_lsm.c b/src/box/vy_lsm.c
index 6b9d0e6d..dbea2898 100644
--- a/src/box/vy_lsm.c
+++ b/src/box/vy_lsm.c
@@ -747,6 +747,26 @@ vy_lsm_unacct_range(struct vy_lsm *lsm, struct vy_range *range)
 				 &range->compact_queue);
 }
 
+void
+vy_lsm_acct_dump(struct vy_lsm *lsm,
+		 const struct vy_stmt_counter *in,
+		 const struct vy_disk_stmt_counter *out)
+{
+	lsm->stat.disk.dump.count++;
+	vy_stmt_counter_add(&lsm->stat.disk.dump.in, in);
+	vy_disk_stmt_counter_add(&lsm->stat.disk.dump.out, out);
+}
+
+void
+vy_lsm_acct_compaction(struct vy_lsm *lsm,
+		       const struct vy_disk_stmt_counter *in,
+		       const struct vy_disk_stmt_counter *out)
+{
+	lsm->stat.disk.compact.count++;
+	vy_disk_stmt_counter_add(&lsm->stat.disk.compact.in, in);
+	vy_disk_stmt_counter_add(&lsm->stat.disk.compact.out, out);
+}
+
 int
 vy_lsm_rotate_mem(struct vy_lsm *lsm)
 {
diff --git a/src/box/vy_lsm.h b/src/box/vy_lsm.h
index ba2feeef..19f82e34 100644
--- a/src/box/vy_lsm.h
+++ b/src/box/vy_lsm.h
@@ -457,6 +457,22 @@ void
 vy_lsm_unacct_range(struct vy_lsm *lsm, struct vy_range *range);
 
 /**
+ * Account dump in LSM tree statistics.
+ */
+void
+vy_lsm_acct_dump(struct vy_lsm *lsm,
+		 const struct vy_stmt_counter *in,
+		 const struct vy_disk_stmt_counter *out);
+
+/**
+ * Account compaction in LSM tree statistics.
+ */
+void
+vy_lsm_acct_compaction(struct vy_lsm *lsm,
+		       const struct vy_disk_stmt_counter *in,
+		       const struct vy_disk_stmt_counter *out);
+
+/**
  * Allocate a new active in-memory index for an LSM tree while
  * moving the old one to the sealed list. Used by the dump task
  * in order not to bother about synchronization with concurrent
diff --git a/src/box/vy_scheduler.c b/src/box/vy_scheduler.c
index e4afeafd..2f85424a 100644
--- a/src/box/vy_scheduler.c
+++ b/src/box/vy_scheduler.c
@@ -1090,6 +1090,8 @@ vy_task_dump_complete(struct vy_task *task)
 	struct vy_lsm *lsm = task->lsm;
 	struct vy_run *new_run = task->new_run;
 	int64_t dump_lsn = new_run->dump_lsn;
+	struct vy_disk_stmt_counter dump_out = new_run->count;
+	struct vy_stmt_counter dump_in;
 	struct tuple_format *key_format = lsm->env->key_format;
 	struct vy_mem *mem, *next_mem;
 	struct vy_slice **new_slices, *slice;
@@ -1178,12 +1180,8 @@ vy_task_dump_complete(struct vy_task *task)
 	if (vy_log_tx_commit() < 0)
 		goto fail_free_slices;
 
-	/*
-	 * Account the new run.
-	 */
+	/* Account the new run. */
 	vy_lsm_add_run(lsm, new_run);
-	vy_disk_stmt_counter_add(&lsm->stat.disk.dump.out, &new_run->count);
-
 	/* Drop the reference held by the task. */
 	vy_run_unref(new_run);
 
@@ -1212,16 +1210,18 @@ vy_task_dump_complete(struct vy_task *task)
 
 delete_mems:
 	/*
-	 * Delete dumped in-memory trees.
+	 * Delete dumped in-memory trees and account dump in
+	 * LSM tree statistics.
 	 */
+	vy_stmt_counter_reset(&dump_in);
 	rlist_foreach_entry_safe(mem, &lsm->sealed, in_sealed, next_mem) {
 		if (mem->generation > scheduler->dump_generation)
 			continue;
-		vy_stmt_counter_add(&lsm->stat.disk.dump.in, &mem->count);
+		vy_stmt_counter_add(&dump_in, &mem->count);
 		vy_lsm_delete_mem(lsm, mem);
 	}
 	lsm->dump_lsn = MAX(lsm->dump_lsn, dump_lsn);
-	lsm->stat.disk.dump.count++;
+	vy_lsm_acct_dump(lsm, &dump_in, &dump_out);
 
 	/* The iterator has been cleaned up in a worker thread. */
 	task->wi->iface->close(task->wi);
@@ -1443,6 +1443,8 @@ vy_task_compact_complete(struct vy_task *task)
 	struct vy_lsm *lsm = task->lsm;
 	struct vy_range *range = task->range;
 	struct vy_run *new_run = task->new_run;
+	struct vy_disk_stmt_counter compact_out = new_run->count;
+	struct vy_disk_stmt_counter compact_in;
 	struct vy_slice *first_slice = task->first_slice;
 	struct vy_slice *last_slice = task->last_slice;
 	struct vy_slice *slice, *next_slice, *new_slice = NULL;
@@ -1526,15 +1528,14 @@ vy_task_compact_complete(struct vy_task *task)
 	 */
 	if (new_slice != NULL) {
 		vy_lsm_add_run(lsm, new_run);
-		vy_disk_stmt_counter_add(&lsm->stat.disk.compact.out,
-					 &new_run->count);
 		/* Drop the reference held by the task. */
 		vy_run_unref(new_run);
 	} else
 		vy_run_discard(new_run);
 
 	/*
-	 * Replace compacted slices with the resulting slice.
+	 * Replace compacted slices with the resulting slice and
+	 * account compaction in LSM tree statistics.
 	 *
 	 * Note, since a slice might have been added to the range
 	 * by a concurrent dump while compaction was in progress,
@@ -1545,12 +1546,12 @@ vy_task_compact_complete(struct vy_task *task)
 	vy_lsm_unacct_range(lsm, range);
 	if (new_slice != NULL)
 		vy_range_add_slice_before(range, new_slice, first_slice);
+	vy_disk_stmt_counter_reset(&compact_in);
 	for (slice = first_slice; ; slice = next_slice) {
 		next_slice = rlist_next_entry(slice, in_range);
 		vy_range_remove_slice(range, slice);
 		rlist_add_entry(&compacted_slices, slice, in_range);
-		vy_disk_stmt_counter_add(&lsm->stat.disk.compact.in,
-					 &slice->count);
+		vy_disk_stmt_counter_add(&compact_in, &slice->count);
 		if (slice == last_slice)
 			break;
 	}
@@ -1558,7 +1559,7 @@ vy_task_compact_complete(struct vy_task *task)
 	range->version++;
 	vy_range_update_compact_priority(range, &lsm->opts);
 	vy_lsm_acct_range(lsm, range);
-	lsm->stat.disk.compact.count++;
+	vy_lsm_acct_compaction(lsm, &compact_in, &compact_out);
 
 	/*
 	 * Unaccount unused runs and delete compacted slices.
-- 
2.11.0




More information about the Tarantool-patches mailing list