From: Vladimir Davydov
Subject: [PATCH 4/7] vinyl: fix force compaction logic
Date: Sun, 2 Sep 2018 23:18:57 +0300
Message-Id: <55436ba89b22a9f490c0642810986672a062cc73.1535917763.git.vdavydov.dev@gmail.com>
To: kostja@tarantool.org
Cc: tarantool-patches@freelists.org

This patch addresses a few problems index.compact() is suffering from,
namely:

 - When a range is split or coalesced, it should inherit the value of
   the needs_compaction flag from the source ranges. Currently, the
   flag is cleared, so the resulting range may never be compacted.

 - If a range has no slices, we shouldn't set the needs_compaction
   flag for it, because it obviously can't be compacted, but currently
   we do.

 - The needs_compaction flag should be cleared as soon as we schedule
   a range for compaction, not when all of its slices have been
   compacted into one, as we presently expect, because the latter may
   never happen under a write-intensive load.
---
 src/box/vy_lsm.c       |  9 +++++++--
 src/box/vy_range.c     | 16 ++--------------
 src/box/vy_range.h     |  8 ++------
 src/box/vy_scheduler.c |  2 ++
 4 files changed, 13 insertions(+), 22 deletions(-)

diff --git a/src/box/vy_lsm.c b/src/box/vy_lsm.c
index 15592fbf..a0d211f8 100644
--- a/src/box/vy_lsm.c
+++ b/src/box/vy_lsm.c
@@ -1040,6 +1040,7 @@ vy_lsm_split_range(struct vy_lsm *lsm, struct vy_range *range)
 			if (new_slice != NULL)
 				vy_range_add_slice(part, new_slice);
 		}
+		part->needs_compaction = range->needs_compaction;
 		part->compact_priority = range->compact_priority;
 	}
 
@@ -1147,6 +1148,8 @@ vy_lsm_coalesce_range(struct vy_lsm *lsm, struct vy_range *range)
 		rlist_splice(&result->slices, &it->slices);
 		result->slice_count += it->slice_count;
 		vy_disk_stmt_counter_add(&result->count, &it->count);
+		if (it->needs_compaction)
+			result->needs_compaction = true;
 		vy_range_delete(it);
 		it = next;
 	}
@@ -1181,8 +1184,10 @@ vy_lsm_force_compaction(struct vy_lsm *lsm)
 	struct vy_range_tree_iterator it;
 
 	vy_range_tree_ifirst(lsm->tree, &it);
-	while ((range = vy_range_tree_inext(&it)) != NULL)
-		vy_range_force_compaction(range);
+	while ((range = vy_range_tree_inext(&it)) != NULL) {
+		range->needs_compaction = true;
+		vy_range_update_compact_priority(range, &lsm->opts);
+	}
 
 	vy_range_heap_update_all(&lsm->range_heap);
 }
diff --git a/src/box/vy_range.c b/src/box/vy_range.c
index 6a55a018..ddcd2ed3 100644
--- a/src/box/vy_range.c
+++ b/src/box/vy_range.c
@@ -262,18 +262,6 @@ vy_range_remove_slice(struct vy_range *range, struct vy_slice *slice)
 	vy_disk_stmt_counter_sub(&range->count, &slice->count);
 }
 
-void
-vy_range_force_compaction(struct vy_range *range)
-{
-	if (range->slice_count == 1) {
-		/* Already compacted. */
-		assert(!range->needs_compaction);
-		return;
-	}
-	range->needs_compaction = true;
-	range->compact_priority = range->slice_count;
-}
-
 /**
  * To reduce write amplification caused by compaction, we follow
  * the LSM tree design. Runs in each range are divided into groups
@@ -304,9 +292,9 @@ vy_range_update_compact_priority(struct vy_range *range,
 	assert(opts->run_count_per_level > 0);
 	assert(opts->run_size_ratio > 1);
 
-	if (range->slice_count == 1) {
+	if (range->slice_count <= 1) {
 		/* Nothing to compact. */
-		range->compact_priority = 1;
+		range->compact_priority = 0;
 		range->needs_compaction = false;
 		return;
 	}
diff --git a/src/box/vy_range.h b/src/box/vy_range.h
index d7031e70..2ca19a1c 100644
--- a/src/box/vy_range.h
+++ b/src/box/vy_range.h
@@ -110,8 +110,8 @@ struct vy_range {
 	 * If this flag is set, the range must be scheduled for
 	 * major compaction, i.e. its compact_priority must be
 	 * raised to max (slice_count). The flag is set by
-	 * vy_range_force_compaction() and cleared automatically
-	 * when all slices of the range have been compacted.
+	 * vy_lsm_force_compaction() and cleared when the range
+	 * is scheduled for compaction.
 	 */
 	bool needs_compaction;
 	/** Number of times the range was compacted. */
@@ -229,10 +229,6 @@ vy_range_add_slice_before(struct vy_range *range, struct vy_slice *slice,
 void
 vy_range_remove_slice(struct vy_range *range, struct vy_slice *slice);
 
-/** Mark a range for major compaction. */
-void
-vy_range_force_compaction(struct vy_range *range);
-
 /**
  * Update compaction priority of a range.
  *
diff --git a/src/box/vy_scheduler.c b/src/box/vy_scheduler.c
index 4959300e..a1ae3f54 100644
--- a/src/box/vy_scheduler.c
+++ b/src/box/vy_scheduler.c
@@ -1604,6 +1604,8 @@ vy_task_compact_new(struct vy_scheduler *scheduler, struct vy_lsm *lsm,
 	assert(n == 0);
 	assert(new_run->dump_lsn >= 0);
 
+	range->needs_compaction = false;
+
 	task->range = range;
 	task->new_run = new_run;
 	task->wi = wi;
-- 
2.11.0
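
Appendix, not part of the patch: a minimal, self-contained C sketch of
the needs_compaction life cycle the patch establishes, namely that the
flag is inherited when ranges are coalesced, never kept on a range
that has nothing to compact, and cleared as soon as compaction is
scheduled. The struct and helpers below are simplified stand-ins for
illustration only, not the real vy_range/vy_lsm API; in particular the
level-based priority computation of vy_range_update_compact_priority()
is omitted.

/*
 * Simplified stand-in for struct vy_range (hypothetical, for
 * illustration only).
 */
#include <stdbool.h>
#include <stdio.h>

struct range {
	int slice_count;        /* number of run slices in the range */
	bool needs_compaction;  /* range is marked for major compaction */
	int compact_priority;   /* number of slices to compact next */
};

/* A range with at most one slice can never be compacted. */
static void
update_compact_priority(struct range *r)
{
	if (r->slice_count <= 1) {
		r->compact_priority = 0;
		r->needs_compaction = false;
		return;
	}
	if (r->needs_compaction)
		r->compact_priority = r->slice_count;
}

/* Coalescing: the result inherits the flag if any source had it set. */
static void
coalesce(struct range *result, const struct range *src, int n)
{
	for (int i = 0; i < n; i++) {
		result->slice_count += src[i].slice_count;
		if (src[i].needs_compaction)
			result->needs_compaction = true;
	}
	update_compact_priority(result);
}

/* Scheduling: the flag is cleared as soon as the task is created. */
static void
schedule_compaction(struct range *r)
{
	r->needs_compaction = false;
}

int
main(void)
{
	struct range src[2] = {
		{ .slice_count = 3, .needs_compaction = true },
		{ .slice_count = 2, .needs_compaction = false },
	};
	struct range merged = { 0 };

	coalesce(&merged, src, 2);
	printf("merged: slices=%d needs_compaction=%d priority=%d\n",
	       merged.slice_count, (int)merged.needs_compaction,
	       merged.compact_priority);

	schedule_compaction(&merged);
	printf("after scheduling: needs_compaction=%d\n",
	       (int)merged.needs_compaction);
	return 0;
}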