From: Vladimir Davydov
Subject: [PATCH 2/2] vinyl: apply box.cfg.snap_io_rate_limit to dump/compaction
Date: Tue, 29 May 2018 18:19:37 +0300
Message-Id: <0686cb69b36e79d9646fb44f2923a73a527ab0f9.1527605229.git.vdavydov.dev@gmail.com>
To: kostja@tarantool.org
Cc: tarantool-patches@freelists.org

Vinyl worker threads can consume all disk bandwidth while performing
dump or compaction, thus stalling DML requests, which also need some
disk bandwidth for WAL. Memtx has a similar problem - it needs to write
snapshot files. In the case of memtx, we cope with this problem by
limiting the write rate with the box.cfg.snap_io_rate_limit option.
Let's reuse this option for limiting the vinyl dump/compaction rate.

Closes #3220
---
 src/box/box.cc                   |  5 +++
 src/box/vinyl.c                  |  6 +++
 src/box/vinyl.h                  |  6 +++
 src/box/vy_run.c                 |  7 +++-
 src/box/vy_run.h                 |  2 +
 test/vinyl/snap_io_rate.result   | 85 ++++++++++++++++++++++++++++++++++++++++
 test/vinyl/snap_io_rate.test.lua | 38 ++++++++++++++++++
 7 files changed, 148 insertions(+), 1 deletion(-)
 create mode 100644 test/vinyl/snap_io_rate.result
 create mode 100644 test/vinyl/snap_io_rate.test.lua

diff --git a/src/box/box.cc b/src/box/box.cc
index ac30eb4d..91c5f9f2 100644
--- a/src/box/box.cc
+++ b/src/box/box.cc
@@ -698,6 +698,11 @@ box_set_snap_io_rate_limit(void)
 	assert(memtx != NULL);
 	memtx_engine_set_snap_io_rate_limit(memtx,
 			cfg_getd("snap_io_rate_limit"));
+	struct vinyl_engine *vinyl;
+	vinyl = (struct vinyl_engine *)engine_by_name("vinyl");
+	assert(vinyl != NULL);
+	vinyl_engine_set_snap_io_rate_limit(vinyl,
+			cfg_getd("snap_io_rate_limit"));
 }
 
 void
diff --git a/src/box/vinyl.c b/src/box/vinyl.c
index f0d26874..2abe5696 100644
--- a/src/box/vinyl.c
+++ b/src/box/vinyl.c
@@ -2787,6 +2787,12 @@ vinyl_engine_set_too_long_threshold(struct vinyl_engine *vinyl,
 	vinyl->env->lsm_env.too_long_threshold = too_long_threshold;
 }
 
+void
+vinyl_engine_set_snap_io_rate_limit(struct vinyl_engine *vinyl, double limit)
+{
+	vinyl->env->run_env.snap_io_rate_limit = limit * 1024 * 1024;
+}
+
 /** }}} Environment */
 
 /* {{{ Checkpoint */
diff --git a/src/box/vinyl.h b/src/box/vinyl.h
index ac7afefb..9fec3d0a 100644
--- a/src/box/vinyl.h
+++ b/src/box/vinyl.h
@@ -76,6 +76,12 @@ void
 vinyl_engine_set_too_long_threshold(struct vinyl_engine *vinyl,
 				    double too_long_threshold);
 
+/**
+ * Update snap_io_rate_limit.
+ */
+void
+vinyl_engine_set_snap_io_rate_limit(struct vinyl_engine *vinyl, double limit);
+
 #ifdef __cplusplus
 } /* extern "C" */
 
diff --git a/src/box/vy_run.c b/src/box/vy_run.c
index 1011abce..e2edbcaa 100644
--- a/src/box/vy_run.c
+++ b/src/box/vy_run.c
@@ -1956,6 +1956,8 @@ vy_run_write_index(struct vy_run *run, const char *dirpath,
 	if (xlog_create(&index_xlog, path, 0, &meta) < 0)
 		return -1;
 
+	index_xlog.rate_limit = run->env->snap_io_rate_limit;
+
 	xlog_tx_begin(&index_xlog);
 
 	struct xrow_header xrow;
@@ -2035,7 +2037,10 @@ vy_run_writer_create_xlog(struct vy_run_writer *writer)
 		.filetype = XLOG_META_TYPE_RUN,
 		.instance_uuid = INSTANCE_UUID,
 	};
-	return xlog_create(&writer->data_xlog, path, 0, &meta);
+	if (xlog_create(&writer->data_xlog, path, 0, &meta) != 0)
+		return -1;
+	writer->data_xlog.rate_limit = writer->run->env->snap_io_rate_limit;
+	return 0;
 }
 
 /**
diff --git a/src/box/vy_run.h b/src/box/vy_run.h
index 6551191b..7bafffec 100644
--- a/src/box/vy_run.h
+++ b/src/box/vy_run.h
@@ -54,6 +54,8 @@ struct vy_run_reader;
 
 /** Part of vinyl environment for run read/write */
 struct vy_run_env {
+	/** Write rate limit, in bytes per second. */
+	uint64_t snap_io_rate_limit;
 	/** Mempool for struct vy_page_read_task */
 	struct mempool read_task_pool;
 	/** Key for thread-local ZSTD context */
diff --git a/test/vinyl/snap_io_rate.result b/test/vinyl/snap_io_rate.result
new file mode 100644
index 00000000..ecc8d35c
--- /dev/null
+++ b/test/vinyl/snap_io_rate.result
@@ -0,0 +1,85 @@
+fiber = require('fiber')
+---
+...
+digest = require('digest')
+---
+...
+test_run = require('test_run').new()
+---
+...
+MB = 1024 * 1024
+---
+...
+TUPLE_SIZE = 1024
+---
+...
+TUPLE_COUNT = 100
+---
+...
+snap_io_rate_limit = box.cfg.snap_io_rate_limit
+---
+...
+box.cfg{snap_io_rate_limit = 0.1}
+---
+...
+s = box.schema.space.create('test', {engine = 'vinyl'})
+---
+...
+_ = s:create_index('primary', {page_size = TUPLE_SIZE, run_count_per_level = 1, run_size_ratio = 10})
+---
+...
+function fill() for i = 1, TUPLE_COUNT do s:replace{i, digest.urandom(TUPLE_SIZE)} end end
+---
+...
+-- check that snap_io_rate_limit is applied to dump
+fill()
+---
+...
+t1 = fiber.time()
+---
+...
+box.snapshot()
+---
+- ok
+...
+t2 = fiber.time()
+---
+...
+rate = TUPLE_SIZE * TUPLE_COUNT / (t2 - t1) / MB
+---
+...
+rate < box.cfg.snap_io_rate_limit or rate
+---
+- true
+...
+-- check that snap_io_rate_limit is applied to compaction
+fill()
+---
+...
+t1 = fiber.time()
+---
+...
+box.snapshot()
+---
+- ok
+...
+while s.index.primary:info().disk.compact.count == 0 do fiber.sleep(0.001) end
+---
+...
+t2 = fiber.time()
+---
+...
+-- dump + compaction => multiply by 2
+rate = 2 * TUPLE_SIZE * TUPLE_COUNT / (t2 - t1) / MB
+---
+...
+rate < box.cfg.snap_io_rate_limit or rate
+---
+- true
+...
+s:drop()
+---
+...
+box.cfg{snap_io_rate_limit = snap_io_rate_limit}
+---
+...
diff --git a/test/vinyl/snap_io_rate.test.lua b/test/vinyl/snap_io_rate.test.lua
new file mode 100644
index 00000000..836bf537
--- /dev/null
+++ b/test/vinyl/snap_io_rate.test.lua
@@ -0,0 +1,38 @@
+fiber = require('fiber')
+digest = require('digest')
+test_run = require('test_run').new()
+
+MB = 1024 * 1024
+TUPLE_SIZE = 1024
+TUPLE_COUNT = 100
+
+snap_io_rate_limit = box.cfg.snap_io_rate_limit
+box.cfg{snap_io_rate_limit = 0.1}
+
+s = box.schema.space.create('test', {engine = 'vinyl'})
+_ = s:create_index('primary', {page_size = TUPLE_SIZE, run_count_per_level = 1, run_size_ratio = 10})
+
+function fill() for i = 1, TUPLE_COUNT do s:replace{i, digest.urandom(TUPLE_SIZE)} end end
+
+-- check that snap_io_rate_limit is applied to dump
+fill()
+t1 = fiber.time()
+box.snapshot()
+t2 = fiber.time()
+
+rate = TUPLE_SIZE * TUPLE_COUNT / (t2 - t1) / MB
+rate < box.cfg.snap_io_rate_limit or rate
+
+-- check that snap_io_rate_limit is applied to compaction
+fill()
+t1 = fiber.time()
+box.snapshot()
+while s.index.primary:info().disk.compact.count == 0 do fiber.sleep(0.001) end
+t2 = fiber.time()
+
+-- dump + compaction => multiply by 2
+rate = 2 * TUPLE_SIZE * TUPLE_COUNT / (t2 - t1) / MB
+rate < box.cfg.snap_io_rate_limit or rate
+
+s:drop()
+box.cfg{snap_io_rate_limit = snap_io_rate_limit}
-- 
2.11.0
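
Usage note, not part of the patch: below is a minimal Lua sketch of how the option is expected to be used once this lands. It mirrors the test above; the 'bands' space name and the data volume are illustrative assumptions only, and the limit is given in MB/s, matching the limit * 1024 * 1024 conversion in vinyl_engine_set_snap_io_rate_limit().

-- Minimal sketch, assuming an already bootstrapped instance.
-- The 'bands' space and the amount of data written are hypothetical.
old_limit = box.cfg.snap_io_rate_limit

-- Cap background disk writes (memtx snapshots and, with this patch,
-- vinyl dump/compaction) at roughly 2 MB/s.
box.cfg{snap_io_rate_limit = 2}

s = box.schema.space.create('bands', {engine = 'vinyl', if_not_exists = true})
s:create_index('pk', {if_not_exists = true})
for i = 1, 1000 do s:replace{i, string.rep('x', 1024)} end

-- box.snapshot() forces a checkpoint, which dumps vinyl memory to disk;
-- with this patch the dump is throttled by the limit set above.
box.snapshot()

-- Restore the previous setting; the option can be changed at runtime.
box.cfg{snap_io_rate_limit = old_limit}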