[tarantool-patches] Re: [PATCH][vshard] Prohibit reconfiguration of non-dynamic options

Alex Khatskevich avkhatskevich at tarantool.org
Wed Jun 27 22:25:50 MSK 2018


The issue is solved.

I had to make bucket_count common for all tests in the `rebalancer` folder.

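To illustrate the new behavior, here is a minimal sketch (my own
example, not part of the patch; it assumes a storage that was already
configured once with `cfg` and that `cfg.bucket_count` is set):

  -- The first configuration works as before.
  vshard.storage.cfg(cfg, this_replica_uuid)
  -- Changing a non-dynamic option during reconfiguration now fails.
  cfg.bucket_count = cfg.bucket_count + 1
  local ok, err = pcall(vshard.storage.cfg, cfg, this_replica_uuid)
  -- ok == false, err contains:
  -- 'Non-dynamic option bucket_count cannot be reconfigured'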

Here is a full diff:

commit e9219cd33ea78d8dcbe18eb66a5216643af114e0
Author: AKhatskevich <avkhatskevich at tarantool.org>
Date:   Fri Jun 15 16:53:06 2018 +0300

     Prohibit reconfiguration of non-dynamic options

     Currently there are two non-dynamic options:
      * bucket_count
      * shard_index

     They cannot be changed by passing a new value to
     router/storage.cfg.

     Extra changes:
      * bucket_count for test/rebalancer/* was changed to 200:
       1. After this commit it is impossible to reconfigure
          bucket_count, so it must be common for all tests which
          use a common cfg.
       2. 200 was chosen because the run time of some of the tests
          depends on this number.

     Closes #114

diff --git a/test/rebalancer/box_1_a.lua b/test/rebalancer/box_1_a.lua
index 8fddcf0..6d33978 100644
--- a/test/rebalancer/box_1_a.lua
+++ b/test/rebalancer/box_1_a.lua
@@ -21,6 +21,8 @@ if NAME == 'box_4_a' or NAME == 'box_4_b' or
     string.match(NAME, 'fullbox') then
      add_second_replicaset()
  end
+-- Use a small number of buckets to speed up tests.
+cfg.bucket_count = 200
  vshard.storage.cfg(cfg, names.replica_uuid[NAME])

  function init_schema()
diff --git a/test/rebalancer/rebalancer_lock_and_pin.result b/test/rebalancer/rebalancer_lock_and_pin.result
index 95a160a..58583b1 100644
--- a/test/rebalancer/rebalancer_lock_and_pin.result
+++ b/test/rebalancer/rebalancer_lock_and_pin.result
@@ -33,7 +33,7 @@ test_run:switch('box_2_a')
  ---
  - true
  ...
-vshard.storage.bucket_force_create(1501, 1500)
+vshard.storage.bucket_force_create(101, 100)
  ---
  - true
  ...
@@ -41,7 +41,7 @@ test_run:switch('box_1_a')
  ---
  - true
  ...
-vshard.storage.bucket_force_create(1, 1500)
+vshard.storage.bucket_force_create(1, 100)
  ---
  - true
  ...
@@ -97,7 +97,7 @@ info = vshard.storage.info().bucket
  ...
  info.active
  ---
-- 1500
+- 100
  ...
  info.lock
  ---
@@ -135,7 +135,7 @@ info = vshard.storage.info().bucket
  ...
  info.active
  ---
-- 1500
+- 100
  ...
  info.lock
  ---
@@ -278,7 +278,7 @@ info = vshard.storage.info().bucket
  ...
  info.active
  ---
-- 1500
+- 100
  ...
  info.lock
  ---
@@ -290,7 +290,7 @@ test_run:switch('box_2_a')
  ...
  vshard.storage.info().bucket.active
  ---
-- 500
+- 34
  ...
  test_run:switch('box_3_a')
  ---
@@ -298,7 +298,7 @@ test_run:switch('box_3_a')
  ...
  vshard.storage.info().bucket.active
  ---
-- 1000
+- 66
  ...
  --
  -- Test bucket pinning. At first, return to the default
@@ -366,7 +366,7 @@ wait_rebalancer_state('The cluster is balanced ok', test_run)
  ...
  vshard.storage.info().bucket.active
  ---
-- 1000
+- 67
  ...
  status = box.space._bucket.index.status
  ---
@@ -443,12 +443,12 @@ test_run:cmd("setopt delimiter ''");
  -- rebalancer will face with unreachability of the perfect
  -- balance.
  --
-for i = 1, 800 do local ok, err = vshard.storage.bucket_pin(first_id - 1 + i) assert(ok) end
+for i = 1, 60 do local ok, err = vshard.storage.bucket_pin(first_id - 1 + i) assert(ok) end
  ---
  ...
  status:count({vshard.consts.BUCKET.PINNED})
  ---
-- 800
+- 60
  ...
  rs1_cfg.weight = 0.5
  ---
@@ -466,11 +466,11 @@ info = vshard.storage.info().bucket
  ...
  info.active
  ---
-- 800
+- 60
  ...
  info.pinned
  ---
-- 800
+- 60
  ...
  test_run:switch('box_2_a')
  ---
@@ -478,7 +478,7 @@ test_run:switch('box_2_a')
  ...
  vshard.storage.info().bucket.active
  ---
-- 1100
+- 70
  ...
  test_run:switch('box_3_a')
  ---
@@ -486,7 +486,7 @@ test_run:switch('box_3_a')
  ...
  vshard.storage.info().bucket.active
  ---
-- 1100
+- 70
  ...
  test_run:cmd("switch default")
  ---
diff --git a/test/rebalancer/rebalancer_lock_and_pin.test.lua b/test/rebalancer/rebalancer_lock_and_pin.test.lua
index afef135..eae42fb 100644
--- a/test/rebalancer/rebalancer_lock_and_pin.test.lua
+++ b/test/rebalancer/rebalancer_lock_and_pin.test.lua
@@ -16,10 +16,10 @@ util.wait_master(test_run, REPLICASET_2, 'box_2_a')
  --

  test_run:switch('box_2_a')
-vshard.storage.bucket_force_create(1501, 1500)
+vshard.storage.bucket_force_create(101, 100)

  test_run:switch('box_1_a')
-vshard.storage.bucket_force_create(1, 1500)
+vshard.storage.bucket_force_create(1, 100)

  wait_rebalancer_state('The cluster is balanced ok', test_run)

@@ -202,7 +202,7 @@ test_run:cmd("setopt delimiter ''");
  -- rebalancer will face with unreachability of the perfect
  -- balance.
  --
-for i = 1, 800 do local ok, err = vshard.storage.bucket_pin(first_id - 1 + i) assert(ok) end
+for i = 1, 60 do local ok, err = vshard.storage.bucket_pin(first_id - 1 + i) assert(ok) end
  status:count({vshard.consts.BUCKET.PINNED})
  rs1_cfg.weight = 0.5
  vshard.storage.cfg(cfg, names.replica_uuid.box_1_a)
diff --git a/test/rebalancer/restart_during_rebalancing.result b/test/rebalancer/restart_during_rebalancing.result
index 2c2e3ec..84ef6c1 100644
--- a/test/rebalancer/restart_during_rebalancing.result
+++ b/test/rebalancer/restart_during_rebalancing.result
@@ -83,7 +83,7 @@ log = require('log')
  log.info(string.rep('a', 1000))
  ---
  ...
-for i = 1, vshard.consts.DEFAULT_BUCKET_COUNT do box.space._bucket:replace({i, vshard.consts.BUCKET.ACTIVE}) end
+for i = 1, 200 do box.space._bucket:replace({i, vshard.consts.BUCKET.ACTIVE}) end
  ---
  ...
  test_run:switch('router_1')
@@ -93,7 +93,7 @@ test_run:switch('router_1')
  util = require('rebalancer_utils')
  ---
  ...
-util.set_bucket_count(vshard.consts.DEFAULT_BUCKET_COUNT)
+util.set_bucket_count(200)
  ---
  ...
  for i = 1, 4 do vshard.router.discovery_wakeup() end
@@ -201,7 +201,7 @@ test_run:switch('router_1')
  start = fiber.time()
  ---
  ...
-while vshard.router.info().bucket.available_rw ~= vshard.consts.DEFAULT_BUCKET_COUNT do vshard.router.discovery_wakeup() fiber.sleep(0.1) end
+while vshard.router.info().bucket.available_rw ~= 200 do vshard.router.discovery_wakeup() fiber.sleep(0.1) end
  ---
  ...
  fiber.sleep(10 - (fiber.time() - start))
@@ -234,7 +234,7 @@ info
          uri: storage at 127.0.0.1:3305
          uuid: ad40a200-730e-401a-9400-30dbd96dedbd
        bucket:
-        available_rw: 750
+        available_rw: 50
        uuid: ffdfc117-5089-40a8-99f5-0aa914aa2275
        master: *0
      3e9bdda9-ce19-4f9f-b630-116f5dbc7fef:
@@ -244,7 +244,7 @@ info
          uri: storage at 127.0.0.1:3307
          uuid: 535df17b-c325-466c-9320-77f1190c749c
        bucket:
-        available_rw: 750
+        available_rw: 50
        uuid: 3e9bdda9-ce19-4f9f-b630-116f5dbc7fef
        master: *1
      739fe4fb-2850-4cde-9637-10150724c5eb:
@@ -254,7 +254,7 @@ info
          uri: storage at 127.0.0.1:3301
          uuid: 3e01062d-5c1b-4382-b14e-f80a517cb462
        bucket:
-        available_rw: 750
+        available_rw: 50
        uuid: 739fe4fb-2850-4cde-9637-10150724c5eb
        master: *2
      832bbba0-9699-4aa1-907d-c7c7af61f5c9:
@@ -264,14 +264,14 @@ info
          uri: storage at 127.0.0.1:3303
          uuid: 7223fc89-1a0d-480b-a33e-a8d2b117b13d
        bucket:
-        available_rw: 750
+        available_rw: 50
        uuid: 832bbba0-9699-4aa1-907d-c7c7af61f5c9
        master: *3
    bucket:
      unreachable: 0
      available_ro: 0
      unknown: 0
-    available_rw: 3000
+    available_rw: 200
    status: 0
    alerts: []
  ...
@@ -289,8 +289,8 @@ test_run:switch('fullbox_1_a')
  vshard.storage.info().bucket
  ---
  - receiving: 0
-  active: 750
-  total: 750
+  active: 50
+  total: 50
    garbage: 0
    pinned: 0
    sending: 0
@@ -310,8 +310,8 @@ test_run:switch('fullbox_2_a')
  vshard.storage.info().bucket
  ---
  - receiving: 0
-  active: 750
-  total: 750
+  active: 50
+  total: 50
    garbage: 0
    pinned: 0
    sending: 0
@@ -331,8 +331,8 @@ test_run:switch('fullbox_3_a')
  vshard.storage.info().bucket
  ---
  - receiving: 0
-  active: 750
-  total: 750
+  active: 50
+  total: 50
    garbage: 0
    pinned: 0
    sending: 0
@@ -352,8 +352,8 @@ test_run:switch('fullbox_4_a')
  vshard.storage.info().bucket
  ---
  - receiving: 0
-  active: 750
-  total: 750
+  active: 50
+  total: 50
    garbage: 0
    pinned: 0
    sending: 0
diff --git a/test/rebalancer/restart_during_rebalancing.test.lua b/test/rebalancer/restart_during_rebalancing.test.lua
index fdd086c..397d181 100644
--- a/test/rebalancer/restart_during_rebalancing.test.lua
+++ b/test/rebalancer/restart_during_rebalancing.test.lua
@@ -36,11 +36,11 @@ test_run:switch('fullbox_1_a')
  vshard.storage.rebalancer_disable()
  log = require('log')
  log.info(string.rep('a', 1000))
-for i = 1, vshard.consts.DEFAULT_BUCKET_COUNT do box.space._bucket:replace({i, vshard.consts.BUCKET.ACTIVE}) end
+for i = 1, 200 do box.space._bucket:replace({i, vshard.consts.BUCKET.ACTIVE}) end

  test_run:switch('router_1')
  util = require('rebalancer_utils')
-util.set_bucket_count(vshard.consts.DEFAULT_BUCKET_COUNT)
+util.set_bucket_count(200)
  for i = 1, 4 do vshard.router.discovery_wakeup() end
  util.start_loading()
  fiber.sleep(2)
@@ -105,7 +105,7 @@ while killer:status() ~= 'dead' do fiber.sleep(0.1) end
  test_run:switch('router_1')
  -- Wait until all GC, recovery-discovery finish work.
  start = fiber.time()
-while vshard.router.info().bucket.available_rw ~= vshard.consts.DEFAULT_BUCKET_COUNT do vshard.router.discovery_wakeup() fiber.sleep(0.1) end
+while vshard.router.info().bucket.available_rw ~= 200 do vshard.router.discovery_wakeup() fiber.sleep(0.1) end
  fiber.sleep(10 - (fiber.time() - start))
  info = vshard.router.info()
  -- Do not show concrete timeouts. They are not stable.
diff --git a/test/rebalancer/router_1.lua b/test/rebalancer/router_1.lua
index 5ad944b..5fe6dc7 100644
--- a/test/rebalancer/router_1.lua
+++ b/test/rebalancer/router_1.lua
@@ -9,6 +9,8 @@ rs_uuid = names.rs_uuid
  replica_uuid = names.replica_uuid

  box.cfg{listen = 3333}
+-- Use a small number of buckets to speed up tests.
+cfg.bucket_count = 200
  vshard.router.cfg(cfg)

  require('console').listen(os.getenv('ADMIN'))
diff --git a/test/rebalancer/stress_add_remove_rs.result b/test/rebalancer/stress_add_remove_rs.result
index 02263ea..8a955e2 100644
--- a/test/rebalancer/stress_add_remove_rs.result
+++ b/test/rebalancer/stress_add_remove_rs.result
@@ -50,16 +50,13 @@ test_run:switch('box_2_a')
  ---
  - true
  ...
-for i = 1, 100 do box.space._bucket:replace{i, vshard.consts.BUCKET.ACTIVE} end
+for i = 1, 200 do box.space._bucket:replace{i, vshard.consts.BUCKET.ACTIVE} end
  ---
  ...
  test_run:switch('box_1_a')
  ---
  - true
  ...
-cfg.bucket_count = 100
----
-...
  vshard.storage.cfg(cfg, names.replica_uuid.box_1_a)
  ---
  ...
@@ -68,7 +65,7 @@ wait_rebalancer_state('The cluster is balanced ok', test_run)
  ...
  #box.space._bucket.index.status:select{vshard.consts.BUCKET.ACTIVE}
  ---
-- 50
+- 100
  ...
  --
  -- Test the first case of described above.
@@ -80,9 +77,6 @@ test_run:switch('router_1')
  util = require('rebalancer_utils')
  ---
  ...
-cfg.bucket_count = 100
----
-...
  vshard.router.cfg(cfg)
  ---
  ...
@@ -176,7 +170,7 @@ wait_rebalancer_state('The cluster is balanced ok', test_run)
  ...
  #box.space._bucket.index.status:select{vshard.consts.BUCKET.ACTIVE}
  ---
-- 33
+- 67
  ...
  test_run:switch('router_1')
  ---
@@ -195,7 +189,7 @@ test_run:switch('box_1_a')
  ...
  #box.space._bucket.index.status:select{vshard.consts.BUCKET.ACTIVE}
  ---
-- 33
+- 67
  ...
  check_consistency()
  ---
@@ -207,7 +201,7 @@ test_run:switch('box_2_a')
  ...
  #box.space._bucket.index.status:select{vshard.consts.BUCKET.ACTIVE}
  ---
-- 34
+- 67
  ...
  check_consistency()
  ---
@@ -219,7 +213,7 @@ test_run:switch('box_3_a')
  ...
  #box.space._bucket.index.status:select{vshard.consts.BUCKET.ACTIVE}
  ---
-- 33
+- 66
  ...
  check_consistency()
  ---
@@ -354,7 +348,7 @@ test_run:switch('box_1_a')
  ...
  #box.space._bucket.index.status:select{vshard.consts.BUCKET.ACTIVE}
  ---
-- 50
+- 100
  ...
  check_consistency()
  ---
@@ -366,7 +360,7 @@ test_run:switch('box_2_a')
  ...
  #box.space._bucket.index.status:select{vshard.consts.BUCKET.ACTIVE}
  ---
-- 50
+- 100
  ...
  check_consistency()
  ---
diff --git a/test/rebalancer/stress_add_remove_rs.test.lua b/test/rebalancer/stress_add_remove_rs.test.lua
index 342751e..c80df40 100644
--- a/test/rebalancer/stress_add_remove_rs.test.lua
+++ b/test/rebalancer/stress_add_remove_rs.test.lua
@@ -24,9 +24,8 @@ test_run:switch('router_1')
  -- data, that were written during the step execution.
  --
  test_run:switch('box_2_a')
-for i = 1, 100 do box.space._bucket:replace{i, vshard.consts.BUCKET.ACTIVE} end
+for i = 1, 200 do box.space._bucket:replace{i, vshard.consts.BUCKET.ACTIVE} end
  test_run:switch('box_1_a')
-cfg.bucket_count = 100
  vshard.storage.cfg(cfg, names.replica_uuid.box_1_a)
  wait_rebalancer_state('The cluster is balanced ok', test_run)
  #box.space._bucket.index.status:select{vshard.consts.BUCKET.ACTIVE}
@@ -36,7 +35,6 @@ wait_rebalancer_state('The cluster is balanced ok', test_run)
  --
  test_run:switch('router_1')
  util = require('rebalancer_utils')
-cfg.bucket_count = 100
  vshard.router.cfg(cfg)
  vshard.router.discovery_wakeup()
  util.start_loading()
diff --git a/test/rebalancer/stress_add_remove_several_rs.result b/test/rebalancer/stress_add_remove_several_rs.result
index 79639b3..d6008b8 100644
--- a/test/rebalancer/stress_add_remove_several_rs.result
+++ b/test/rebalancer/stress_add_remove_several_rs.result
@@ -54,25 +54,19 @@ test_run:switch('box_2_a')
  ---
  - true
  ...
-cfg.bucket_count = 100
----
-...
  cfg.rebalancer_max_receiving = 2
  ---
  ...
  vshard.storage.cfg(cfg, names.replica_uuid.box_2_a)
  ---
  ...
-for i = 1, 100 do box.space._bucket:replace{i, vshard.consts.BUCKET.ACTIVE} end
+for i = 1, 200 do box.space._bucket:replace{i, vshard.consts.BUCKET.ACTIVE} end
  ---
  ...
  test_run:switch('box_1_a')
  ---
  - true
  ...
-cfg.bucket_count = 100
----
-...
  cfg.rebalancer_max_receiving = 2
  ---
  ...
@@ -89,9 +83,6 @@ test_run:switch('router_1')
  util = require('rebalancer_utils')
  ---
  ...
-cfg.bucket_count = 100
----
-...
  vshard.router.cfg(cfg)
  ---
  ...
@@ -303,7 +294,7 @@ test_run:switch('box_1_a')
  ...
  #box.space._bucket.index.status:select{vshard.consts.BUCKET.ACTIVE}
  ---
-- 25
+- 50
  ...
  check_consistency()
  ---
@@ -315,7 +306,7 @@ test_run:switch('box_2_a')
  ...
  #box.space._bucket.index.status:select{vshard.consts.BUCKET.ACTIVE}
  ---
-- 25
+- 50
  ...
  check_consistency()
  ---
@@ -327,7 +318,7 @@ test_run:switch('box_3_a')
  ...
  #box.space._bucket.index.status:select{vshard.consts.BUCKET.ACTIVE}
  ---
-- 25
+- 50
  ...
  check_consistency()
  ---
@@ -339,7 +330,7 @@ test_run:switch('box_4_a')
  ...
  #box.space._bucket.index.status:select{vshard.consts.BUCKET.ACTIVE}
  ---
-- 25
+- 50
  ...
  check_consistency()
  ---
@@ -496,7 +487,7 @@ test_run:switch('box_1_a')
  ...
  #box.space._bucket.index.status:select{vshard.consts.BUCKET.ACTIVE}
  ---
-- 50
+- 100
  ...
  check_consistency()
  ---
@@ -508,7 +499,7 @@ test_run:switch('box_2_a')
  ...
  #box.space._bucket.index.status:select{vshard.consts.BUCKET.ACTIVE}
  ---
-- 50
+- 100
  ...
  check_consistency()
  ---
diff --git a/test/rebalancer/stress_add_remove_several_rs.test.lua b/test/rebalancer/stress_add_remove_several_rs.test.lua
index 3895363..3cc105e 100644
--- a/test/rebalancer/stress_add_remove_several_rs.test.lua
+++ b/test/rebalancer/stress_add_remove_several_rs.test.lua
@@ -27,20 +27,17 @@ test_run:switch('router_1')
  --

  test_run:switch('box_2_a')
-cfg.bucket_count = 100
  cfg.rebalancer_max_receiving = 2
  vshard.storage.cfg(cfg, names.replica_uuid.box_2_a)
-for i = 1, 100 do box.space._bucket:replace{i, vshard.consts.BUCKET.ACTIVE} end
+for i = 1, 200 do box.space._bucket:replace{i, vshard.consts.BUCKET.ACTIVE} end

  test_run:switch('box_1_a')
-cfg.bucket_count = 100
  cfg.rebalancer_max_receiving = 2
  vshard.storage.cfg(cfg, names.replica_uuid.box_1_a)
  wait_rebalancer_state('The cluster is balanced ok', test_run)

  test_run:switch('router_1')
  util = require('rebalancer_utils')
-cfg.bucket_count = 100
  vshard.router.cfg(cfg)
  vshard.router.discovery_wakeup()
  util.start_loading()
diff --git a/test/router/router.result b/test/router/router.result
index 2ee1bff..ab1808d 100644
--- a/test/router/router.result
+++ b/test/router/router.result
@@ -1057,6 +1057,17 @@ error_messages
  - - Use replica:is_connected(...) instead of replica.is_connected(...)
    - Use replica:safe_uri(...) instead of replica.safe_uri(...)
  ...
+-- gh-114: Check non-dynamic option change during reconfigure.
+non_dynamic_cfg = table.copy(cfg)
+---
+...
+non_dynamic_cfg.shard_index = 'non_default_name'
+---
+...
+util.check_error(vshard.router.cfg, non_dynamic_cfg)
+---
+- Non-dynamic option shard_index cannot be reconfigured
+...
  _ = test_run:cmd("switch default")
  ---
  ...
diff --git a/test/router/router.test.lua b/test/router/router.test.lua
index fae8e24..63616cd 100644
--- a/test/router/router.test.lua
+++ b/test/router/router.test.lua
@@ -389,6 +389,11 @@ end;
  test_run:cmd("setopt delimiter ''");
  error_messages

+-- gh-114: Check non-dynamic option change during reconfigure.
+non_dynamic_cfg = table.copy(cfg)
+non_dynamic_cfg.shard_index = 'non_default_name'
+util.check_error(vshard.router.cfg, non_dynamic_cfg)
+
  _ = test_run:cmd("switch default")
  test_run:drop_cluster(REPLICASET_2)

diff --git a/test/storage/storage.result b/test/storage/storage.result
index d0bf792..5685ccf 100644
--- a/test/storage/storage.result
+++ b/test/storage/storage.result
@@ -720,6 +720,17 @@ test_run:cmd("setopt delimiter ''");
  ---
  - true
  ...
+-- gh-114: Check non-dynamic option change during reconfigure.
+non_dynamic_cfg = table.copy(cfg)
+---
+...
+non_dynamic_cfg.bucket_count = require('vshard.consts').DEFAULT_BUCKET_COUNT + 1
+---
+...
+util.check_error(vshard.storage.cfg, non_dynamic_cfg, names.storage_1_a)
+---
+- Non-dynamic option bucket_count cannot be reconfigured
+...
  _ = test_run:cmd("switch default")
  ---
  ...
diff --git a/test/storage/storage.test.lua b/test/storage/storage.test.lua
index f4bbf0e..72564e1 100644
--- a/test/storage/storage.test.lua
+++ b/test/storage/storage.test.lua
@@ -177,6 +177,11 @@ for _, new_replicaset in pairs(new_replicasets) do
  end;
  test_run:cmd("setopt delimiter ''");

+-- gh-114: Check non-dynamic option change during reconfigure.
+non_dynamic_cfg = table.copy(cfg)
+non_dynamic_cfg.bucket_count = require('vshard.consts').DEFAULT_BUCKET_COUNT + 1
+util.check_error(vshard.storage.cfg, non_dynamic_cfg, names.storage_1_a)
+
  _ = test_run:cmd("switch default")

  test_run:drop_cluster(REPLICASET_2)
diff --git a/test/unit/config.result b/test/unit/config.result
index d3b9a23..10237ee 100644
--- a/test/unit/config.result
+++ b/test/unit/config.result
@@ -546,3 +546,17 @@ lcfg.check(cfg)['sharding']
  replica.url = old_uri
  ---
  ...
+-- gh-114: Check non-dynamic option change during reconfigure.
+cfg_with_non_default = table.copy(cfg)
+---
+...
+cfg.shard_index = nil
+---
+...
+cfg_with_non_default.shard_index = 'non_default_name'
+---
+...
+util.check_error(lcfg.check, cfg, cfg_with_non_default)
+---
+- Non-dynamic option shard_index cannot be reconfigured
+...
diff --git a/test/unit/config.test.lua b/test/unit/config.test.lua
index fd62707..3361766 100644
--- a/test/unit/config.test.lua
+++ b/test/unit/config.test.lua
@@ -217,3 +217,9 @@ lcfg.check(cfg)['sharding']
  replica.uri = 'user:password at localhost'
  lcfg.check(cfg)['sharding']
  replica.url = old_uri
+
+-- gh-114: Check non-dynamic option change during reconfigure.
+cfg_with_non_default = table.copy(cfg)
+cfg.shard_index = nil
+cfg_with_non_default.shard_index = 'non_default_name'
+util.check_error(lcfg.check, cfg, cfg_with_non_default)
diff --git a/vshard/cfg.lua b/vshard/cfg.lua
index caaf19f..d5429af 100644
--- a/vshard/cfg.lua
+++ b/vshard/cfg.lua
@@ -219,16 +219,34 @@ local cfg_template = {
      }},
  }

+--
+-- Names of options which cannot be changed during reconfigure.
+--
+local non_dynamic_options = {
+    'bucket_count', 'shard_index'
+}
+
  --
  -- Check sharding config on correctness. Check types, name and uri
--- uniqueness, master count (in each replicaset must by <= 1).
+-- uniqueness, master count (in each replicaset must be <= 1).
  --
-local function cfg_check(shard_cfg)
+local function cfg_check(shard_cfg, old_cfg)
      if type(shard_cfg) ~= 'table' then
          error('Сonfig must be map of options')
      end
      shard_cfg = table.deepcopy(shard_cfg)
      validate_config(shard_cfg, cfg_template)
+    if not old_cfg then
+        return shard_cfg
+    end
+    -- Check non-dynamic options after default values are added.
+    for _, f_name in pairs(non_dynamic_options) do
+        -- A new option may be added in a new vshard version.
+        if shard_cfg[f_name] ~= old_cfg[f_name] then
+            error(string.format('Non-dynamic option %s ' ..
+                                'cannot be reconfigured', f_name))
+        end
+    end
      return shard_cfg
  end

diff --git a/vshard/router/init.lua b/vshard/router/init.lua
index 21093e5..fc4714d 100644
--- a/vshard/router/init.lua
+++ b/vshard/router/init.lua
@@ -457,7 +457,8 @@ end
  --------------------------------------------------------------------------------

  local function router_cfg(cfg)
-    cfg = lcfg.check(cfg)
+    cfg = lcfg.check(cfg, M.current_cfg)
+    local new_cfg = table.copy(cfg)
      if not M.replicasets then
          log.info('Starting router configuration')
      else
@@ -478,6 +479,7 @@ local function router_cfg(cfg)
      -- TODO: update existing route map in-place
      M.route_map = {}
      M.replicasets = new_replicasets
+    M.current_cfg = new_cfg
      -- Move connections from an old configuration to a new one.
      -- It must be done with no yields to prevent usage both of not
      -- fully moved old replicasets, and not fully built new ones.
diff --git a/vshard/storage/init.lua b/vshard/storage/init.lua
index c80bfbf..6e1194a 100644
--- a/vshard/storage/init.lua
+++ b/vshard/storage/init.lua
@@ -1452,7 +1452,8 @@ local function storage_cfg(cfg, this_replica_uuid)
      if this_replica_uuid == nil then
          error('Usage: cfg(configuration, this_replica_uuid)')
      end
-    cfg = lcfg.check(cfg)
+    cfg = lcfg.check(cfg, M.current_cfg)
+    local new_cfg = table.copy(cfg)
      if cfg.weights or cfg.zone then
          error('Weights and zone are not allowed for storage configuration')
      end
@@ -1578,6 +1579,7 @@ local function storage_cfg(cfg, this_replica_uuid)
      M.shard_index = shard_index
      M.collect_bucket_garbage_interval = collect_bucket_garbage_interval
      M.collect_lua_garbage = collect_lua_garbage
+    M.current_cfg = new_cfg

      if was_master and not is_master then
          local_on_master_disable()
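
For reference, a rough sketch of the resulting lcfg.check contract
(a usage example of mine, not part of the patch; `cfg` and `old_cfg`
are assumed to be valid vshard configuration tables):

  local lcfg = require('vshard.cfg')
  -- Without an old config the call only validates, as before.
  local checked = lcfg.check(cfg)
  -- With an old config it additionally rejects any change of a
  -- non-dynamic option (bucket_count, shard_index); the values are
  -- compared after default values are substituted.
  local ok, err = pcall(lcfg.check, cfg, old_cfg)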




