Tarantool development patches archive
 help / color / mirror / Atom feed
* [tarantool-patches] [PATCH] test: enable parallel for python tests and long
@ 2018-12-18  8:43 Sergei Voronezhskii
  2018-12-23 11:04 ` [tarantool-patches] " Alexander Turenko
  0 siblings, 1 reply; 2+ messages in thread
From: Sergei Voronezhskii @ 2018-12-18  8:43 UTC (permalink / raw)
  To: tarantool-patches; +Cc: Alexander Turenko, Kirill Yukhin

Fixed cleanup for python tests:
- box-py/iproto.test.py need to cleanup created cluster
  reproduce:
  - [box-py/iproto.test.py, null]
  - [box-py/bootstrap.test.py, null]
- box-py/bootstrap.test.py should not restart server because of next
  box-py/call.test.py got error:
   `NetworkError: (104, 'Connection reset by peer')`
   at `iproto.authenticate('test', 'test')`
  reproduce:
  - [box-py/iproto.test.py, null]
  - [box-py/bootstrap.test.py, null]
  - [box-py/call.test.py, null]
- replication-py/multi.test.py should not relay on hardcoded server.id
  because a previous test can create some, and `id` will be autoincremented;
  instead we need to calculate vclock_diff
  reproduce:
  - [replication-py/cluster.test.py, null]
  - [replication-py/multi.test.py, null]

Part of: #3232
---
BRANCH: https://github.com/tarantool/tarantool/tree/sergw/enable-parallel-test-py-long
 test/box-py/bootstrap.test.py     |  4 +---
 test/box-py/iproto.result         |  4 ++++
 test/box-py/iproto.test.py        |  1 +
 test/box-py/suite.ini             |  2 +-
 test/engine_long/suite.ini        |  2 +-
 test/long_run-py/suite.ini        |  2 +-
 test/luajit-tap/suite.ini         |  2 +-
 test/replication-py/multi.result  | 19 +++++--------------
 test/replication-py/multi.test.py | 28 +++++++++++++++++-----------
 test/replication-py/suite.ini     |  2 +-
 test/xlog-py/suite.ini            |  2 +-
 11 files changed, 34 insertions(+), 34 deletions(-)

diff --git a/test/box-py/bootstrap.test.py b/test/box-py/bootstrap.test.py
index 9d690b03f..dba6f5ae9 100644
--- a/test/box-py/bootstrap.test.py
+++ b/test/box-py/bootstrap.test.py
@@ -9,8 +9,6 @@ cluster_uuid = yaml.load(server.admin('box.space._schema:get("cluster")',
 sys.stdout.push_filter(cluster_uuid, '<cluster uuid>')
 
 server.admin('box.internal.bootstrap()')
-server.restart()
-
 server.admin('box.space._schema:select{}')
 server.admin('box.space._cluster:select{}')
 server.admin('box.space._space:select{}')
@@ -20,4 +18,4 @@ server.admin('box.space._func:select{}')
 server.admin('box.space._priv:select{}')
 
 # Cleanup
-sys.stdout.pop_filter()
+sys.stdout.clear_all_filters()
diff --git a/test/box-py/iproto.result b/test/box-py/iproto.result
index 37c0adce5..eb84eaf1c 100644
--- a/test/box-py/iproto.result
+++ b/test/box-py/iproto.result
@@ -137,6 +137,10 @@ space:drop()
 space2:drop()
 ---
 ...
+box.space._cluster:delete(2)
+---
+- [2, '0d5bd431-7f3e-4695-a5c2-82de0a9cbc95']
+...
 space = box.schema.create_space('gh1280', { engine = 'vinyl' })
 ---
 ...
diff --git a/test/box-py/iproto.test.py b/test/box-py/iproto.test.py
index 81cdddb61..6bcb027bc 100644
--- a/test/box-py/iproto.test.py
+++ b/test/box-py/iproto.test.py
@@ -317,6 +317,7 @@ c.close()
 
 admin("space:drop()")
 admin("space2:drop()")
+admin("box.space._cluster:delete(2)")
 
 #
 # gh-1280 Segmentation fault on space.select(tuple()) or space.select([2])
diff --git a/test/box-py/suite.ini b/test/box-py/suite.ini
index d7d512b0d..18737e48f 100644
--- a/test/box-py/suite.ini
+++ b/test/box-py/suite.ini
@@ -4,4 +4,4 @@ description = legacy python tests
 script = box.lua
 lua_libs = lua/fiber.lua lua/fifo.lua
 use_unix_sockets = True
-is_parallel = False
+is_parallel = True
diff --git a/test/engine_long/suite.ini b/test/engine_long/suite.ini
index a46c9535f..66df26715 100644
--- a/test/engine_long/suite.ini
+++ b/test/engine_long/suite.ini
@@ -6,4 +6,4 @@ long_run =  delete_replace_update.test.lua delete_insert.test.lua
 lua_libs = suite.lua
 use_unix_sockets = True
 config = engine.cfg
-is_parallel = False
+is_parallel = True
diff --git a/test/long_run-py/suite.ini b/test/long_run-py/suite.ini
index 251f88af6..9050f9b77 100644
--- a/test/long_run-py/suite.ini
+++ b/test/long_run-py/suite.ini
@@ -7,4 +7,4 @@ valgrind_disabled =
 release_disabled =
 lua_libs = suite.lua
 use_unix_sockets = True
-is_parallel = False
+is_parallel = True
diff --git a/test/luajit-tap/suite.ini b/test/luajit-tap/suite.ini
index d5b1aa798..80899270e 100644
--- a/test/luajit-tap/suite.ini
+++ b/test/luajit-tap/suite.ini
@@ -1,4 +1,4 @@
 [default]
 core = app
 description = Luajit tests
-is_parallel = False
+is_parallel = True
diff --git a/test/replication-py/multi.result b/test/replication-py/multi.result
index b4f16b699..2084f763a 100644
--- a/test/replication-py/multi.result
+++ b/test/replication-py/multi.result
@@ -17,24 +17,15 @@ Make a full mesh
 server 1 connected
 server 1 connected
 server 1 connected
-box.info.vclock
----
-- {1: 4}
-...
+vclock_diff: 2
 server 2 connected
 server 2 connected
 server 2 connected
-box.info.vclock
----
-- {1: 4}
-...
+vclock_diff: 2
 server 3 connected
 server 3 connected
 server 3 connected
-box.info.vclock
----
-- {1: 4}
-...
+vclock_diff: 2
 done
 ----------------------------------------------------------------------
 Test inserts
@@ -54,8 +45,8 @@ Insert records
 inserted 60 records
 
 Synchronize
-server 3 done
-server 3 done
+server 1 done
+server 2 done
 server 3 done
 done
 
diff --git a/test/replication-py/multi.test.py b/test/replication-py/multi.test.py
index 224332266..3394d47be 100644
--- a/test/replication-py/multi.test.py
+++ b/test/replication-py/multi.test.py
@@ -10,10 +10,14 @@ ROW_N = REPLICA_N * 20
 
 # master server
 master = server
+
 master.admin("fiber = require('fiber')")
 master.admin("box.schema.user.grant('guest', 'replication')")
 master.admin("box.schema.user.grant('guest', 'execute', 'universe')")
 
+# Get vclock on master
+vclock_cnt = yaml.load(master.admin("box.info.vclock[1]", silent = True))[0]
+
 print '----------------------------------------------------------------------'
 print 'Bootstrap replicas'
 print '----------------------------------------------------------------------'
@@ -48,14 +52,16 @@ for server in cluster:
     server.iproto.py_con.eval("box.cfg { replication = ... }", [sources])
 
 # Wait connections to establish
-for server in cluster:
+for sid, server in enumerate(cluster, 1):
     for server2 in cluster:
         server.iproto.py_con.eval("""
             while #box.info.vclock[...] ~= nil do
                 fiber.sleep(0.01)
             end;""", server2.id)
-        print 'server', server.id, "connected"
-    server.admin("box.info.vclock")
+        print 'server', sid, "connected"
+    vclock_new = yaml.load(server.admin("box.info.vclock[1]", silent = True))[0]
+    print "vclock_diff: {}".format(vclock_new - vclock_cnt)
+
 
 print 'done'
 
@@ -68,9 +74,9 @@ master.admin("_ = box.schema.space.create('test')")
 master.admin("_ = box.space.test:create_index('primary')")
 master_lsn = master.get_lsn(master.id)
 # Wait changes to propagate to replicas
-for server in cluster:
+for sid, server in enumerate(cluster, 1):
     server.wait_lsn(master.id, master_lsn)
-    print 'server', server.id, 'is ok'
+    print 'server', sid, 'is ok'
 print
 
 print 'Insert records'
@@ -81,17 +87,17 @@ print 'inserted %d records' % ROW_N
 print
 
 print 'Synchronize'
-for server1 in cluster:
+for sid, server1 in enumerate(cluster, 1):
     for server2 in cluster:
         server1.wait_lsn(server2.id, server2.get_lsn(server2.id))
-    print 'server', server.id, 'done'
+    print 'server', sid, 'done'
 print 'done'
 print
 
 print 'Check data'
-for server in cluster:
+for sid, server in enumerate(cluster, 1):
     cnt = yaml.load(server.admin("box.space.test:len()", silent = True))[0]
-    print 'server', server.id, 'is', cnt == ROW_N and 'ok' or 'not ok'
+    print 'server', sid, 'is', cnt == ROW_N and 'ok' or 'not ok'
 print 'Done'
 print
 
@@ -100,9 +106,9 @@ print '----------------------------------------------------------------------'
 print 'Cleanup'
 print '----------------------------------------------------------------------'
 
-for server in cluster:
+for sid, server in enumerate(cluster, 1):
     server.stop()
-    print 'server', server.id, 'done'
+    print 'server', sid, 'done'
 print
 
 master.cleanup()
diff --git a/test/replication-py/suite.ini b/test/replication-py/suite.ini
index f0ae98896..df2dd8ef0 100644
--- a/test/replication-py/suite.ini
+++ b/test/replication-py/suite.ini
@@ -2,4 +2,4 @@
 core = tarantool
 script =  master.lua
 description = tarantool/box, replication
-is_parallel = False
+is_parallel = True
diff --git a/test/xlog-py/suite.ini b/test/xlog-py/suite.ini
index d7d512b0d..18737e48f 100644
--- a/test/xlog-py/suite.ini
+++ b/test/xlog-py/suite.ini
@@ -4,4 +4,4 @@ description = legacy python tests
 script = box.lua
 lua_libs = lua/fiber.lua lua/fifo.lua
 use_unix_sockets = True
-is_parallel = False
+is_parallel = True
-- 
2.18.0

^ permalink raw reply	[flat|nested] 2+ messages in thread

* [tarantool-patches] Re: [PATCH] test: enable parallel for python tests and long
  2018-12-18  8:43 [tarantool-patches] [PATCH] test: enable parallel for python tests and long Sergei Voronezhskii
@ 2018-12-23 11:04 ` Alexander Turenko
  0 siblings, 0 replies; 2+ messages in thread
From: Alexander Turenko @ 2018-12-23 11:04 UTC (permalink / raw)
  To: Sergei Voronezhskii; +Cc: tarantool-patches, Kirill Yukhin

Hi!

See comments below and fixups on the
sergw/enable-parallel-test-py-long-fixups branch.

I tested it with the command below and after the fixup it passes.

```
$ ./test-run.py --long -- $(for i in $(seq 1 10); do echo -n "-py/ "; done)
```

WBR, Alexander Turenko.

On Tue, Dec 18, 2018 at 11:43:00AM +0300, Sergei Voronezhskii wrote:
> Fixed cleanup for python tests:
> - box-py/iproto.test.py need to cleanup created cluster
>   reproduce:
>   - [box-py/iproto.test.py, null]
>   - [box-py/bootstrap.test.py, null]
> - box-py/boostrap.test.py should not restart server because of next
>   box-py/call.test.py got error:
>    `NetworkError: (104, 'Connection reset by peer')`
>    at `iproto.authenticate('test', 'test')`
>   reproduce:
>   - [box-py/iproto.test.py, null]
>   - [box-py/bootstrap.test.py, null]
>   - [box-py/call.test.py, null]
> - replication-py/multi.test.py should not relay on hardcoded server.id

relay -> rely

Why it should not? You cleaned up _cluster in box-py/iproto.test.py.
Why not clean it up in replication-py/cluster.test.py too?

>   because previous test can create some and `id` will autoincremented,
>   instead this we need to calculate vclock_diff

You use hardcoded server id 1. 'previous test can create some' -- some
what? I suppose you are about using vclock difference instead of
absolute value, but the message states something different.

>   reproduce:
>   - [replication-py/cluster.test.py, null]
>   - [replication-py/multi.test.py, null]
> 
> Part of: #3232
> ---
> BRANCH: https://github.com/tarantool/tarantool/tree/sergw/enable-parallel-test-py-long
>  test/box-py/bootstrap.test.py     |  4 +---
>  test/box-py/iproto.result         |  4 ++++
>  test/box-py/iproto.test.py        |  1 +
>  test/box-py/suite.ini             |  2 +-
>  test/engine_long/suite.ini        |  2 +-
>  test/long_run-py/suite.ini        |  2 +-
>  test/luajit-tap/suite.ini         |  2 +-
>  test/replication-py/multi.result  | 19 +++++--------------
>  test/replication-py/multi.test.py | 28 +++++++++++++++++-----------
>  test/replication-py/suite.ini     |  2 +-
>  test/xlog-py/suite.ini            |  2 +-
>  11 files changed, 34 insertions(+), 34 deletions(-)
> 
> diff --git a/test/box-py/bootstrap.test.py b/test/box-py/bootstrap.test.py
> index 9d690b03f..dba6f5ae9 100644
> --- a/test/box-py/bootstrap.test.py
> +++ b/test/box-py/bootstrap.test.py
> @@ -9,8 +9,6 @@ cluster_uuid = yaml.load(server.admin('box.space._schema:get("cluster")',
>  sys.stdout.push_filter(cluster_uuid, '<cluster uuid>')
>  
>  server.admin('box.internal.bootstrap()')
> -server.restart()
> -

I suppose now the test case will not test what it was intended. Please,
implement waiting for the server startup.

You should have a strong opinion about why this will not break the test
case before proposing such things in a patch. When you have this
opinion it is easy to write it in the commit message.

I'm not sure whether it really breaks the test case, but I don't see any
justification in the commit message. So I need to investigate the
problem from scratch to prove it is good or bad. And so what gain do we
get from your work?

The code cost is zero w/o investigation. The investigation result was
not shared. I don't understand why do you behave in this way. Again and
again.

>  server.admin('box.space._schema:select{}')
>  server.admin('box.space._cluster:select{}')
>  server.admin('box.space._space:select{}')
> @@ -20,4 +18,4 @@ server.admin('box.space._func:select{}')
>  server.admin('box.space._priv:select{}')
>  
>  # Cleanup
> -sys.stdout.pop_filter()
> +sys.stdout.clear_all_filters()

Two filters were pushed, so two should be popped, I think.

> diff --git a/test/replication-py/multi.test.py b/test/replication-py/multi.test.py
> index 224332266..3394d47be 100644
> --- a/test/replication-py/multi.test.py
> +++ b/test/replication-py/multi.test.py
> @@ -10,10 +10,14 @@ ROW_N = REPLICA_N * 20
>  
>  # master server
>  master = server
> +
>  master.admin("fiber = require('fiber')")
>  master.admin("box.schema.user.grant('guest', 'replication')")
>  master.admin("box.schema.user.grant('guest', 'execute', 'universe')")
>  
> +# Get vclock on master
> +vclock_cnt = yaml.load(master.admin("box.info.vclock[1]", silent = True))[0]
> +
>  print '----------------------------------------------------------------------'
>  print 'Bootstrap replicas'
>  print '----------------------------------------------------------------------'
> @@ -48,14 +52,16 @@ for server in cluster:
>      server.iproto.py_con.eval("box.cfg { replication = ... }", [sources])
>  
>  # Wait connections to establish
> -for server in cluster:
> +for sid, server in enumerate(cluster, 1):
>      for server2 in cluster:
>          server.iproto.py_con.eval("""
>              while #box.info.vclock[...] ~= nil do
>                  fiber.sleep(0.01)
>              end;""", server2.id)
> -        print 'server', server.id, "connected"
> -    server.admin("box.info.vclock")
> +        print 'server', sid, "connected"
> +    vclock_new = yaml.load(server.admin("box.info.vclock[1]", silent = True))[0]
> +    print "vclock_diff: {}".format(vclock_new - vclock_cnt)
> +
>  

Re server.id -> sid: Please, cleanup _cluster in
replication-py/cluster.test.py instead. The same for all such changes
below.

Re vclocks: Why is this vector clock difference a scalar value? Why is
it always for server id 1? Please, describe the problem first. Is it
that cluster.test.py moves the vclock? I think we can just not print it,
because it was an informational thing.

^ permalink raw reply	[flat|nested] 2+ messages in thread

end of thread, other threads:[~2018-12-23 11:04 UTC | newest]

Thread overview: 2+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2018-12-18  8:43 [tarantool-patches] [PATCH] test: enable parallel for python tests and long Sergei Voronezhskii
2018-12-23 11:04 ` [tarantool-patches] " Alexander Turenko

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox