Tarantool development patches archive
 help / color / mirror / Atom feed
* [Tarantool-patches] [PATCH] memtx: allow quota overuse for truncation
@ 2020-12-11 15:37 Ilya Kosarev
  2020-12-14 23:41 ` Vladislav Shpilevoy
  0 siblings, 1 reply; 8+ messages in thread
From: Ilya Kosarev @ 2020-12-11 15:37 UTC (permalink / raw)
  To: v.shpilevoy, alyapunov; +Cc: tarantool-patches

Trying to perform space:truncate() while reaching memtx_memory limit
we could experience slab allocator failure. This behavior seems to be
quite surprising for users. Now we are allowing to overuse memtx quota
for tuples in space _truncate using flag in struct quota.
Truncate tuples are only being allocated with large slabs using malloc
so that the quota can shrink back when they are freed.

Closes #3807
---
Branch: https://github.com/tarantool/tarantool/tree/i.kosarev/gh-3807-safe-alloc-on-truncation
Issue: https://github.com/tarantool/tarantool/issues/3807

 src/box/memtx_engine.c                  |  25 ++++++
 src/box/memtx_engine.h                  |   2 +
 src/box/memtx_space.c                   |  36 ++++----
 test/box/gh-3807-truncate-fail.result   | 104 ++++++++++++++++++++++++
 test/box/gh-3807-truncate-fail.test.lua |  55 +++++++++++++
 test/box/low_memory.lua                 |   8 ++
 6 files changed, 215 insertions(+), 15 deletions(-)
 create mode 100644 test/box/gh-3807-truncate-fail.result
 create mode 100644 test/box/gh-3807-truncate-fail.test.lua
 create mode 100644 test/box/low_memory.lua

diff --git a/src/box/memtx_engine.c b/src/box/memtx_engine.c
index db2bb23333..2f061d9f9c 100644
--- a/src/box/memtx_engine.c
+++ b/src/box/memtx_engine.c
@@ -1306,6 +1306,31 @@ struct tuple_format_vtab memtx_tuple_format_vtab = {
 	memtx_tuple_chunk_new,
 };
 
+struct tuple *
+memtx_truncate_tuple_new(struct tuple_format *format,
+                         const char *data, const char *end)
+{
+	quota_disable(&((struct memtx_engine *)format->engine)->quota);
+	struct tuple *tuple = memtx_tuple_new(format, data, end);
+	quota_enable(&((struct memtx_engine *)format->engine)->quota);
+	return tuple;
+}
+
+void
+memtx_truncate_tuple_delete(struct tuple_format *format, struct tuple *tuple)
+{
+	quota_disable(&((struct memtx_engine *)format->engine)->quota);
+	memtx_tuple_delete(format, tuple);
+	quota_enable(&((struct memtx_engine *)format->engine)->quota);
+}
+
+struct tuple_format_vtab memtx_truncate_tuple_format_vtab = {
+	memtx_truncate_tuple_delete,
+	memtx_truncate_tuple_new,
+	metmx_tuple_chunk_delete,
+	memtx_tuple_chunk_new,
+};
+
 /**
  * Allocate a block of size MEMTX_EXTENT_SIZE for memtx index
  */
diff --git a/src/box/memtx_engine.h b/src/box/memtx_engine.h
index 8b380bf3cc..ddd33f8942 100644
--- a/src/box/memtx_engine.h
+++ b/src/box/memtx_engine.h
@@ -255,6 +255,8 @@ memtx_tuple_delete(struct tuple_format *format, struct tuple *tuple);
 
 /** Tuple format vtab for memtx engine. */
 extern struct tuple_format_vtab memtx_tuple_format_vtab;
+/** Tuple format vtab for space _truncate. */
+extern struct tuple_format_vtab memtx_truncate_tuple_format_vtab;
 
 enum {
 	MEMTX_EXTENT_SIZE = 16 * 1024,
diff --git a/src/box/memtx_space.c b/src/box/memtx_space.c
index 73b4c450eb..cc431ea816 100644
--- a/src/box/memtx_space.c
+++ b/src/box/memtx_space.c
@@ -327,8 +327,9 @@ memtx_space_execute_replace(struct space *space, struct txn *txn,
 	struct memtx_space *memtx_space = (struct memtx_space *)space;
 	struct txn_stmt *stmt = txn_current_stmt(txn);
 	enum dup_replace_mode mode = dup_replace_mode(request->type);
-	stmt->new_tuple = memtx_tuple_new(space->format, request->tuple,
-					  request->tuple_end);
+	stmt->new_tuple = space->format->vtab.tuple_new(space->format,
+							request->tuple,
+							request->tuple_end);
 	if (stmt->new_tuple == NULL)
 		return -1;
 	tuple_ref(stmt->new_tuple);
@@ -412,8 +413,8 @@ memtx_space_execute_update(struct space *space, struct txn *txn,
 	if (new_data == NULL)
 		return -1;
 
-	stmt->new_tuple = memtx_tuple_new(format, new_data,
-					  new_data + new_size);
+	stmt->new_tuple = format->vtab.tuple_new(format, new_data,
+						 new_data + new_size);
 	if (stmt->new_tuple == NULL)
 		return -1;
 	tuple_ref(stmt->new_tuple);
@@ -483,8 +484,9 @@ memtx_space_execute_upsert(struct space *space, struct txn *txn,
 					  format, request->index_base) != 0) {
 			return -1;
 		}
-		stmt->new_tuple = memtx_tuple_new(format, request->tuple,
-						  request->tuple_end);
+		stmt->new_tuple = format->vtab.tuple_new(format,
+							 request->tuple,
+							 request->tuple_end);
 		if (stmt->new_tuple == NULL)
 			return -1;
 		tuple_ref(stmt->new_tuple);
@@ -507,8 +509,8 @@ memtx_space_execute_upsert(struct space *space, struct txn *txn,
 		if (new_data == NULL)
 			return -1;
 
-		stmt->new_tuple = memtx_tuple_new(format, new_data,
-						  new_data + new_size);
+		stmt->new_tuple = format->vtab.tuple_new(format, new_data,
+							 new_data + new_size);
 		if (stmt->new_tuple == NULL)
 			return -1;
 		tuple_ref(stmt->new_tuple);
@@ -559,14 +561,15 @@ memtx_space_ephemeral_replace(struct space *space, const char *tuple,
 				      const char *tuple_end)
 {
 	struct memtx_space *memtx_space = (struct memtx_space *)space;
-	struct tuple *new_tuple = memtx_tuple_new(space->format, tuple,
-						  tuple_end);
+	struct tuple *new_tuple = space->format->vtab.tuple_new(space->format,
+								tuple,
+								tuple_end);
 	if (new_tuple == NULL)
 		return -1;
 	struct tuple *old_tuple;
 	if (memtx_space->replace(space, NULL, new_tuple,
 				 DUP_REPLACE_OR_INSERT, &old_tuple) != 0) {
-		memtx_tuple_delete(space->format, new_tuple);
+		space->format->vtab.tuple_delete(space->format, new_tuple);
 		return -1;
 	}
 	if (old_tuple != NULL)
@@ -1207,11 +1210,14 @@ memtx_space_new(struct memtx_engine *memtx,
 		free(memtx_space);
 		return NULL;
 	}
+	struct tuple_format_vtab *vtab = &memtx_tuple_format_vtab;
+	if (def->id == BOX_TRUNCATE_ID)
+		vtab = &memtx_truncate_tuple_format_vtab;
 	struct tuple_format *format =
-		tuple_format_new(&memtx_tuple_format_vtab, memtx, keys, key_count,
-				 def->fields, def->field_count,
-				 def->exact_field_count, def->dict,
-				 def->opts.is_temporary, def->opts.is_ephemeral);
+		tuple_format_new(vtab, memtx, keys, key_count, def->fields,
+				 def->field_count, def->exact_field_count,
+				 def->dict, def->opts.is_temporary,
+				 def->opts.is_ephemeral);
 	if (format == NULL) {
 		free(memtx_space);
 		return NULL;
diff --git a/test/box/gh-3807-truncate-fail.result b/test/box/gh-3807-truncate-fail.result
new file mode 100644
index 0000000000..1a8f961aef
--- /dev/null
+++ b/test/box/gh-3807-truncate-fail.result
@@ -0,0 +1,104 @@
+-- test-run result file version 2
+test_run = require('test_run').new()
+ | ---
+ | ...
+
+-- Creating tarantool with 32 megabytes memory to make truncate fail easier.
+test_run:cmd("create server master with script='box/low_memory.lua'")
+ | ---
+ | - true
+ | ...
+test_run:cmd('start server master')
+ | ---
+ | - true
+ | ...
+test_run:cmd("switch master")
+ | ---
+ | - true
+ | ...
+
+
+test_run:cmd("setopt delimiter ';'")
+ | ---
+ | - true
+ | ...
+function create_space(name)
+    local space = box.schema.create_space(name)
+    space:format({
+        { name = "id",  type = "unsigned" },
+        { name = "val", type = "str" }
+    })
+    space:create_index('primary', { parts = { 'id' } })
+    return space
+end;
+ | ---
+ | ...
+
+function insert(space, i)
+    space:insert{ i, string.rep('-', 256) }
+end;
+ | ---
+ | ...
+
+function fill_space(space, start)
+    local err = nil
+    local i = start
+    while err == nil do _, err = pcall(insert, space, i) i = i + 1 end
+end;
+ | ---
+ | ...
+
+-- Creating space if possible. If the space creation fails, stacking
+-- some more tuples into the test space to exhaust slabs.
+-- Then trying to truncate all spaces except the filled one.
+-- Truncate shouldn't fail.
+function stress_truncation(i)
+    local res, space = pcall(create_space, 'test' .. tostring(i))
+    if res then spaces[i] = space return end
+    fill_space(box.space.test, box.space.test:len())
+    for _, s in pairs(spaces) do s:truncate() end
+end;
+ | ---
+ | ...
+test_run:cmd("setopt delimiter ''");
+ | ---
+ | - true
+ | ...
+
+
+_ = create_space('test')
+ | ---
+ | ...
+fill_space(box.space.test, 0)
+ | ---
+ | ...
+
+spaces = {}
+ | ---
+ | ...
+counter = 0
+ | ---
+ | ...
+status, res = true, nil
+ | ---
+ | ...
+while status and counter < 42 do status, res = pcall(stress_truncation, counter) counter = counter + 1 end
+ | ---
+ | ...
+status
+ | ---
+ | - true
+ | ...
+res
+ | ---
+ | - null
+ | ...
+
+-- Cleanup.
+test_run:cmd('switch default')
+ | ---
+ | - true
+ | ...
+test_run:drop_cluster({'master'})
+ | ---
+ | ...
diff --git a/test/box/gh-3807-truncate-fail.test.lua b/test/box/gh-3807-truncate-fail.test.lua
new file mode 100644
index 0000000000..1e2dd5d750
--- /dev/null
+++ b/test/box/gh-3807-truncate-fail.test.lua
@@ -0,0 +1,55 @@
+test_run = require('test_run').new()
+
+-- Creating tarantool with 32 megabytes memory to make truncate fail easier.
+test_run:cmd("create server master with script='box/low_memory.lua'")
+test_run:cmd('start server master')
+test_run:cmd("switch master")
+
+
+test_run:cmd("setopt delimiter ';'")
+function create_space(name)
+    local space = box.schema.create_space(name)
+    space:format({
+        { name = "id",  type = "unsigned" },
+        { name = "val", type = "str" }
+    })
+    space:create_index('primary', { parts = { 'id' } })
+    return space
+end;
+
+function insert(space, i)
+    space:insert{ i, string.rep('-', 256) }
+end;
+
+function fill_space(space, start)
+    local err = nil
+    local i = start
+    while err == nil do _, err = pcall(insert, space, i) i = i + 1 end
+end;
+
+-- Creating space if possible. If the space creation fails, stacking
+-- some more tuples into the test space to exhaust slabs.
+-- Then trying to truncate all spaces except the filled one.
+-- Truncate shouldn't fail.
+function stress_truncation(i)
+    local res, space = pcall(create_space, 'test' .. tostring(i))
+    if res then spaces[i] = space return end
+    fill_space(box.space.test, box.space.test:len())
+    for _, s in pairs(spaces) do s:truncate() end
+end;
+test_run:cmd("setopt delimiter ''");
+
+
+_ = create_space('test')
+fill_space(box.space.test, 0)
+
+spaces = {}
+counter = 0
+status, res = true, nil
+while status and counter < 42 do status, res = pcall(stress_truncation, counter) counter = counter + 1 end
+status
+res
+
+-- Cleanup.
+test_run:cmd('switch default')
+test_run:drop_cluster({'master'})
diff --git a/test/box/low_memory.lua b/test/box/low_memory.lua
new file mode 100644
index 0000000000..46fd26d1b6
--- /dev/null
+++ b/test/box/low_memory.lua
@@ -0,0 +1,8 @@
+#!/usr/bin/env tarantool
+os = require('os')
+box.cfg({
+    listen              = os.getenv("LISTEN"),
+    memtx_memory        = 32 * 1024 * 1024,
+})
+
+require('console').listen(os.getenv('ADMIN'))
-- 
2.17.1

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [Tarantool-patches] [PATCH] memtx: allow quota overuse for truncation
  2020-12-11 15:37 [Tarantool-patches] [PATCH] memtx: allow quota overuse for truncation Ilya Kosarev
@ 2020-12-14 23:41 ` Vladislav Shpilevoy
  2020-12-16 23:43   ` Ilya Kosarev
  0 siblings, 1 reply; 8+ messages in thread
From: Vladislav Shpilevoy @ 2020-12-14 23:41 UTC (permalink / raw)
  To: Ilya Kosarev, alyapunov; +Cc: tarantool-patches

Thanks for the patch!

See 2 comments below.

On 11.12.2020 16:37, Ilya Kosarev wrote:
> Trying to perform space:truncate() while reaching memtx_memory limit
> we could experience slab allocator failure. This behavior seems to be
> quite surprising for users. Now we are allowing to overuse memtx quota
> for tuples in space _truncate using flag in struct quota.
> Truncate tuples are only being allocated with large slabs using malloc
> so that the quota can shrink back when they are freed.

1. 3807 is also about delete. Why didn't you patch it too? AFAIR, the
fix I proposed was easy - in case something inside memtx_space_execute_delete()
fails due to OOM, we disable quota, try again, and enable quota. In fact,
it would be even simpler than the truncation fix, I suppose.

> Closes #3807
> ---
> Branch: https://github.com/tarantool/tarantool/tree/i.kosarev/gh-3807-safe-alloc-on-truncation
> Issue: https://github.com/tarantool/tarantool/issues/3807
> 
> diff --git a/src/box/memtx_space.c b/src/box/memtx_space.c
> index 73b4c450eb..cc431ea816 100644
> --- a/src/box/memtx_space.c
> +++ b/src/box/memtx_space.c
> @@ -327,8 +327,9 @@ memtx_space_execute_replace(struct space *space, struct txn *txn,
>  	struct memtx_space *memtx_space = (struct memtx_space *)space;
>  	struct txn_stmt *stmt = txn_current_stmt(txn);
>  	enum dup_replace_mode mode = dup_replace_mode(request->type);
> -	stmt->new_tuple = memtx_tuple_new(space->format, request->tuple,
> -					  request->tuple_end);
> +	stmt->new_tuple = space->format->vtab.tuple_new(space->format,
> +							request->tuple,
> +							request->tuple_end);

2. Seems like an expensive change. You added +2 pointer
dereferences to a hot path.

Last time when you worked on that I proposed to make space_truncate
use box.begin + quota disable + box_upsert + quota enable + commit.
So there are no yields between quota enable and disable. And no changes
in the code not related to truncation. Why didn't it work?

>  	if (stmt->new_tuple == NULL)
>  		return -1;
>  	tuple_ref(stmt->new_tuple);

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [Tarantool-patches] [PATCH] memtx: allow quota overuse for truncation
  2020-12-14 23:41 ` Vladislav Shpilevoy
@ 2020-12-16 23:43   ` Ilya Kosarev
  2020-12-20 16:13     ` Vladislav Shpilevoy
  0 siblings, 1 reply; 8+ messages in thread
From: Ilya Kosarev @ 2020-12-16 23:43 UTC (permalink / raw)
  To: Vladislav Shpilevoy; +Cc: tarantool-patches

[-- Attachment #1: Type: text/plain, Size: 3036 bytes --]


Hi!
 
Thanks for your review.
 
2 answers below. 
>Вторник, 15 декабря 2020, 2:41 +03:00 от Vladislav Shpilevoy <v.shpilevoy@tarantool.org>:
> 
>Thanks for the patch!
>
>See 2 comments below.
>
>On 11.12.2020 16:37, Ilya Kosarev wrote:
>> Trying to perform space:truncate() while reaching memtx_memory limit
>> we could experience slab allocator failure. This behavior seems to be
>> quite surprising for users. Now we are allowing to overuse memtx quota
>> for tuples in space _truncate using flag in struct quota.
>> Truncate tuples are only being allocated with large slabs using malloc
>> so that the quota can shrink back when they are freed.
>
>1. 3807 is also about delete. Why didn't you patch it too? AFAIR, the
>fix I proposed was easy - in case something inside memtx_space_execute_delete()
>fails due to OOM, we disable quota, try again, and enable quota. In fact,
>it would be even simpler than the truncation fix, I suppose.
There is no reproducer for the delete() failure. And we discussed back in February
that delete() has to work through reserves. I see that there is a theoretical possibility
for reserves to be exhausted, but I can’t see the exact sequence of actions to reach it.
Thus I think it is not ok to use quota disabling for delete().
>
>> Closes #3807
>> ---
>> Branch:  https://github.com/tarantool/tarantool/tree/i.kosarev/gh-3807-safe-alloc-on-truncation
>> Issue:  https://github.com/tarantool/tarantool/issues/3807
>>
>> diff --git a/src/box/memtx_space.c b/src/box/memtx_space.c
>> index 73b4c450eb..cc431ea816 100644
>> --- a/src/box/memtx_space.c
>> +++ b/src/box/memtx_space.c
>> @@ -327,8 +327,9 @@ memtx_space_execute_replace(struct space *space, struct txn *txn,
>> struct memtx_space *memtx_space = (struct memtx_space *)space;
>> struct txn_stmt *stmt = txn_current_stmt(txn);
>> enum dup_replace_mode mode = dup_replace_mode(request->type);
>> - stmt->new_tuple = memtx_tuple_new(space->format, request->tuple,
>> - request->tuple_end);
>> + stmt->new_tuple = space->format->vtab.tuple_new(space->format,
>> + request->tuple,
>> + request->tuple_end);
>
>2. Seems like an expensive change. You added +2 pointer
>dereferences to a hot path.
>
>Last time when you worked on that I proposed to make space_truncate
>use box.begin + quota disable + box_upsert + quota enable + commit.
>So there are no yields between quota enable and disable. And no changes
>in the code not related to truncation. Why didn't it work?
That does work, but then it was also discussed that we should use only
huge slabs on disabled quota, so that the quota can shrink back when they are freed.
It also requires specific freeing for those tuples, which is why both new() and
delete() are now used from the vtab, which has a specific version for the truncate space.
There is an option to use only the specific delete() from the vtab and, for allocation,
to patch space_truncate() instead of new(), although it looks more strange.
>
>> if (stmt->new_tuple == NULL)
>> return -1;
>> tuple_ref(stmt->new_tuple); 
 
 
--
Ilya Kosarev
 

[-- Attachment #2: Type: text/html, Size: 4375 bytes --]

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [Tarantool-patches] [PATCH] memtx: allow quota overuse for truncation
  2020-12-16 23:43   ` Ilya Kosarev
@ 2020-12-20 16:13     ` Vladislav Shpilevoy
  2020-12-21 18:15       ` Ilya Kosarev
  0 siblings, 1 reply; 8+ messages in thread
From: Vladislav Shpilevoy @ 2020-12-20 16:13 UTC (permalink / raw)
  To: Ilya Kosarev; +Cc: tarantool-patches

>     > diff --git a/src/box/memtx_space.c b/src/box/memtx_space.c
>     > index 73b4c450eb..cc431ea816 100644
>     > --- a/src/box/memtx_space.c
>     > +++ b/src/box/memtx_space.c
>     > @@ -327,8 +327,9 @@ memtx_space_execute_replace(struct space *space, struct txn *txn,
>     > struct memtx_space *memtx_space = (struct memtx_space *)space;
>     > struct txn_stmt *stmt = txn_current_stmt(txn);
>     > enum dup_replace_mode mode = dup_replace_mode(request->type);
>     > - stmt->new_tuple = memtx_tuple_new(space->format, request->tuple,
>     > - request->tuple_end);
>     > + stmt->new_tuple = space->format->vtab.tuple_new(space->format,
>     > + request->tuple,
>     > + request->tuple_end);
> 
>     2. Seems like an expensive change. You added +2 pointer
>     dereferences to a hot path.
> 
>     Last time when you worked on that I proposed to make space_truncate
>     use box.begin + quota disable + box_upsert + quota enable + commit.
>     So there are no yields between quota enable and disable. And no changes
>     in the code not related to truncation. Why didn't it work?
> 
> That does work, but then it was also discussed that we should use only
> huge slabs on disabled quota, so that quota can shrink back when freed.

Such discussion results are far from something trivial and obvious. Please,
specify that in the commit message and in the comments if possible.

Also I don't understand why can't you do that on the slab arena
level. Start allocating only 'huge' slabs if the quota is full, but
disabled.

> It also requires specific freeing for those tuples, that is why both new() and
> delete() are now used from vtab, which has specific version for truncate space.

Why do you need special freeing? memtx_tuple_delete() does not allocate
anything.

> There is an option to use only specific delete() from vtab and for allocation
> patch space_truncate() instead of new(), although it looks more strange.

Didn't understand what you mean.

> 
> 
>     > if (stmt->new_tuple == NULL)
>     > return -1;
>     > tuple_ref(stmt->new_tuple);
> 
>  
>  
> --
> Ilya Kosarev
>  

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [Tarantool-patches] [PATCH] memtx: allow quota overuse for truncation
  2020-12-20 16:13     ` Vladislav Shpilevoy
@ 2020-12-21 18:15       ` Ilya Kosarev
  2020-12-22 13:28         ` Vladislav Shpilevoy
  0 siblings, 1 reply; 8+ messages in thread
From: Ilya Kosarev @ 2020-12-21 18:15 UTC (permalink / raw)
  To: Vladislav Shpilevoy; +Cc: tarantool-patches

[-- Attachment #1: Type: text/plain, Size: 3415 bytes --]


Hi!
 
Answers below. 
>Воскресенье, 20 декабря 2020, 19:13 +03:00 от Vladislav Shpilevoy <v.shpilevoy@tarantool.org>:
> 
>> > diff --git a/src/box/memtx_space.c b/src/box/memtx_space.c
>> > index 73b4c450eb..cc431ea816 100644
>> > --- a/src/box/memtx_space.c
>> > +++ b/src/box/memtx_space.c
>> > @@ -327,8 +327,9 @@ memtx_space_execute_replace(struct space *space, struct txn *txn,
>> > struct memtx_space *memtx_space = (struct memtx_space *)space;
>> > struct txn_stmt *stmt = txn_current_stmt(txn);
>> > enum dup_replace_mode mode = dup_replace_mode(request->type);
>> > - stmt->new_tuple = memtx_tuple_new(space->format, request->tuple,
>> > - request->tuple_end);
>> > + stmt->new_tuple = space->format->vtab.tuple_new(space->format,
>> > + request->tuple,
>> > + request->tuple_end);
>>
>> 2. Seems like an expensive change. You added +2 pointer
>> dereferences to a hot path.
>>
>> Last time when you worked on that I proposed to make space_truncate
>> use box.begin + quota disable + box_upsert + quota enable + commit.
>> So there are no yields between quota enable and disable. And no changes
>> in the code not related to truncation. Why didn't it work?
>>
>> That does work, but then it was also discussed that we should use only
>> huge slabs on disabled quota, so that quota can shrink back when freed.
>
>Such discussion results are far from something trivial and obvious. Please,
>specify that in the commit message and in the comments if possible.
Right, I will repush with more info on this.
>
>Also I don't understand why can't you do that on the slab arena
>level. Start allocating only 'huge' slabs if the quota is full, but
>disabled.
Yes, allocation in the out-of-quota case can be done at a more
internal level.
But the problem is exactly in freeing you mentioned further.
We need to free in the right way: If we are allocating with
mempool, we need to use mempool_find() properly, while
if we are allocating ‘huge’ slab, we need to free it accordingly.
So the information about the allocation has to be stored
somehow in case we are going to alter it depending on the
current state. I don’t see a convenient place where we can store
it; it looks like it would be even worse to store such information
in struct tuple itself.
This leads to the solution where allocation and deletion can
only be altered per space; thus all the tuples in the _truncate
space are allocated through malloc() and freed accordingly.
>
>> It also requires specific freeing for those tuples, that is why both new() and
>> delete() are now used from vtab, which has specific version for truncate space.
>
>Why do you need special freeing? memtx_tuple_delete() does not allocate
>anything.
Commented above.
>
>> There is an option to use only specific delete() from vtab and for allocation
>> patch space_truncate() instead of new(), although it looks more strange.
>
>Didn't understand what you mean.
I mean that allocation and deletion are now altered for the _truncate space.
Technically, we don’t need to change the way we allocate at the smalloc() and
tuple_new() levels. We can patch space_truncate() instead. But we will
still need to patch tuple_delete() for the correct freeing. This way doesn’t
look good.
>
>>
>>
>> > if (stmt->new_tuple == NULL)
>> > return -1;
>> > tuple_ref(stmt->new_tuple);
>>
>>  
>>  
>> --
>> Ilya Kosarev
>>   
 
 
--
Ilya Kosarev
 

[-- Attachment #2: Type: text/html, Size: 5203 bytes --]

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [Tarantool-patches] [PATCH] memtx: allow quota overuse for truncation
  2020-12-21 18:15       ` Ilya Kosarev
@ 2020-12-22 13:28         ` Vladislav Shpilevoy
  2020-12-22 14:14           ` Ilya Kosarev
  0 siblings, 1 reply; 8+ messages in thread
From: Vladislav Shpilevoy @ 2020-12-22 13:28 UTC (permalink / raw)
  To: Ilya Kosarev; +Cc: tarantool-patches

>     Also I don't understand why can't you do that on the slab arena
>     level. Start allocating only 'huge' slabs if the quota is full, but
>     disabled.
> 
> Yes, allocation in case of out of quota can be done on more
> internal level.
> But the problem is exactly in freeing you mentioned further.
> We need to free in the right way: If we are allocating with
> mempool, we need to use mempool_find() properly, while
> if we are allocating ‘huge’ slab, we need to free it accordingly.
> So the information about the allocation has to be stored
> somehow in case we are going to alter it depending on the
> current state. I don’t see convenient place where we can store
> it, looks like it will be even worse to store such information
> in struct tuple itself.

I don't understand why do you need to store any info. Huge slabs
existed and were freed before your patch, and all worked fine here.
How are they found and freed with quota enabled?

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [Tarantool-patches] [PATCH] memtx: allow quota overuse for truncation
  2020-12-22 13:28         ` Vladislav Shpilevoy
@ 2020-12-22 14:14           ` Ilya Kosarev
  2020-12-22 14:22             ` Vladislav Shpilevoy
  0 siblings, 1 reply; 8+ messages in thread
From: Ilya Kosarev @ 2020-12-22 14:14 UTC (permalink / raw)
  To: Vladislav Shpilevoy; +Cc: tarantool-patches

[-- Attachment #1: Type: text/plain, Size: 1400 bytes --]


>Вторник, 22 декабря 2020, 16:28 +03:00 от Vladislav Shpilevoy <v.shpilevoy@tarantool.org>:
> 
>> Also I don't understand why can't you do that on the slab arena
>> level. Start allocating only 'huge' slabs if the quota is full, but
>> disabled.
>>
>> Yes, allocation in case of out of quota can be done on more
>> internal level.
>> But the problem is exactly in freeing you mentioned further.
>> We need to free in the right way: If we are allocating with
>> mempool, we need to use mempool_find() properly, while
>> if we are allocating ‘huge’ slab, we need to free it accordingly.
>> So the information about the allocation has to be stored
>> somehow in case we are going to alter it depending on the
>> current state. I don’t see convenient place where we can store
>> it, looks like it will be even worse to store such information
>> in struct tuple itself.
>I don't understand why do you need to store any info. Huge slabs
>existed and were freed before your patch, and all worked fine here.
>How are they found and freed with quota enabled?
Proper huge slabs are actually huge, so mempool_find() returns
NULL for them and they are freed properly. Here we are
allocating «huge» slabs whose size is the size of the actual
truncate tuples, which is not really huge. Thus we need specific
info to free them in the right way.
 
 
--
Ilya Kosarev
 

[-- Attachment #2: Type: text/html, Size: 2034 bytes --]

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [Tarantool-patches] [PATCH] memtx: allow quota overuse for truncation
  2020-12-22 14:14           ` Ilya Kosarev
@ 2020-12-22 14:22             ` Vladislav Shpilevoy
  0 siblings, 0 replies; 8+ messages in thread
From: Vladislav Shpilevoy @ 2020-12-22 14:22 UTC (permalink / raw)
  To: Ilya Kosarev; +Cc: tarantool-patches

On 22.12.2020 15:14, Ilya Kosarev wrote:
>     Вторник, 22 декабря 2020, 16:28 +03:00 от Vladislav Shpilevoy <v.shpilevoy@tarantool.org>:
>      
>     > Also I don't understand why can't you do that on the slab arena
>     > level. Start allocating only 'huge' slabs if the quota is full, but
>     > disabled.
>     >
>     > Yes, allocation in case of out of quota can be done on more
>     > internal level.
>     > But the problem is exactly in freeing you mentioned further.
>     > We need to free in the right way: If we are allocating with
>     > mempool, we need to use mempool_find() properly, while
>     > if we are allocating ‘huge’ slab, we need to free it accordingly.
>     > So the information about the allocation has to be stored
>     > somehow in case we are going to alter it depending on the
>     > current state. I don’t see convenient place where we can store
>     > it, looks like it will be even worse to store such information
>     > in struct tuple itself.
> 
>     I don't understand why do you need to store any info. Huge slabs
>     existed and were freed before your patch, and all worked fine here.
>     How are they found and freed with quota enabled?
> 
> Proper huge slabs are actually huge and mempool_find() thus
> returns NULL for them and they are freed properly. Here we are
> allocating «huge» slabs of the size which is the size of actual
> truncate tuples, and it is not really huge. Thus we need specific
> info to free it in the right way.

Ok, why can't you use the existing 'huge' slabs and not re-invent your
own ones?

^ permalink raw reply	[flat|nested] 8+ messages in thread

end of thread, other threads:[~2020-12-22 14:22 UTC | newest]

Thread overview: 8+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2020-12-11 15:37 [Tarantool-patches] [PATCH] memtx: allow quota overuse for truncation Ilya Kosarev
2020-12-14 23:41 ` Vladislav Shpilevoy
2020-12-16 23:43   ` Ilya Kosarev
2020-12-20 16:13     ` Vladislav Shpilevoy
2020-12-21 18:15       ` Ilya Kosarev
2020-12-22 13:28         ` Vladislav Shpilevoy
2020-12-22 14:14           ` Ilya Kosarev
2020-12-22 14:22             ` Vladislav Shpilevoy

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox