From: Cyrill Gorcunov <gorcunov@gmail.com>
Date: Mon, 17 Aug 2020 16:39:16 +0300
Message-Id: <20200817133918.875558-8-gorcunov@gmail.com>
In-Reply-To: <20200817133918.875558-1-gorcunov@gmail.com>
References: <20200817133918.875558-1-gorcunov@gmail.com>
Subject: [Tarantool-patches] [PATCH v8 7/9] applier: process synchro requests without txn engine
List-Id: Tarantool development patches
To: tml
Cc: Vladislav Shpilevoy

Transaction processing code is rather heavy simply because transactions
carry various data and involve a number of other mechanisms to proceed.
In turn, when we receive a confirm or rollback packet from another node
in the cluster, we just need to inspect the limbo queue and write this
packet into the WAL journal. So calling a bunch of txn engine helpers
is simply a waste of cycles.

Thus let's handle such packets in a special lightweight way instead:

 - allocate a synchro_entry structure which carries the journal entry
   itself and the encoded message;
 - process the limbo queue to mark confirmed/rollback'ed messages;
 - finally write this synchro_entry into the journal.

This is way simpler.

Part-of #5129

Suggested-by: Vladislav Shpilevoy
Co-developed-by: Vladislav Shpilevoy
Signed-off-by: Cyrill Gorcunov <gorcunov@gmail.com>
---
 src/box/applier.cc | 169 +++++++++++++++++++++++++++++++++++++++------
 1 file changed, 149 insertions(+), 20 deletions(-)

diff --git a/src/box/applier.cc b/src/box/applier.cc
index 860a18681..83f6da461 100644
--- a/src/box/applier.cc
+++ b/src/box/applier.cc
@@ -51,8 +51,10 @@
 #include "schema.h"
 #include "txn.h"
 #include "box.h"
+#include "xrow.h"
 #include "scoped_guard.h"
 #include "txn_limbo.h"
+#include "journal.h"
 
 STRS(applier_state, applier_STATE);
 
@@ -772,19 +774,9 @@ applier_read_tx(struct applier *applier, struct stailq *rows)
 	} while (!applier_last_row(rows)->is_commit);
 }
 
-static int
-applier_txn_rollback_cb(struct trigger *trigger, void *event)
+static void
+applier_rollback_by_wal_io(void)
 {
-	(void) trigger;
-	struct txn *txn = (struct txn *) event;
-	/*
-	 * Synchronous transaction rollback due to receiving a
-	 * ROLLBACK entry is a normal event and requires no
-	 * special handling.
-	 */
-	if (txn->signature == TXN_SIGNATURE_SYNC_ROLLBACK)
-		return 0;
-
 	/*
 	 * Setup shared applier diagnostic area.
 	 *
@@ -793,19 +785,32 @@ applier_txn_rollback_cb(struct trigger *trigger, void *event)
 	 * diag use per-applier diag instead all the time
 	 * (which actually already present in the structure).
 	 *
-	 * But remember that transactions are asynchronous
-	 * and rollback may happen a way latter after it
-	 * passed to the journal engine.
+	 * But remember that WAL writes are asynchronous and
+	 * rollback may happen a way later after it was passed to
+	 * the journal engine.
 	 */
 	diag_set(ClientError, ER_WAL_IO);
 	diag_set_error(&replicaset.applier.diag,
 		       diag_last_error(diag_get()));
 
-	/* Broadcast the rollback event across all appliers. */
-	trigger_run(&replicaset.applier.on_rollback, event);
-
+	/* Broadcast the rollback across all appliers. */
+	trigger_run(&replicaset.applier.on_rollback, NULL);
 	/* Rollback applier vclock to the committed one. */
 	vclock_copy(&replicaset.applier.vclock, &replicaset.vclock);
+}
+
+static int
+applier_txn_rollback_cb(struct trigger *trigger, void *event)
+{
+	(void) trigger;
+	struct txn *txn = (struct txn *) event;
+	/*
+	 * Synchronous transaction rollback due to receiving a
+	 * ROLLBACK entry is a normal event and requires no
+	 * special handling.
+	 */
+	if (txn->signature != TXN_SIGNATURE_SYNC_ROLLBACK)
+		applier_rollback_by_wal_io();
 	return 0;
 }
 
@@ -818,6 +823,110 @@ applier_txn_wal_write_cb(struct trigger *trigger, void *event)
 	return 0;
 }
 
+struct synchro_entry {
+	/** Encoded form of a synchro record. */
+	struct synchro_body_bin body_bin;
+
+	/** xrow to write, used by the journal engine. */
+	struct xrow_header row;
+
+	/**
+	 * The journal entry itself. Note since
+	 * it has unsized array it must be the
+	 * last entry in the structure.
+	 */
+	struct journal_entry journal_entry;
+};
+
+static void
+synchro_entry_delete(struct synchro_entry *entry)
+{
+	free(entry);
+}
+
+/**
+ * Async write journal completion.
+ */
+static void
+apply_synchro_row_cb(struct journal_entry *entry)
+{
+	assert(entry->complete_data != NULL);
+	struct synchro_entry *synchro_entry =
+		(struct synchro_entry *)entry->complete_data;
+	if (entry->res < 0)
+		applier_rollback_by_wal_io();
+	else
+		trigger_run(&replicaset.applier.on_wal_write, NULL);
+
+	synchro_entry_delete(synchro_entry);
+}
+
+/**
+ * Allocate a new synchro_entry to be passed to
+ * the journal engine in async write way.
+ */
+static struct synchro_entry *
+synchro_entry_new(struct xrow_header *applier_row,
+		  struct synchro_request *req)
+{
+	struct synchro_entry *entry;
+	size_t size = sizeof(*entry) + sizeof(struct xrow_header *);
+
+	/*
+	 * For simplicity we use malloc here but
+	 * probably should provide some cache similar
+	 * to txn cache.
+	 */
+	entry = (struct synchro_entry *)malloc(size);
+	if (entry == NULL) {
+		diag_set(OutOfMemory, size, "malloc", "synchro_entry");
+		return NULL;
+	}
+
+	struct journal_entry *journal_entry = &entry->journal_entry;
+	struct synchro_body_bin *body_bin = &entry->body_bin;
+	struct xrow_header *row = &entry->row;
+
+	journal_entry->rows[0] = row;
+
+	xrow_encode_synchro(row, body_bin, req);
+
+	row->lsn = applier_row->lsn;
+	row->replica_id = applier_row->replica_id;
+
+	journal_entry_create(journal_entry, 1, xrow_approx_len(row),
+			     apply_synchro_row_cb, entry);
+	return entry;
+}
+
+/** Process a synchro request. */
+static int
+apply_synchro_row(struct xrow_header *row)
+{
+	assert(iproto_type_is_synchro_request(row->type));
+
+	struct synchro_request req;
+	if (xrow_decode_synchro(row, &req) != 0)
+		goto err;
+
+	if (txn_limbo_process(&txn_limbo, &req))
+		goto err;
+
+	struct synchro_entry *entry;
+	entry = synchro_entry_new(row, &req);
+	if (entry == NULL)
+		goto err;
+
+	if (journal_write_async(&entry->journal_entry) != 0) {
+		diag_set(ClientError, ER_WAL_IO);
+		goto err;
+	}
+	return 0;
+err:
+	diag_log();
+	return -1;
+}
+
 /**
  * Apply all rows in the rows queue as a single transaction.
  *
@@ -861,13 +970,26 @@ applier_apply_tx(struct stailq *rows)
 		}
 	}
 
+	if (unlikely(iproto_type_is_synchro_request(first_row->type))) {
+		/*
+		 * Synchro messages are not transactions, in terms
+		 * of DML. Always sent and written isolated from
+		 * each other.
+		 */
+		assert(first_row == last_row);
+		if (apply_synchro_row(first_row) != 0)
+			diag_raise();
+		goto success;
+	}
+
 	/**
 	 * Explicitly begin the transaction so that we can
 	 * control fiber->gc life cycle and, in case of apply
 	 * conflict safely access failed xrow object and allocate
 	 * IPROTO_NOP on gc.
 	 */
-	struct txn *txn = txn_begin();
+	struct txn *txn;
+	txn = txn_begin();
 	struct applier_tx_row *item;
 	if (txn == NULL) {
 		latch_unlock(latch);
@@ -936,6 +1058,7 @@ applier_apply_tx(struct stailq *rows)
 	if (txn_commit_async(txn) < 0)
 		goto fail;
 
+success:
 	/*
 	 * The transaction was sent to journal so promote vclock.
 	 *
@@ -1103,7 +1226,13 @@ applier_subscribe(struct applier *applier)
 
 	applier->lag = TIMEOUT_INFINITY;
 
-	/* Register triggers to handle WAL writes and rollbacks. */
+	/*
+	 * Register triggers to handle WAL writes and rollbacks.
+	 *
+	 * Note we use them for synchronous packets handling as well,
+	 * thus when changing make sure that synchro handling won't
+	 * be broken.
+	 */
 	struct trigger on_wal_write;
 	trigger_create(&on_wal_write, applier_on_wal_write, applier, NULL);
 	trigger_add(&replicaset.applier.on_wal_write, &on_wal_write);
-- 
2.26.2
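
P.S. A note for reviewers on the layout of synchro_entry above:
journal_entry ends with an unsized (flexible) rows[] array, which is
why it must be the last member -- a single malloc() then covers both
the fixed fields and the array tail. Below is a minimal standalone
sketch of the same pattern; the names (struct row, struct entry,
struct carrier) are illustrative only, not Tarantool's API, and
embedding a struct with a flexible array member inside another struct
is a GCC/Clang extension, which the real code relies on too.

	#include <stdio.h>
	#include <stdlib.h>

	struct row;

	/* Like journal_entry: ends with a flexible array member. */
	struct entry {
		int n_rows;
		struct row *rows[];
	};

	/*
	 * Like synchro_entry: the member with the unsized array
	 * must come last, otherwise rows[] would overlap whatever
	 * field followed it.
	 */
	struct carrier {
		int payload;
		struct entry entry;
	};

	int main(void)
	{
		/*
		 * One allocation covers the carrier plus room for a
		 * single row pointer, mirroring the
		 * sizeof(*entry) + sizeof(struct xrow_header *)
		 * computation in synchro_entry_new().
		 */
		size_t size = sizeof(struct carrier) + sizeof(struct row *);
		struct carrier *c = malloc(size);
		if (c == NULL)
			return 1;
		c->entry.n_rows = 1;
		c->entry.rows[0] = NULL;
		printf("allocated %zu bytes in one shot\n", size);
		free(c);
		return 0;
	}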