From mboxrd@z Thu Jan 1 00:00:00 1970 Return-Path: Received: from mail-lj1-f195.google.com (mail-lj1-f195.google.com [209.85.208.195]) (using TLSv1.2 with cipher ECDHE-RSA-AES128-GCM-SHA256 (128/128 bits)) (No client certificate requested) by dev.tarantool.org (Postfix) with ESMTPS id 4CCBD43040B for ; Sat, 15 Aug 2020 00:15:47 +0300 (MSK) Received: by mail-lj1-f195.google.com with SMTP id g6so11303496ljn.11 for ; Fri, 14 Aug 2020 14:15:47 -0700 (PDT) From: Cyrill Gorcunov Date: Sat, 15 Aug 2020 00:14:39 +0300 Message-Id: <20200814211442.667099-6-gorcunov@gmail.com> In-Reply-To: <20200814211442.667099-1-gorcunov@gmail.com> References: <20200814211442.667099-1-gorcunov@gmail.com> MIME-Version: 1.0 Content-Transfer-Encoding: 8bit Subject: [Tarantool-patches] [PATCH v7 5/8] applier: factor out latch locking List-Id: Tarantool development patches List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , To: tml Cc: Vladislav Shpilevoy We will need to reuse these helpers. Part-of #5129 Signed-off-by: Cyrill Gorcunov --- src/box/applier.cc | 43 +++++++++++++++++++++++++++++-------------- 1 file changed, 29 insertions(+), 14 deletions(-) diff --git a/src/box/applier.cc b/src/box/applier.cc index 98fb87375..60689f6d3 100644 --- a/src/box/applier.cc +++ b/src/box/applier.cc @@ -799,6 +799,29 @@ applier_txn_wal_write_cb(struct trigger *trigger, void *event) return 0; } +/* + * In a full mesh topology, the same set of changes + * may arrive via two concurrently running appliers. + * Hence we need a latch to strictly order all changes + * that belong to the same server id. + */ +static inline struct latch * +applier_lock(uint32_t replica_id) +{ + struct replica *replica = replica_by_id(replica_id); + struct latch *latch = (replica ? 
&replica->order_latch : + &replicaset.applier.order_latch); + latch_lock(latch); + return latch; +} + +static inline void +applier_unlock(struct latch *latch) +{ + assert(latch != NULL); + latch_unlock(latch); +} + /** * Apply all rows in the rows queue as a single transaction. * @@ -811,19 +834,11 @@ applier_apply_tx(struct stailq *rows) struct applier_tx_row, next)->row; struct xrow_header *last_row; last_row = &stailq_last_entry(rows, struct applier_tx_row, next)->row; - struct replica *replica = replica_by_id(first_row->replica_id); - /* - * In a full mesh topology, the same set of changes - * may arrive via two concurrently running appliers. - * Hence we need a latch to strictly order all changes - * that belong to the same server id. - */ - struct latch *latch = (replica ? &replica->order_latch : - &replicaset.applier.order_latch); - latch_lock(latch); + struct latch *latch = applier_lock(first_row->replica_id); + if (vclock_get(&replicaset.applier.vclock, last_row->replica_id) >= last_row->lsn) { - latch_unlock(latch); + applier_unlock(latch); return 0; } else if (vclock_get(&replicaset.applier.vclock, first_row->replica_id) >= first_row->lsn) { @@ -855,7 +870,7 @@ applier_apply_tx(struct stailq *rows) struct txn *txn = txn_begin(); struct applier_tx_row *item; if (txn == NULL) { - latch_unlock(latch); + applier_unlock(latch); return -1; } stailq_foreach_entry(item, rows, next) { @@ -930,12 +945,12 @@ applier_apply_tx(struct stailq *rows) */ vclock_follow(&replicaset.applier.vclock, last_row->replica_id, last_row->lsn); - latch_unlock(latch); + applier_unlock(latch); return 0; rollback: txn_rollback(txn); fail: - latch_unlock(latch); + applier_unlock(latch); fiber_gc(); return -1; } -- 2.26.2