From: Cyrill Gorcunov via Tarantool-patches <tarantool-patches@dev.tarantool.org> To: TML <tarantool-patches@dev.tarantool.org> Cc: Vladislav Shpilevoy <v.shpilevoy@tarantool.org> Subject: Re: [Tarantool-patches] [RFC] on downstream.lag design Date: Fri, 4 Jun 2021 01:20:00 +0300 [thread overview] Message-ID: <YLlVkLjUUAiWhH+l@grain> (raw) In-Reply-To: <43b6b0d8-b5e9-e8e5-1b6d-ed550c674c11@tarantool.org> After a f2f conversation with Vlad and Sergey we've got the following approach (if I understand everything correctly): remember the last lag on a master relay and update it only when new acks are coming (by acks we consider not synchro packets but any transaction written on a replica). So I took my previous series https://lists.tarantool.org/tarantool-patches/20210506214132.533913-1-gorcunov@gmail.com/ and modified it to make it so. Guys, take a look please. Note the patch is not for merging because there are some nits I need to address from the previous review, but the general idea remains the same: we remember the transaction time obtained from a master node, and once the transaction is replicated and gets written to the replica's WAL we calculate the time delta and send it back as a transaction lag. 1) It is implied that the nodes have their realtime clocks synchronized, otherwise this scheme won't work. 2) Since there might be idle periods, we remember the last time used for lag computation, and during the idle time the relay simply sustains the old lag value to report. 3) Since the lag computation is a part of the relay structure, if the replica is rebooted the lag is going to be dropped to zero. In turn, if the master node gets rebooted, once the connection is back the replica will report back the last lag it keeps. --- diff --git a/src/box/applier.cc b/src/box/applier.cc index 33181fdbf..2088c3669 100644 --- a/src/box/applier.cc +++ b/src/box/applier.cc @@ -148,6 +148,40 @@ applier_check_sync(struct applier *applier) } } +/** + * Update WAL statistics once a transaction is written. 
+ */ +static void +wal_stat_update(struct applier_wal_stat *wal_st) +{ + struct replica *r = replica_by_id(wal_st->replica_id); + if (likely(r != NULL)) { + assert(wal_st->replica_id == r->wal_st.replica_id); + r->wal_st.txn_start_tm = wal_st->txn_start_tm; + } +} + +/** + * Encode timestamp for ACK sending. We drop the value on purpose + * because it is one time action and if transactions are written + * in a batch we need to account the oldest timestamp to show real + * relay lag. + */ +static void +wal_stat_encode(uint32_t replica_id, struct xrow_header *xrow) +{ + struct replica *r = replica_by_id(replica_id); + if (unlikely(r == NULL)) + return; + + assert(replica_id == r->wal_st.replica_id); + if (r->wal_st.txn_acked_tm != r->wal_st.txn_start_tm) { + r->wal_st.txn_lag = ev_now(loop()) - r->wal_st.txn_start_tm; + r->wal_st.txn_acked_tm = r->wal_st.txn_start_tm; + } + xrow->tm = r->wal_st.txn_lag; +} + /* * Fiber function to write vclock to replication master. * To track connection status, replica answers master @@ -193,6 +227,7 @@ applier_writer_f(va_list ap) applier->has_acks_to_send = false; struct xrow_header xrow; xrow_encode_vclock(&xrow, &replicaset.vclock); + wal_stat_encode(applier->instance_id, &xrow); coio_write_xrow(&io, &xrow); ERROR_INJECT(ERRINJ_APPLIER_SLOW_ACK, { fiber_sleep(0.01); @@ -490,7 +525,7 @@ static uint64_t applier_read_tx(struct applier *applier, struct stailq *rows, double timeout); static int -apply_final_join_tx(struct stailq *rows); +apply_final_join_tx(uint32_t replica_id, struct stailq *rows); /** * A helper struct to link xrow objects in a list. 
@@ -535,7 +570,7 @@ applier_wait_register(struct applier *applier, uint64_t row_count) next)->row); break; } - if (apply_final_join_tx(&rows) != 0) + if (apply_final_join_tx(applier->instance_id, &rows) != 0) diag_raise(); } @@ -756,6 +791,11 @@ applier_txn_wal_write_cb(struct trigger *trigger, void *event) { (void) trigger; (void) event; + + struct applier_wal_stat *wal_st = + (struct applier_wal_stat *)trigger->data; + wal_stat_update(wal_st); + /* Broadcast the WAL write across all appliers. */ trigger_run(&replicaset.applier.on_wal_write, NULL); return 0; @@ -766,6 +806,8 @@ struct synchro_entry { struct synchro_request *req; /** Fiber created the entry. To wakeup when WAL write is done. */ struct fiber *owner; + /** WAL bound statistics. */ + struct applier_wal_stat *wal_st; /** * The base journal entry. It has unsized array and then must be the * last entry in the structure. But can workaround it via a union @@ -789,6 +831,7 @@ apply_synchro_row_cb(struct journal_entry *entry) if (entry->res < 0) { applier_rollback_by_wal_io(); } else { + wal_stat_update(synchro_entry->wal_st); txn_limbo_process(&txn_limbo, synchro_entry->req); trigger_run(&replicaset.applier.on_wal_write, NULL); } @@ -797,7 +840,7 @@ apply_synchro_row_cb(struct journal_entry *entry) /** Process a synchro request. */ static int -apply_synchro_row(struct xrow_header *row) +apply_synchro_row(uint32_t replica_id, struct xrow_header *row) { assert(iproto_type_is_synchro_request(row->type)); @@ -805,6 +848,7 @@ apply_synchro_row(struct xrow_header *row) if (xrow_decode_synchro(row, &req) != 0) goto err; + struct applier_wal_stat wal_st; struct synchro_entry entry; /* * Rows array is cast from *[] to **, because otherwise g++ complains @@ -817,6 +861,11 @@ apply_synchro_row(struct xrow_header *row) apply_synchro_row_cb, &entry); entry.req = &req; entry.owner = fiber(); + + wal_st.replica_id = replica_id; + wal_st.txn_start_tm = row->tm; + entry.wal_st = &wal_st; + /* * The WAL write is blocking. 
Otherwise it might happen that a CONFIRM * or ROLLBACK is sent to WAL, and it would empty the limbo, but before @@ -862,8 +911,9 @@ applier_handle_raft(struct applier *applier, struct xrow_header *row) return box_raft_process(&req, applier->instance_id); } -static inline int -apply_plain_tx(struct stailq *rows, bool skip_conflict, bool use_triggers) +static int +apply_plain_tx(uint32_t replica_id, struct stailq *rows, + bool skip_conflict, bool use_triggers) { /* * Explicitly begin the transaction so that we can @@ -931,10 +981,21 @@ apply_plain_tx(struct stailq *rows, bool skip_conflict, bool use_triggers) goto fail; } + struct applier_wal_stat *wal_st; + wal_st = region_alloc_object(&txn->region, typeof(*wal_st), &size); + if (wal_st == NULL) { + diag_set(OutOfMemory, size, "region_alloc_object", "wal_st"); + goto fail; + } + trigger_create(on_rollback, applier_txn_rollback_cb, NULL, NULL); txn_on_rollback(txn, on_rollback); - trigger_create(on_wal_write, applier_txn_wal_write_cb, NULL, NULL); + item = stailq_first_entry(rows, struct applier_tx_row, next); + wal_st->replica_id = replica_id; + wal_st->txn_start_tm = item->row.tm; + + trigger_create(on_wal_write, applier_txn_wal_write_cb, wal_st, NULL); txn_on_wal_write(txn, on_wal_write); } @@ -946,7 +1007,7 @@ apply_plain_tx(struct stailq *rows, bool skip_conflict, bool use_triggers) /** A simpler version of applier_apply_tx() for final join stage. 
*/ static int -apply_final_join_tx(struct stailq *rows) +apply_final_join_tx(uint32_t replica_id, struct stailq *rows) { struct xrow_header *first_row = &stailq_first_entry(rows, struct applier_tx_row, next)->row; @@ -957,9 +1018,9 @@ apply_final_join_tx(struct stailq *rows) vclock_follow_xrow(&replicaset.vclock, last_row); if (unlikely(iproto_type_is_synchro_request(first_row->type))) { assert(first_row == last_row); - rc = apply_synchro_row(first_row); + rc = apply_synchro_row(replica_id, first_row); } else { - rc = apply_plain_tx(rows, false, false); + rc = apply_plain_tx(replica_id, rows, false, false); } fiber_gc(); return rc; @@ -1088,11 +1149,14 @@ applier_apply_tx(struct applier *applier, struct stailq *rows) * each other. */ assert(first_row == last_row); - if ((rc = apply_synchro_row(first_row)) != 0) + rc = apply_synchro_row(applier->instance_id, first_row); + if (rc != 0) + goto finish; + } else { + rc = apply_plain_tx(applier->instance_id, rows, + replication_skip_conflict, true); + if (rc != 0) goto finish; - } else if ((rc = apply_plain_tx(rows, replication_skip_conflict, - true)) != 0) { - goto finish; } vclock_follow(&replicaset.applier.vclock, last_row->replica_id, last_row->lsn); diff --git a/src/box/applier.h b/src/box/applier.h index 15ca1fcfd..aaa851b86 100644 --- a/src/box/applier.h +++ b/src/box/applier.h @@ -74,6 +74,8 @@ extern const char *applier_state_strs[]; * State of a replication connection to the master */ struct applier { + double delta; + double prev_tm; /** Background fiber */ struct fiber *reader; /** Background fiber to reply with vclock */ @@ -133,6 +135,29 @@ struct applier { struct vclock remote_vclock_at_subscribe; }; +/** + * WAL related statistics. + */ +struct applier_wal_stat { + /** Replica ID initiated a transaction. */ + uint32_t replica_id; + /** + * Timestamp of a transaction to be accounted + * for relay lag. Usually it is a first row in + * a transaction. 
+ */ + double txn_start_tm; + /** + * Last accounted transaction timestamp. Needed + * to persist lag report in idle state. + */ + double txn_acked_tm; + /** + * Last accounted transaction lag. + */ + double txn_lag; +}; + /** * Start a client to a remote master using a background fiber. * diff --git a/src/box/lua/info.c b/src/box/lua/info.c index 0eb48b823..f201b25e3 100644 --- a/src/box/lua/info.c +++ b/src/box/lua/info.c @@ -143,6 +143,9 @@ lbox_pushrelay(lua_State *L, struct relay *relay) lua_pushnumber(L, ev_monotonic_now(loop()) - relay_last_row_time(relay)); lua_settable(L, -3); + lua_pushstring(L, "lag"); + lua_pushnumber(L, relay_txn_lag(relay)); + lua_settable(L, -3); break; case RELAY_STOPPED: { diff --git a/src/box/relay.cc b/src/box/relay.cc index b1571b361..83d753a30 100644 --- a/src/box/relay.cc +++ b/src/box/relay.cc @@ -158,6 +158,13 @@ struct relay { struct stailq pending_gc; /** Time when last row was sent to peer. */ double last_row_time; + /** + * A time difference between moment when we + * wrote a row in the local WAL and received + * an ACK that peer has it replicated. + */ + double txn_lag; + /** Relay sync state. */ enum relay_state state; @@ -217,6 +224,12 @@ relay_last_row_time(const struct relay *relay) return relay->last_row_time; } +double +relay_txn_lag(const struct relay *relay) +{ + return relay->txn_lag; +} + static void relay_send(struct relay *relay, struct xrow_header *packet); static void @@ -284,6 +297,7 @@ relay_start(struct relay *relay, int fd, uint64_t sync, relay->state = RELAY_FOLLOW; relay->row_count = 0; relay->last_row_time = ev_monotonic_now(loop()); + relay->txn_lag = 0; } void @@ -336,6 +350,7 @@ relay_stop(struct relay *relay) * upon cord_create(). */ relay->cord.id = 0; + relay->txn_lag = 0; } void @@ -629,6 +644,13 @@ relay_reader_f(va_list ap) /* vclock is followed while decoding, zeroing it. 
*/ vclock_create(&relay->recv_vclock); xrow_decode_vclock_xc(&xrow, &relay->recv_vclock); + /* + * WAL to WAL transaction lag is reported + * inside timestamp field. It is ugly and + * confusing but done so to minimize changes + * in packets format. + */ + relay->txn_lag = xrow.tm; fiber_cond_signal(&relay->reader_cond); } } catch (Exception *e) { diff --git a/src/box/relay.h b/src/box/relay.h index b32e2ea2a..615ffb75d 100644 --- a/src/box/relay.h +++ b/src/box/relay.h @@ -93,6 +93,12 @@ relay_vclock(const struct relay *relay); double relay_last_row_time(const struct relay *relay); +/** + * Returns relay's transaction's lag. + */ +double +relay_txn_lag(const struct relay *relay); + /** * Send a Raft update request to the relay channel. It is not * guaranteed that it will be delivered. The connection may break. diff --git a/src/box/replication.cc b/src/box/replication.cc index aefb812b3..157944f05 100644 --- a/src/box/replication.cc +++ b/src/box/replication.cc @@ -184,6 +184,7 @@ replica_new(void) trigger_create(&replica->on_applier_state, replica_on_applier_state_f, NULL, NULL); replica->applier_sync_state = APPLIER_DISCONNECTED; + memset(&replica->wal_st, 0, sizeof(replica->wal_st)); latch_create(&replica->order_latch); return replica; } @@ -234,6 +235,7 @@ replica_set_id(struct replica *replica, uint32_t replica_id) assert(replica_id < VCLOCK_MAX); assert(replica->id == REPLICA_ID_NIL); /* replica id is read-only */ replica->id = replica_id; + replica->wal_st.replica_id = replica_id; if (tt_uuid_is_equal(&INSTANCE_UUID, &replica->uuid)) { /* Assign local replica id */ @@ -281,6 +283,7 @@ replica_clear_id(struct replica *replica) instance_id = REPLICA_ID_NIL; } replica->id = REPLICA_ID_NIL; + replica->wal_st.replica_id = REPLICA_ID_NIL; say_info("removed replica %s", tt_uuid_str(&replica->uuid)); /* diff --git a/src/box/replication.h b/src/box/replication.h index 2ad1cbf66..a40582cd3 100644 --- a/src/box/replication.h +++ b/src/box/replication.h @@ -331,6 
+331,10 @@ struct replica { * separate from applier. */ enum applier_state applier_sync_state; + /** + * Applier's WAL related statistics. + */ + struct applier_wal_stat wal_st; /* The latch is used to order replication requests. */ struct latch order_latch; };
prev parent reply other threads:[~2021-06-03 22:20 UTC|newest] Thread overview: 4+ messages / expand[flat|nested] mbox.gz Atom feed top 2021-06-01 22:02 Cyrill Gorcunov via Tarantool-patches 2021-06-02 6:55 ` Serge Petrenko via Tarantool-patches 2021-06-02 8:41 ` Cyrill Gorcunov via Tarantool-patches 2021-06-03 22:20 ` Cyrill Gorcunov via Tarantool-patches [this message]
Reply instructions: You may reply publicly to this message via plain-text email using any one of the following methods: * Save the following mbox file, import it into your mail client, and reply-to-all from there: mbox Avoid top-posting and favor interleaved quoting: https://en.wikipedia.org/wiki/Posting_style#Interleaved_style * Reply using the --to, --cc, and --in-reply-to switches of git-send-email(1): git send-email \ --in-reply-to=YLlVkLjUUAiWhH+l@grain \ --to=tarantool-patches@dev.tarantool.org \ --cc=gorcunov@gmail.com \ --cc=v.shpilevoy@tarantool.org \ --subject='Re: [Tarantool-patches] [RFC] on downstream.lag design' \ /path/to/YOUR_REPLY https://kernel.org/pub/software/scm/git/docs/git-send-email.html * If your mail client supports setting the In-Reply-To header via mailto: links, try the mailto: link
This is a public inbox, see mirroring instructions for how to clone and mirror all data and code used for this inbox