From mboxrd@z Thu Jan 1 00:00:00 1970 Return-Path: Received: from smtp45.i.mail.ru (smtp45.i.mail.ru [94.100.177.105]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by dev.tarantool.org (Postfix) with ESMTPS id 25C8745C306 for ; Sat, 19 Dec 2020 20:41:39 +0300 (MSK) From: Ilya Kosarev Date: Sat, 19 Dec 2020 20:39:43 +0300 Message-Id: <2152c51f6222f2d3d9d412183063c8646872ff68.1608373787.git.i.kosarev@tarantool.org> In-Reply-To: References: In-Reply-To: References: Subject: [Tarantool-patches] [PATCH v7 3/3] iproto: move greeting from tx thread to iproto List-Id: Tarantool development patches List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , To: v.shpilevoy@tarantool.org, alyapunov@tarantool.org Cc: tarantool-patches@dev.tarantool.org On connection, an evio service callback is invoked to accept it. The next step after acceptance was to process the connection to the tx thread through cbus. This meant that any connection interaction involved the tx thread even before we got to send the greeting to the client. Consequently, a client might reach the instance with an enormous number of connections, leading to file descriptor limit exhaustion in case of an unresponsive tx thread (for example, when building a secondary index) and extra tx_process_connects afterwards, even in case the instance doesn't fail with a "too many open files" error. This patch allows iproto to accept a connection and send the greeting by itself. Thus the connection is established and stays in the fetch_schema state while the tx thread is unresponsive. 
Closes #3776 --- src/box/iproto.cc | 134 ++++++++++++++---- test/app/gh-4787-netbox-empty-errmsg.result | 18 --- test/app/gh-4787-netbox-empty-errmsg.test.lua | 8 -- 3 files changed, 103 insertions(+), 57 deletions(-) diff --git a/src/box/iproto.cc b/src/box/iproto.cc index f7330af21d..93bf869299 100644 --- a/src/box/iproto.cc +++ b/src/box/iproto.cc @@ -356,6 +356,11 @@ enum iproto_connection_state { * takes an already active socket in a constructor. */ IPROTO_CONNECTION_ALIVE, + /** + * Connection is failed in iproto, but tx doesn't know + * about it yet. Used for unsuccessful greeting. + */ + IPROTO_CONNECTION_FAILED, /** * Socket was closed, a notification is sent to the TX * thread to close the session. @@ -453,6 +458,8 @@ struct iproto_connection * meaningless. */ size_t parse_size; + /** Iproto buffer used to send greeting. */ + char greeting_buf[IPROTO_GREETING_SIZE]; /** * Nubmer of active long polling requests that have already * discarded their arguments in order not to stall other @@ -679,7 +686,8 @@ iproto_connection_close(struct iproto_connection *con) */ con->p_ibuf->wpos -= con->parse_size; cpipe_push(&tx_pipe, &con->disconnect_msg); - assert(con->state == IPROTO_CONNECTION_ALIVE); + assert(con->state == IPROTO_CONNECTION_ALIVE || + con->state == IPROTO_CONNECTION_FAILED); con->state = IPROTO_CONNECTION_CLOSED; } else if (con->state == IPROTO_CONNECTION_PENDING_DESTROY) { iproto_connection_try_to_start_destroy(con); @@ -1091,6 +1099,56 @@ iproto_connection_on_output(ev_loop *loop, struct ev_io *watcher, } } +/** + * Flush greeting from it's buffer. It requires separate flush function + * because greeting is being sent from iproto thread which can't use obuf + * and thus we can send greeting without involving tx. 
+ */ +static int +iproto_connection_greeting_flush(struct iproto_connection *con) +{ + struct iovec greeting; + greeting.iov_base = &con->greeting_buf; + greeting.iov_len = IPROTO_GREETING_SIZE; + ssize_t nwr = sio_writev(con->output.fd, &greeting, 1); + + if (nwr > 0) { + /* Count statistics */ + rmean_collect(rmean_net, IPROTO_SENT, nwr); + if (nwr < IPROTO_GREETING_SIZE) { + diag_set(SocketError, sio_socketname(con->output.fd), + "partial write %d", nwr); + return -1; + } + return 1; + } else if (nwr < 0 && !sio_wouldblock(errno)) { + return -1; + } + + return 0; +} + +static void +iproto_connection_on_greeting(ev_loop *loop, struct ev_io *watcher, + int /* revents */) +{ + struct iproto_connection *con = + (struct iproto_connection *)watcher->data; + int rc = iproto_connection_greeting_flush(con); + if (rc <= 0) { + if (rc == 0) { + ev_io_start(loop, &con->output); + } else { + diag_log(); + con->state = IPROTO_CONNECTION_FAILED; + } + return; + } + ev_io_stop(con->loop, &con->output); + ev_io_init(&con->output, iproto_connection_on_output, + con->output.fd, EV_WRITE); +} + static struct iproto_connection * iproto_connection_new(int fd) { @@ -1103,7 +1161,7 @@ iproto_connection_new(int fd) con->input.data = con->output.data = con; con->loop = loop(); ev_io_init(&con->input, iproto_connection_on_input, fd, EV_READ); - ev_io_init(&con->output, iproto_connection_on_output, fd, EV_WRITE); + ev_io_init(&con->output, iproto_connection_on_greeting, fd, EV_WRITE); ibuf_create(&con->ibuf[0], cord_slab_cache(), iproto_readahead); ibuf_create(&con->ibuf[1], cord_slab_cache(), iproto_readahead); obuf_create(&con->obuf[0], &net_slabc, iproto_readahead); @@ -1938,50 +1996,63 @@ net_end_subscribe(struct cmsg *m) } /** - * Handshake a connection: invoke the on-connect trigger - * and possibly authenticate. Try to send the client an error - * upon a failure. + * Handshake a connection: send greeting for it. 
+ */ +static void +iproto_process_connect(struct iproto_connection *con) +{ + /* + * INSTANCE_UUID is guaranteed to be inited before this moment. + * We start listening either in local_recovery() or bootstrap(). + * The INSTANCE_UUID is ensured to be inited in the beginning of + * both methods. In case of local_recovery() it is verified that + * INSTANCE_UUID was read from the snapshot in memtx_engine_new(). + * In bootstrap() INSTANCE_UUID is either taken from the + * instance_uuid box.cfg{} param or created on the spot. + */ + random_bytes(con->salt, IPROTO_SALT_SIZE); + greeting_encode(con->greeting_buf, tarantool_version_id(), + &INSTANCE_UUID, con->salt, IPROTO_SALT_SIZE); + assert(evio_has_fd(&con->output)); + ev_feed_event(con->loop, &con->output, EV_WRITE); +} + +/** + * Create the session and invoke the on_connect triggers. */ static void tx_process_connect(struct cmsg *m) { struct iproto_msg *msg = (struct iproto_msg *) m; struct iproto_connection *con = msg->connection; - struct obuf *out = msg->connection->tx.p_obuf; - try { /* connect. */ - con->session = session_create(SESSION_TYPE_BINARY); - if (con->session == NULL) - diag_raise(); - con->session->meta.connection = con; - tx_fiber_init(con->session, 0); - char *greeting = (char *) static_alloc(IPROTO_GREETING_SIZE); - /* TODO: dirty read from tx thread */ - struct tt_uuid uuid = INSTANCE_UUID; - random_bytes(con->salt, IPROTO_SALT_SIZE); - greeting_encode(greeting, tarantool_version_id(), &uuid, - con->salt, IPROTO_SALT_SIZE); - obuf_dup_xc(out, greeting, IPROTO_GREETING_SIZE); - if (! rlist_empty(&session_on_connect)) { - if (session_run_on_connect_triggers(con->session) != 0) - diag_raise(); - } - iproto_wpos_create(&msg->wpos, out); - } catch (Exception *e) { + + con->session = session_create(SESSION_TYPE_BINARY); + if (con->session == NULL) { tx_reply_error(msg); msg->close_connection = true; + return; + } + con->session->meta.connection = con; + + tx_fiber_init(con->session, 0); + if (! 
rlist_empty(&session_on_connect)) { + if (session_run_on_connect_triggers(con->session) != 0) { + tx_reply_error(msg); + msg->close_connection = true; + } } } /** - * Send a response to connect to the client or close the - * connection in case on_connect trigger failed. + * Try to send the client an error upon a failure. Start reading + * input in case the connection is inited and all good. */ static void -net_send_greeting(struct cmsg *m) +net_finish_connect(struct cmsg *m) { struct iproto_msg *msg = (struct iproto_msg *) m; struct iproto_connection *con = msg->connection; - if (msg->close_connection) { + if (msg->close_connection || con->state == IPROTO_CONNECTION_FAILED) { struct obuf *out = msg->wpos.obuf; int64_t nwr = sio_writev(con->output.fd, out->iov, obuf_iovcnt(out)); @@ -2005,13 +2076,13 @@ net_send_greeting(struct cmsg *m) */ assert(evio_has_fd(&con->output)); /* Handshake OK, start reading input. */ - ev_feed_event(con->loop, &con->output, EV_WRITE); + ev_feed_event(con->loop, &con->input, EV_READ); iproto_msg_delete(msg); } static const struct cmsg_hop connect_route[] = { { tx_process_connect, &net_pipe }, - { net_send_greeting, NULL }, + { net_finish_connect, NULL }, }; /** }}} */ @@ -2042,6 +2113,7 @@ iproto_on_accept(struct evio_service * /* service */, int fd, cmsg_init(&msg->base, connect_route); msg->p_ibuf = con->p_ibuf; msg->wpos = con->wpos; + iproto_process_connect(con); cpipe_push(&tx_pipe, &msg->base); return 0; } diff --git a/test/app/gh-4787-netbox-empty-errmsg.result b/test/app/gh-4787-netbox-empty-errmsg.result index d30337a050..6389b27bc8 100644 --- a/test/app/gh-4787-netbox-empty-errmsg.result +++ b/test/app/gh-4787-netbox-empty-errmsg.result @@ -38,24 +38,6 @@ req_during_auth() | - Connection is not established, state is "auth" | ... --- Check the same for 'initial' state. -ok, err = nil - | --- - | ... 
-do \ - c = netbox.connect(box.cfg.listen, {wait_connected = false}) \ - ok, err = pcall(c.call, c, 'echo', {}, {is_async = true}) \ -end - | --- - | ... -ok, err - | --- - | - false - | - Connection is not established, state is "initial" - | ... -c:close() - | --- - | ... box.schema.user.drop('test') | --- | ... diff --git a/test/app/gh-4787-netbox-empty-errmsg.test.lua b/test/app/gh-4787-netbox-empty-errmsg.test.lua index 0eecaa1bf0..55ea43f26f 100755 --- a/test/app/gh-4787-netbox-empty-errmsg.test.lua +++ b/test/app/gh-4787-netbox-empty-errmsg.test.lua @@ -21,12 +21,4 @@ end req_during_auth() --- Check the same for 'initial' state. -ok, err = nil -do \ - c = netbox.connect(box.cfg.listen, {wait_connected = false}) \ - ok, err = pcall(c.call, c, 'echo', {}, {is_async = true}) \ -end -ok, err -c:close() box.schema.user.drop('test') -- 2.17.1