Tarantool development patches archive
 help / color / mirror / Atom feed
From: Vladimir Davydov <vdavydov.dev@gmail.com>
To: kostja@tarantool.org
Cc: tarantool-patches@freelists.org
Subject: [PATCH 12/25] vinyl: rename some members of vy_scheduler and vy_task struct
Date: Fri, 27 Jul 2018 14:29:52 +0300	[thread overview]
Message-ID: <771564cf0e9f11dd5fbe6ec36e390a8e896342a4.1532689066.git.vdavydov.dev@gmail.com> (raw)
In-Reply-To: <cover.1532689065.git.vdavydov.dev@gmail.com>

I'm planning to add some new members and remove some old members from
those structs. For this to play nicely, let's do some renames:

  vy_scheduler::workers_available => idle_worker_count
  vy_scheduler::input_queue       => pending_tasks
  vy_scheduler::output_queue      => processed_tasks
  vy_task::link                   => in_pending, in_processed
---
 src/box/vy_scheduler.c | 50 ++++++++++++++++++++++++++------------------------
 src/box/vy_scheduler.h | 10 +++++-----
 2 files changed, 31 insertions(+), 29 deletions(-)

diff --git a/src/box/vy_scheduler.c b/src/box/vy_scheduler.c
index 6a6fa1fc..1ae6dd02 100644
--- a/src/box/vy_scheduler.c
+++ b/src/box/vy_scheduler.c
@@ -120,17 +120,16 @@ struct vy_task {
 	 */
 	struct vy_slice *first_slice, *last_slice;
 	/**
-	 * Link in the list of pending or processed tasks.
-	 * See vy_scheduler::input_queue, output_queue.
-	 */
-	struct stailq_entry link;
-	/**
 	 * Index options may be modified while a task is in
 	 * progress so we save them here to safely access them
 	 * from another thread.
 	 */
 	double bloom_fpr;
 	int64_t page_size;
+	/** Link in vy_scheduler::pending_tasks. */
+	struct stailq_entry in_pending;
+	/** Link in vy_scheduler::processed_tasks. */
+	struct stailq_entry in_processed;
 };
 
 /**
@@ -259,7 +258,7 @@ vy_scheduler_start_workers(struct vy_scheduler *scheduler)
 	assert(scheduler->worker_pool_size >= 2);
 
 	scheduler->is_worker_pool_running = true;
-	scheduler->workers_available = scheduler->worker_pool_size;
+	scheduler->idle_worker_count = scheduler->worker_pool_size;
 	scheduler->worker_pool = calloc(scheduler->worker_pool_size,
 					sizeof(struct cord));
 	if (scheduler->worker_pool == NULL)
@@ -318,8 +317,8 @@ vy_scheduler_create(struct vy_scheduler *scheduler, int write_threads,
 	scheduler->worker_pool_size = write_threads;
 	mempool_create(&scheduler->task_pool, cord_slab_cache(),
 		       sizeof(struct vy_task));
-	stailq_create(&scheduler->input_queue);
-	stailq_create(&scheduler->output_queue);
+	stailq_create(&scheduler->pending_tasks);
+	stailq_create(&scheduler->processed_tasks);
 
 	tt_pthread_cond_init(&scheduler->worker_cond, NULL);
 	tt_pthread_mutex_init(&scheduler->mutex, NULL);
@@ -1422,7 +1421,7 @@ vy_schedule(struct vy_scheduler *scheduler, struct vy_task **ptask)
 	if (*ptask != NULL)
 		return 0;
 
-	if (scheduler->workers_available <= 1) {
+	if (scheduler->idle_worker_count <= 1) {
 		/*
 		 * If all worker threads are busy doing compaction
 		 * when we run out of quota, ongoing transactions will
@@ -1501,26 +1500,27 @@ vy_scheduler_f(va_list va)
 	vy_scheduler_start_workers(scheduler);
 
 	while (scheduler->scheduler_fiber != NULL) {
-		struct stailq output_queue;
+		struct stailq processed_tasks;
 		struct vy_task *task, *next;
 		int tasks_failed = 0, tasks_done = 0;
 		bool was_empty;
 
 		/* Get the list of processed tasks. */
-		stailq_create(&output_queue);
+		stailq_create(&processed_tasks);
 		tt_pthread_mutex_lock(&scheduler->mutex);
-		stailq_concat(&output_queue, &scheduler->output_queue);
+		stailq_concat(&processed_tasks, &scheduler->processed_tasks);
 		tt_pthread_mutex_unlock(&scheduler->mutex);
 
 		/* Complete and delete all processed tasks. */
-		stailq_foreach_entry_safe(task, next, &output_queue, link) {
+		stailq_foreach_entry_safe(task, next, &processed_tasks,
+					  in_processed) {
 			if (vy_task_complete(task) != 0)
 				tasks_failed++;
 			else
 				tasks_done++;
 			vy_task_delete(task);
-			scheduler->workers_available++;
-			assert(scheduler->workers_available <=
+			scheduler->idle_worker_count++;
+			assert(scheduler->idle_worker_count <=
 			       scheduler->worker_pool_size);
 		}
 		/*
@@ -1534,7 +1534,7 @@ vy_scheduler_f(va_list va)
 			 * opens a time window for a worker to submit
 			 * a processed task and wake up the scheduler
 			 * (via scheduler_async). Hence we should go
-			 * and recheck the output_queue in order not
+			 * and recheck the processed_tasks in order not
 			 * to lose a wakeup event and hang for good.
 			 */
 			continue;
@@ -1543,7 +1543,7 @@ vy_scheduler_f(va_list va)
 		if (tasks_failed > 0)
 			goto error;
 		/* All worker threads are busy. */
-		if (scheduler->workers_available == 0)
+		if (scheduler->idle_worker_count == 0)
 			goto wait;
 		/* Get a task to schedule. */
 		if (vy_schedule(scheduler, &task) != 0)
@@ -1554,13 +1554,14 @@ vy_scheduler_f(va_list va)
 
 		/* Queue the task and notify workers if necessary. */
 		tt_pthread_mutex_lock(&scheduler->mutex);
-		was_empty = stailq_empty(&scheduler->input_queue);
-		stailq_add_tail_entry(&scheduler->input_queue, task, link);
+		was_empty = stailq_empty(&scheduler->pending_tasks);
+		stailq_add_tail_entry(&scheduler->pending_tasks,
+				      task, in_pending);
 		if (was_empty)
 			tt_pthread_cond_signal(&scheduler->worker_cond);
 		tt_pthread_mutex_unlock(&scheduler->mutex);
 
-		scheduler->workers_available--;
+		scheduler->idle_worker_count--;
 		fiber_reschedule();
 		continue;
 error:
@@ -1605,7 +1606,7 @@ vy_worker_f(void *arg)
 	tt_pthread_mutex_lock(&scheduler->mutex);
 	while (scheduler->is_worker_pool_running) {
 		/* Wait for a task */
-		if (stailq_empty(&scheduler->input_queue)) {
+		if (stailq_empty(&scheduler->pending_tasks)) {
 			/* Wake scheduler up if there are no more tasks */
 			ev_async_send(scheduler->scheduler_loop,
 				      &scheduler->scheduler_async);
@@ -1613,8 +1614,8 @@ vy_worker_f(void *arg)
 					     &scheduler->mutex);
 			continue;
 		}
-		task = stailq_shift_entry(&scheduler->input_queue,
-					  struct vy_task, link);
+		task = stailq_shift_entry(&scheduler->pending_tasks,
+					  struct vy_task, in_pending);
 		tt_pthread_mutex_unlock(&scheduler->mutex);
 		assert(task != NULL);
 
@@ -1628,7 +1629,8 @@ vy_worker_f(void *arg)
 
 		/* Return processed task to scheduler */
 		tt_pthread_mutex_lock(&scheduler->mutex);
-		stailq_add_tail_entry(&scheduler->output_queue, task, link);
+		stailq_add_tail_entry(&scheduler->processed_tasks,
+				      task, in_processed);
 	}
 	tt_pthread_mutex_unlock(&scheduler->mutex);
 	return NULL;
diff --git a/src/box/vy_scheduler.h b/src/box/vy_scheduler.h
index 777756c0..284f666e 100644
--- a/src/box/vy_scheduler.h
+++ b/src/box/vy_scheduler.h
@@ -77,13 +77,13 @@ struct vy_scheduler {
 	/** Total number of worker threads. */
 	int worker_pool_size;
 	/** Number worker threads that are currently idle. */
-	int workers_available;
+	int idle_worker_count;
 	/** Memory pool used for allocating vy_task objects. */
 	struct mempool task_pool;
-	/** Queue of pending tasks, linked by vy_task::link. */
-	struct stailq input_queue;
-	/** Queue of processed tasks, linked by vy_task::link. */
-	struct stailq output_queue;
+	/** Queue of pending tasks, linked by vy_task::in_pending. */
+	struct stailq pending_tasks;
+	/** Queue of processed tasks, linked by vy_task::in_processed. */
+	struct stailq processed_tasks;
 	/**
 	 * Signaled to wake up a worker when there is
 	 * a pending task in the input queue. Also used
-- 
2.11.0

  parent reply	other threads:[~2018-07-27 11:29 UTC|newest]

Thread overview: 39+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2018-07-27 11:29 [PATCH 00/25] vinyl: eliminate disk read on REPLACE/DELETE Vladimir Davydov
2018-07-27 11:29 ` [PATCH 01/25] vinyl: make point lookup always return the latest tuple version Vladimir Davydov
2018-07-27 11:29 ` [PATCH 02/25] vinyl: simplify vy_squash_process Vladimir Davydov
2018-07-27 11:29 ` [PATCH 03/25] vinyl: always get full tuple from pk after reading from secondary index Vladimir Davydov
2018-07-27 11:29 ` [PATCH 04/25] vinyl: fold vy_replace_one and vy_replace_impl Vladimir Davydov
2018-07-27 11:29 ` [PATCH 05/25] vinyl: fold vy_delete_impl Vladimir Davydov
2018-07-27 11:29 ` [PATCH 06/25] vinyl: refactor unique check Vladimir Davydov
2018-07-27 11:29 ` [PATCH 07/25] vinyl: check key uniqueness before modifying tx write set Vladimir Davydov
2018-07-27 11:29 ` [PATCH 08/25] vinyl: remove env argument of vy_check_is_unique_{primary,secondary} Vladimir Davydov
2018-07-31 20:45   ` [tarantool-patches] " Konstantin Osipov
2018-07-27 11:29 ` [PATCH 09/25] vinyl: store full tuples in secondary index cache Vladimir Davydov
2018-07-31 20:47   ` Konstantin Osipov
2018-07-27 11:29 ` [PATCH 10/25] vinyl: do not free pending tasks on shutdown Vladimir Davydov
2018-07-31 20:48   ` Konstantin Osipov
2018-07-27 11:29 ` [PATCH 11/25] vinyl: store pointer to scheduler in struct vy_task Vladimir Davydov
2018-07-31 20:49   ` Konstantin Osipov
2018-07-27 11:29 ` Vladimir Davydov [this message]
2018-07-27 11:29 ` [PATCH 13/25] vinyl: use cbus for communication between scheduler and worker threads Vladimir Davydov
2018-07-27 11:29 ` [PATCH 14/25] vinyl: zap vy_scheduler::is_worker_pool_running Vladimir Davydov
2018-07-27 11:29 ` [PATCH 15/25] vinyl: rename vy_task::status to is_failed Vladimir Davydov
2018-07-27 11:29 ` [PATCH 16/25] xrow: allow to store flags in DML requests Vladimir Davydov
2018-07-27 11:29 ` [PATCH 17/25] vinyl: pin last statement returned by write iterator explicitly Vladimir Davydov
2018-07-27 11:29 ` [PATCH 18/25] vinyl: teach write iterator to return overwritten tuples Vladimir Davydov
2018-07-27 11:29 ` [PATCH 19/25] vinyl: prepare write iterator heap comparator for deferred DELETEs Vladimir Davydov
2018-07-27 11:30 ` [PATCH 20/25] vinyl: allow to skip certain statements on read Vladimir Davydov
2018-07-27 11:30 ` [PATCH 21/25] vinyl: add function to create surrogate deletes from raw msgpack Vladimir Davydov
2018-07-27 11:30 ` [PATCH 22/25] vinyl: remove pointless assertion from vy_stmt_new_surrogate_delete Vladimir Davydov
2018-07-27 11:30 ` [PATCH 23/25] txn: add helper to detect transaction boundaries Vladimir Davydov
2018-07-31 20:52   ` [tarantool-patches] " Konstantin Osipov
2018-07-27 11:30 ` [PATCH 24/25] Introduce _vinyl_deferred_delete system space Vladimir Davydov
2018-07-31 20:54   ` Konstantin Osipov
2018-08-01 14:00     ` Vladimir Davydov
2018-08-01 20:25       ` [tarantool-patches] " Konstantin Osipov
2018-08-02  9:43         ` Vladimir Davydov
2018-08-06  8:42           ` Vladimir Davydov
2018-07-27 11:30 ` [PATCH 25/25] vinyl: eliminate disk read on REPLACE/DELETE Vladimir Davydov
2018-07-31 20:55   ` Konstantin Osipov
2018-08-01 16:03     ` Vladimir Davydov
2018-08-01 16:51     ` Vladimir Davydov

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=771564cf0e9f11dd5fbe6ec36e390a8e896342a4.1532689066.git.vdavydov.dev@gmail.com \
    --to=vdavydov.dev@gmail.com \
    --cc=kostja@tarantool.org \
    --cc=tarantool-patches@freelists.org \
    --subject='Re: [PATCH 12/25] vinyl: rename some members of vy_scheduler and vy_task struct' \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox