[PATCH] vinyl: rename vy_index to vy_lsm

Vladimir Davydov vdavydov.dev at gmail.com
Fri Mar 23 18:50:11 MSK 2018


Vinyl assigns a unique id to each index so that it can be identified
in vylog, see vy_index->id. Outside Vinyl, however, index id means
something completely different - it is the ordinal number of the index
in a space. This creates a lot of confusion. To resolve it, let's
rename vy_index to vy_lsm and refer to Vinyl indexes as LSM trees in
comments, so that Vinyl's index_id turns into lsm_id.
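
To illustrate the distinction, here is a sketch of how the two ids
live side by side in struct vy_lsm after this patch (only the fields
in question are shown; the exact field set and types are abridged):

	struct vy_lsm {
		/** Unique ID used to identify the LSM tree in vylog. */
		int64_t id;
		/** Ordinal number of the index in its space. */
		uint32_t index_id;
		/** ID of the space this LSM tree belongs to. */
		uint32_t space_id;
		/* ... */
	};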

Also, rename vy_log_record's index_def_id and space_def_id back to
the conventional index_id and space_id, as they don't conflict with
the Vinyl index id anymore (which is now lsm_id).
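
With both renames in place, a vylog call site reads unambiguously,
e.g. this one from the vinyl.c hunk below:

	vy_log_create_lsm(lsm->id, lsm->space_id, lsm->index_id,
			  lsm->key_def, lsn);

Here lsm->id is the unique vylog identifier, while lsm->space_id and
lsm->index_id keep their conventional box-level meaning.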

Thanks to @Gerold103 for suggesting the new name.
---
 src/box/CMakeLists.txt           |   2 +-
 src/box/vinyl.c                  | 665 +++++++++++++++++++--------------------
 src/box/vy_cache.h               |   2 +-
 src/box/vy_log.c                 | 401 ++++++++++++-----------
 src/box/vy_log.h                 | 144 ++++-----
 src/box/{vy_index.c => vy_lsm.c} | 636 ++++++++++++++++++-------------------
 src/box/{vy_index.h => vy_lsm.h} | 270 ++++++++--------
 src/box/vy_mem.c                 |   2 +-
 src/box/vy_point_lookup.c        |  98 +++---
 src/box/vy_point_lookup.h        |   8 +-
 src/box/vy_range.h               |  15 +-
 src/box/vy_read_iterator.c       | 117 ++++---
 src/box/vy_read_iterator.h       |  16 +-
 src/box/vy_read_set.c            |  24 +-
 src/box/vy_read_set.h            |  46 +--
 src/box/vy_run.c                 |   5 +-
 src/box/vy_run.h                 |  14 +-
 src/box/vy_scheduler.c           | 418 ++++++++++++------------
 src/box/vy_scheduler.h           |  18 +-
 src/box/vy_stat.h                |  14 +-
 src/box/vy_tx.c                  | 152 +++++----
 src/box/vy_tx.h                  |  33 +-
 test/unit/CMakeLists.txt         |   2 +-
 test/unit/vy_log_stub.c          |   6 +-
 test/unit/vy_point_lookup.c      |  37 ++-
 test/unit/vy_point_lookup.result |   6 +-
 26 files changed, 1565 insertions(+), 1586 deletions(-)
 rename src/box/{vy_index.c => vy_lsm.c} (56%)
 rename src/box/{vy_index.h => vy_lsm.h} (59%)

diff --git a/src/box/CMakeLists.txt b/src/box/CMakeLists.txt
index fb6602b0..598b596a 100644
--- a/src/box/CMakeLists.txt
+++ b/src/box/CMakeLists.txt
@@ -75,7 +75,7 @@ add_library(box STATIC
     vy_mem.c
     vy_run.c
     vy_range.c
-    vy_index.c
+    vy_lsm.c
     vy_tx.c
     vy_write_iterator.c
     vy_read_iterator.c
diff --git a/src/box/vinyl.c b/src/box/vinyl.c
index ac12a331..f782ee64 100644
--- a/src/box/vinyl.c
+++ b/src/box/vinyl.c
@@ -33,7 +33,7 @@
 #include "vy_mem.h"
 #include "vy_run.h"
 #include "vy_range.h"
-#include "vy_index.h"
+#include "vy_lsm.h"
 #include "vy_tx.h"
 #include "vy_cache.h"
 #include "vy_log.h"
@@ -130,8 +130,8 @@ struct vy_env {
 	 * best result among 10% worst measurements.
 	 */
 	struct histogram *dump_bw;
-	/** Common index environment. */
-	struct vy_index_env index_env;
+	/** Common LSM tree environment. */
+	struct vy_lsm_env lsm_env;
 	/** Environment for cache subsystem */
 	struct vy_cache_env cache_env;
 	/** Environment for run subsystem */
@@ -204,15 +204,15 @@ vy_env(struct engine *engine)
 
 struct vinyl_index {
 	struct index base;
-	/** Vinyl index implementation. */
-	struct vy_index *db;
+	/** LSM tree that stores index data. */
+	struct vy_lsm *lsm;
 };
 
-/** Extract vy_index from an index object. */
-struct vy_index *
-vy_index(struct index *index)
+/** Extract vy_lsm from an index object. */
+struct vy_lsm *
+vy_lsm(struct index *index)
 {
-	return ((struct vinyl_index *)index)->db;
+	return ((struct vinyl_index *)index)->lsm;
 }
 
 /** Mask passed to vy_gc(). */
@@ -231,8 +231,8 @@ struct vinyl_iterator {
 	struct iterator base;
 	/** Vinyl environment. */
 	struct vy_env *env;
-	/** Vinyl index this iterator is for. */
-	struct vy_index *index;
+	/** LSM tree this iterator is for. */
+	struct vy_lsm *lsm;
 	/**
 	 * Points either to tx_autocommit for autocommit mode
 	 * or to a multi-statement transaction active when the
@@ -396,12 +396,12 @@ vy_info_append_compact_stat(struct info_handler *h, const char *name,
 }
 
 static void
-vinyl_index_info(struct index *base, struct info_handler *h)
+vinyl_index_info(struct index *index, struct info_handler *h)
 {
 	char buf[1024];
-	struct vy_index *index = vy_index(base);
-	struct vy_index_stat *stat = &index->stat;
-	struct vy_cache_stat *cache_stat = &index->cache.stat;
+	struct vy_lsm *lsm = vy_lsm(index);
+	struct vy_lsm_stat *stat = &lsm->stat;
+	struct vy_cache_stat *cache_stat = &lsm->cache.stat;
 
 	info_begin(h);
 
@@ -425,7 +425,7 @@ vinyl_index_info(struct index *base, struct info_handler *h)
 	info_append_int(h, "lookup", stat->memory.iterator.lookup);
 	vy_info_append_stmt_counter(h, "get", &stat->memory.iterator.get);
 	info_table_end(h);
-	info_append_int(h, "index_size", vy_index_mem_tree_size(index));
+	info_append_int(h, "index_size", vy_lsm_mem_tree_size(lsm));
 	info_table_end(h);
 
 	info_table_begin(h, "disk");
@@ -441,8 +441,8 @@ vinyl_index_info(struct index *base, struct info_handler *h)
 	info_table_end(h);
 	vy_info_append_compact_stat(h, "dump", &stat->disk.dump);
 	vy_info_append_compact_stat(h, "compact", &stat->disk.compact);
-	info_append_int(h, "index_size", index->page_index_size);
-	info_append_int(h, "bloom_size", index->bloom_size);
+	info_append_int(h, "index_size", lsm->page_index_size);
+	info_append_int(h, "bloom_size", lsm->bloom_size);
 	info_table_end(h);
 
 	info_table_begin(h, "cache");
@@ -453,7 +453,7 @@ vinyl_index_info(struct index *base, struct info_handler *h)
 	vy_info_append_stmt_counter(h, "invalidate", &cache_stat->invalidate);
 	vy_info_append_stmt_counter(h, "evict", &cache_stat->evict);
 	info_append_int(h, "index_size",
-			vy_cache_tree_mem_used(&index->cache.cache_tree));
+			vy_cache_tree_mem_used(&lsm->cache.cache_tree));
 	info_table_end(h);
 
 	info_table_begin(h, "txw");
@@ -464,21 +464,21 @@ vinyl_index_info(struct index *base, struct info_handler *h)
 	info_table_end(h);
 	info_table_end(h);
 
-	info_append_int(h, "range_count", index->range_count);
-	info_append_int(h, "run_count", index->run_count);
-	info_append_int(h, "run_avg", index->run_count / index->range_count);
-	histogram_snprint(buf, sizeof(buf), index->run_hist);
+	info_append_int(h, "range_count", lsm->range_count);
+	info_append_int(h, "run_count", lsm->run_count);
+	info_append_int(h, "run_avg", lsm->run_count / lsm->range_count);
+	histogram_snprint(buf, sizeof(buf), lsm->run_hist);
 	info_append_str(h, "run_histogram", buf);
 
 	info_end(h);
 }
 
 static void
-vinyl_index_reset_stat(struct index *base)
+vinyl_index_reset_stat(struct index *index)
 {
-	struct vy_index *index = vy_index(base);
-	struct vy_index_stat *stat = &index->stat;
-	struct vy_cache_stat *cache_stat = &index->cache.stat;
+	struct vy_lsm *lsm = vy_lsm(index);
+	struct vy_lsm_stat *stat = &lsm->stat;
+	struct vy_cache_stat *cache_stat = &lsm->cache.stat;
 
 	stat->lookup = 0;
 	latency_reset(&stat->latency);
@@ -507,8 +507,8 @@ vinyl_engine_memory_stat(struct engine *engine, struct engine_memory_stat *stat)
 	stat->data += lsregion_used(&env->mem_env.allocator) -
 				env->mem_env.tree_extent_size;
 	stat->index += env->mem_env.tree_extent_size;
-	stat->index += env->index_env.bloom_size;
-	stat->index += env->index_env.page_index_size;
+	stat->index += env->lsm_env.bloom_size;
+	stat->index += env->lsm_env.page_index_size;
 	stat->cache += env->cache_env.mem_used;
 	stat->tx += env->xm->write_set_size + env->xm->read_set_size;
 	mempool_stats(&env->xm->tx_mempool, &mstats);
@@ -553,31 +553,31 @@ vinyl_check_wal(struct vy_env *env, const char *what)
 }
 
 /**
- * Given a space and an index id, return vy_index.
+ * Given a space and an index id, return vy_lsm.
  * If index not found, return NULL and set diag.
  */
-static struct vy_index *
-vy_index_find(struct space *space, uint32_t iid)
+static struct vy_lsm *
+vy_lsm_find(struct space *space, uint32_t iid)
 {
 	struct index *index = index_find(space, iid);
 	if (index == NULL)
 		return NULL;
-	return vy_index(index);
+	return vy_lsm(index);
 }
 
 /**
- * Wrapper around vy_index_find() which ensures that
+ * Wrapper around vy_lsm_find() which ensures that
  * the found index is unique.
  */
-static  struct vy_index *
-vy_index_find_unique(struct space *space, uint32_t index_id)
+static struct vy_lsm *
+vy_lsm_find_unique(struct space *space, uint32_t index_id)
 {
-	struct vy_index *index = vy_index_find(space, index_id);
-	if (index != NULL && !index->opts.is_unique) {
+	struct vy_lsm *lsm = vy_lsm_find(space, index_id);
+	if (lsm != NULL && !lsm->opts.is_unique) {
 		diag_set(ClientError, ER_MORE_THAN_ONE_TUPLE);
 		return NULL;
 	}
-	return index;
+	return lsm;
 }
 
 static int
@@ -684,39 +684,39 @@ vinyl_space_create_index(struct space *space, struct index_def *index_def)
 		return NULL;
 	}
 	struct vy_env *env = vinyl->env;
-	struct vy_index *pk = NULL;
+	struct vy_lsm *pk = NULL;
 	if (index_def->iid > 0) {
-		pk = vy_index(space_index(space, 0));
+		pk = vy_lsm(space_index(space, 0));
 		assert(pk != NULL);
 	}
-	struct vy_index *db = vy_index_new(&env->index_env, &env->cache_env,
-					   &env->mem_env, index_def,
-					   space->format, pk);
-	if (db == NULL) {
+	struct vy_lsm *lsm = vy_lsm_new(&env->lsm_env, &env->cache_env,
+					&env->mem_env, index_def,
+					space->format, pk);
+	if (lsm == NULL) {
 		free(index);
 		return NULL;
 	}
 	if (index_create(&index->base, (struct engine *)vinyl,
 			 &vinyl_index_vtab, index_def) != 0) {
-		vy_index_delete(db);
+		vy_lsm_delete(lsm);
 		free(index);
 		return NULL;
 	}
-	index->db = db;
+	index->lsm = lsm;
 	return &index->base;
 }
 
 static void
-vinyl_index_destroy(struct index *base)
+vinyl_index_destroy(struct index *index)
 {
-	struct vy_index *index = vy_index(base);
+	struct vy_lsm *lsm = vy_lsm(index);
 	/*
-	 * There still may be a task scheduled for this index
+	 * There still may be a task scheduled for this LSM tree
 	 * so postpone actual deletion until the last reference
 	 * is gone.
 	 */
-	vy_index_unref(index);
-	free(base);
+	vy_lsm_unref(lsm);
+	free(index);
 }
 
 /**
@@ -726,8 +726,11 @@ vinyl_index_destroy(struct index *base)
  * account.
  */
 static int
-vy_index_open(struct vy_env *env, struct vy_index *index)
+vinyl_index_open(struct index *index)
 {
+	struct vy_env *env = vy_env(index->engine);
+	struct vy_lsm *lsm = vy_lsm(index);
+
 	/* Ensure vinyl data directory exists. */
 	if (access(env->path, F_OK) != 0) {
 		diag_set(SystemError, "can not access vinyl data directory");
@@ -740,7 +743,7 @@ vy_index_open(struct vy_env *env, struct vy_index *index)
 		 * The recovery is complete, simply
 		 * create a new index.
 		 */
-		rc = vy_index_create(index);
+		rc = vy_lsm_create(lsm);
 		if (rc == 0) {
 			/* Make sure reader threads are up and running. */
 			vy_run_env_enable_coio(&env->run_env,
@@ -754,7 +757,7 @@ vy_index_open(struct vy_env *env, struct vy_index *index)
 		 * exist locally, and we should create the
 		 * index directory from scratch.
 		 */
-		rc = vy_index_create(index);
+		rc = vy_lsm_create(lsm);
 		break;
 	case VINYL_INITIAL_RECOVERY_LOCAL:
 	case VINYL_FINAL_RECOVERY_LOCAL:
@@ -764,10 +767,10 @@ vy_index_open(struct vy_env *env, struct vy_index *index)
 		 * have already been created, so try to load
 		 * the index files from it.
 		 */
-		rc = vy_index_recover(index, env->recovery, &env->run_env,
-				vclock_sum(env->recovery_vclock),
-				env->status == VINYL_INITIAL_RECOVERY_LOCAL,
-				env->force_recovery);
+		rc = vy_lsm_recover(lsm, env->recovery, &env->run_env,
+				    vclock_sum(env->recovery_vclock),
+				    env->status == VINYL_INITIAL_RECOVERY_LOCAL,
+				    env->force_recovery);
 		break;
 	default:
 		unreachable();
@@ -776,12 +779,12 @@ vy_index_open(struct vy_env *env, struct vy_index *index)
 }
 
 static void
-vinyl_index_commit_create(struct index *base, int64_t lsn)
+vinyl_index_commit_create(struct index *index, int64_t lsn)
 {
-	struct vy_env *env = vy_env(base->engine);
-	struct vy_index *index = vy_index(base);
+	struct vy_env *env = vy_env(index->engine);
+	struct vy_lsm *lsm = vy_lsm(index);
 
-	assert(index->id >= 0);
+	assert(lsm->id >= 0);
 
 	if (env->status == VINYL_INITIAL_RECOVERY_LOCAL ||
 	    env->status == VINYL_FINAL_RECOVERY_LOCAL) {
@@ -793,8 +796,8 @@ vinyl_index_commit_create(struct index *base, int64_t lsn)
 		 * the index isn't in the recovery context and we
 		 * need to retry to log it now.
 		 */
-		if (index->is_committed) {
-			vy_scheduler_add_index(&env->scheduler, index);
+		if (lsm->is_committed) {
+			vy_scheduler_add_lsm(&env->scheduler, lsm);
 			return;
 		}
 	}
@@ -815,14 +818,14 @@ vinyl_index_commit_create(struct index *base, int64_t lsn)
 	 * index by 1. So for legacy indexes use the LSN from
 	 * index options.
 	 */
-	if (index->opts.lsn != 0)
-		lsn = index->opts.lsn;
+	if (lsm->opts.lsn != 0)
+		lsn = lsm->opts.lsn;
 
-	assert(!index->is_committed);
-	index->is_committed = true;
+	assert(!lsm->is_committed);
+	lsm->is_committed = true;
 
-	assert(index->range_count == 1);
-	struct vy_range *range = vy_range_tree_first(index->tree);
+	assert(lsm->range_count == 1);
+	struct vy_range *range = vy_range_tree_first(lsm->tree);
 
 	/*
 	 * Since it's too late to fail now, in case of vylog write
@@ -833,27 +836,27 @@ vinyl_index_commit_create(struct index *base, int64_t lsn)
 	 * recovery.
 	 */
 	vy_log_tx_begin();
-	vy_log_create_index(index->id, index->space_id, index->index_id,
-			    index->key_def, lsn);
-	vy_log_insert_range(index->id, range->id, NULL, NULL);
+	vy_log_create_lsm(lsm->id, lsm->space_id, lsm->index_id,
+			  lsm->key_def, lsn);
+	vy_log_insert_range(lsm->id, range->id, NULL, NULL);
 	vy_log_tx_try_commit();
 	/*
 	 * After we committed the index in the log, we can schedule
 	 * a task for it.
 	 */
-	vy_scheduler_add_index(&env->scheduler, index);
+	vy_scheduler_add_lsm(&env->scheduler, lsm);
 }
 
 /*
- * Delete all runs, ranges, and slices of a given index
+ * Delete all runs, ranges, and slices of a given LSM tree
  * from the metadata log.
  */
 static void
-vy_log_index_prune(struct vy_index *index, int64_t gc_lsn)
+vy_log_lsm_prune(struct vy_lsm *lsm, int64_t gc_lsn)
 {
 	int loops = 0;
-	for (struct vy_range *range = vy_range_tree_first(index->tree);
-	     range != NULL; range = vy_range_tree_next(index->tree, range)) {
+	for (struct vy_range *range = vy_range_tree_first(lsm->tree);
+	     range != NULL; range = vy_range_tree_next(lsm->tree, range)) {
 		struct vy_slice *slice;
 		rlist_foreach_entry(slice, &range->slices, in_range)
 			vy_log_delete_slice(slice->id);
@@ -862,7 +865,7 @@ vy_log_index_prune(struct vy_index *index, int64_t gc_lsn)
 			fiber_sleep(0);
 	}
 	struct vy_run *run;
-	rlist_foreach_entry(run, &index->runs, in_index) {
+	rlist_foreach_entry(run, &lsm->runs, in_lsm) {
 		vy_log_drop_run(run->id, gc_lsn);
 		if (++loops % VY_YIELD_LOOPS == 0)
 			fiber_sleep(0);
@@ -870,12 +873,12 @@ vy_log_index_prune(struct vy_index *index, int64_t gc_lsn)
 }
 
 static void
-vinyl_index_commit_drop(struct index *base)
+vinyl_index_commit_drop(struct index *index)
 {
-	struct vy_env *env = vy_env(base->engine);
-	struct vy_index *index = vy_index(base);
+	struct vy_env *env = vy_env(index->engine);
+	struct vy_lsm *lsm = vy_lsm(index);
 
-	vy_scheduler_remove_index(&env->scheduler, index);
+	vy_scheduler_remove_lsm(&env->scheduler, lsm);
 
 	/*
 	 * We can't abort here, because the index drop request has
@@ -885,14 +888,14 @@ vinyl_index_commit_drop(struct index *base)
 	 * not flushed before the instance is shut down, we replay it
 	 * on local recovery from WAL.
 	 */
-	if (env->status == VINYL_FINAL_RECOVERY_LOCAL && index->is_dropped)
+	if (env->status == VINYL_FINAL_RECOVERY_LOCAL && lsm->is_dropped)
 		return;
 
-	index->is_dropped = true;
+	lsm->is_dropped = true;
 
 	vy_log_tx_begin();
-	vy_log_index_prune(index, checkpoint_last(NULL));
-	vy_log_drop_index(index->id);
+	vy_log_lsm_prune(lsm, checkpoint_last(NULL));
+	vy_log_drop_lsm(lsm->id);
 	vy_log_tx_try_commit();
 }
 
@@ -916,7 +919,7 @@ vinyl_space_prepare_alter(struct space *old_space, struct space *new_space)
 	 */
 	if (old_space->index_count == 0)
 		return 0;
-	struct vy_index *pk = vy_index(old_space->index[0]);
+	struct vy_lsm *pk = vy_lsm(old_space->index[0]);
 	/*
 	 * During WAL recovery, the space may not be empty. But we
 	 * open existing indexes, not creating new ones. Allow
@@ -984,7 +987,7 @@ vinyl_space_check_format(struct space *new_space, struct space *old_space)
 	/* @sa vy_prepare_alter_space for checks below. */
 	if (old_space->index_count == 0)
 		return 0;
-	struct vy_index *pk = vy_index(old_space->index[0]);
+	struct vy_lsm *pk = vy_lsm(old_space->index[0]);
 	if (env->status != VINYL_ONLINE)
 		return 0;
 	if (pk->stat.disk.count.rows == 0 && pk->stat.memory.count.rows == 0)
@@ -1002,7 +1005,7 @@ vinyl_space_commit_alter(struct space *old_space, struct space *new_space)
 		return; /* space drop */
 
 	struct tuple_format *new_format = new_space->format;
-	struct vy_index *pk = vy_index(new_space->index[0]);
+	struct vy_lsm *pk = vy_lsm(new_space->index[0]);
 	struct index_def *new_index_def = space_index_def(new_space, 0);
 
 	assert(pk->pk == NULL);
@@ -1038,33 +1041,32 @@ vinyl_space_commit_alter(struct space *old_space, struct space *new_space)
 	tuple_format_ref(format);
 	pk->mem_format = new_format;
 	tuple_format_ref(new_format);
-	vy_index_validate_formats(pk);
+	vy_lsm_validate_formats(pk);
 	key_def_update_optionality(pk->key_def, new_format->min_field_count);
 	key_def_update_optionality(pk->cmp_def, new_format->min_field_count);
 
 	for (uint32_t i = 1; i < new_space->index_count; ++i) {
-		struct vy_index *index = vy_index(new_space->index[i]);
-		vy_index_unref(index->pk);
-		vy_index_ref(pk);
-		index->pk = pk;
+		struct vy_lsm *lsm = vy_lsm(new_space->index[i]);
+		vy_lsm_unref(lsm->pk);
+		vy_lsm_ref(pk);
+		lsm->pk = pk;
 		new_index_def = space_index_def(new_space, i);
-		index->opts = new_index_def->opts;
-		index->check_is_unique = index->opts.is_unique;
-		tuple_format_unref(index->mem_format_with_colmask);
-		tuple_format_unref(index->mem_format);
-		tuple_format_unref(index->upsert_format);
-		index->mem_format_with_colmask =
-			pk->mem_format_with_colmask;
-		index->mem_format = pk->mem_format;
-		index->upsert_format = pk->upsert_format;
-		tuple_format_ref(index->mem_format_with_colmask);
-		tuple_format_ref(index->mem_format);
-		tuple_format_ref(index->upsert_format);
-		key_def_update_optionality(index->key_def,
+		lsm->opts = new_index_def->opts;
+		lsm->check_is_unique = lsm->opts.is_unique;
+		tuple_format_unref(lsm->mem_format_with_colmask);
+		tuple_format_unref(lsm->mem_format);
+		tuple_format_unref(lsm->upsert_format);
+		lsm->mem_format_with_colmask = pk->mem_format_with_colmask;
+		lsm->mem_format = pk->mem_format;
+		lsm->upsert_format = pk->upsert_format;
+		tuple_format_ref(lsm->mem_format_with_colmask);
+		tuple_format_ref(lsm->mem_format);
+		tuple_format_ref(lsm->upsert_format);
+		key_def_update_optionality(lsm->key_def,
 					   new_format->min_field_count);
-		key_def_update_optionality(index->cmp_def,
+		key_def_update_optionality(lsm->cmp_def,
 					   new_format->min_field_count);
-		vy_index_validate_formats(index);
+		vy_lsm_validate_formats(lsm);
 	}
 
 	/*
@@ -1075,14 +1077,14 @@ vinyl_space_commit_alter(struct space *old_space, struct space *new_space)
 	 * likelier to have a "colder" cache.
 	 */
 	for (int i = new_space->index_count - 1; i >= 0; i--) {
-		struct vy_index *index = vy_index(new_space->index[i]);
-		if (!index->check_is_unique)
+		struct vy_lsm *lsm = vy_lsm(new_space->index[i]);
+		if (!lsm->check_is_unique)
 			continue;
 		for (int j = 0; j < (int)new_space->index_count; j++) {
-			struct vy_index *other = vy_index(new_space->index[j]);
-			if (other != index && other->check_is_unique &&
-			    key_def_contains(index->key_def, other->key_def)) {
-				index->check_is_unique = false;
+			struct vy_lsm *other = vy_lsm(new_space->index[j]);
+			if (other != lsm && other->check_is_unique &&
+			    key_def_contains(lsm->key_def, other->key_def)) {
+				lsm->check_is_unique = false;
 				break;
 			}
 		}
@@ -1098,8 +1100,7 @@ fail:
 static int
 vinyl_space_add_primary_key(struct space *space)
 {
-	return vy_index_open(vy_env(space->engine),
-			     vy_index(space->index[0]));
+	return vinyl_index_open(space->index[0]);
 }
 
 static void
@@ -1135,8 +1136,7 @@ vinyl_space_build_secondary_key(struct space *old_space,
 	 *   Engine::buildSecondaryKey(old_space, new_space, new_index_arg);
 	 *  but aware of three cases mentioned above.
 	 */
-	return vy_index_open(vy_env(new_index->engine),
-			     vy_index(new_index));
+	return vinyl_index_open(new_index);
 }
 
 static size_t
@@ -1148,28 +1148,27 @@ vinyl_space_bsize(struct space *space)
 	 * primary indexes, it is basically the size of
 	 * binary data stored in this space's primary index.
 	 */
-	struct index *pk_base = space_index(space, 0);
-	if (pk_base == NULL)
+	struct index *pk = space_index(space, 0);
+	if (pk == NULL)
 		return 0;
-	struct vy_index *pk = vy_index(pk_base);
-	return pk->stat.memory.count.bytes + pk->stat.disk.count.bytes;
+	struct vy_lsm *lsm = vy_lsm(pk);
+	return lsm->stat.memory.count.bytes + lsm->stat.disk.count.bytes;
 }
 
 static ssize_t
-vinyl_index_size(struct index *base)
+vinyl_index_size(struct index *index)
 {
 	/*
-	 * Return the total number of statements in the index.
-	 * Note, it may be greater than the number of tuples
-	 * actually stored in the space, but it should be a
-	 * fairly good estimate.
+	 * Return the total number of statements in the LSM tree.
+	 * Note, it may be greater than the number of tuples actually
+	 * stored in the space, but it should be a fairly good estimate.
 	 */
-	struct vy_index *index = vy_index(base);
-	return index->stat.memory.count.rows + index->stat.disk.count.rows;
+	struct vy_lsm *lsm = vy_lsm(index);
+	return lsm->stat.memory.count.rows + lsm->stat.disk.count.rows;
 }
 
 static ssize_t
-vinyl_index_bsize(struct index *base)
+vinyl_index_bsize(struct index *index)
 {
 	/*
 	 * Return the cost of indexing user data. For both
@@ -1179,11 +1178,11 @@ vinyl_index_bsize(struct index *base)
 	 * total size of statements stored on disk, because
 	 * they are only needed for building the index.
 	 */
-	struct vy_index *index = vy_index(base);
-	ssize_t bsize = vy_index_mem_tree_size(index) +
-		index->page_index_size + index->bloom_size;
-	if (index->index_id > 0)
-		bsize += index->stat.disk.count.bytes;
+	struct vy_lsm *lsm = vy_lsm(index);
+	ssize_t bsize = vy_lsm_mem_tree_size(lsm) +
+		lsm->page_index_size + lsm->bloom_size;
+	if (lsm->index_id > 0)
+		bsize += lsm->stat.disk.count.bytes;
 	return bsize;
 }
 
@@ -1192,25 +1191,25 @@ vinyl_index_bsize(struct index *base)
  */
 
 /**
- * Check if a request has already been committed to an index.
+ * Check if a request has already been committed to an LSM tree.
  *
  * If we're recovering the WAL, it may happen that this
  * particular run was dumped after the checkpoint, and we're
  * replaying records already present in the database. In this
  * case avoid overwriting a newer version with an older one.
  *
- * If the index is going to be dropped or truncated on WAL
+ * If the LSM tree is going to be dropped or truncated on WAL
  * recovery, there's no point in replaying statements for it,
  * either.
  */
 static inline bool
-vy_is_committed_one(struct vy_env *env, struct vy_index *index)
+vy_is_committed_one(struct vy_env *env, struct vy_lsm *lsm)
 {
 	if (likely(env->status != VINYL_FINAL_RECOVERY_LOCAL))
 		return false;
-	if (index->is_dropped)
+	if (lsm->is_dropped)
 		return true;
-	if (vclock_sum(env->recovery_vclock) <= index->dump_lsn)
+	if (vclock_sum(env->recovery_vclock) <= lsm->dump_lsn)
 		return true;
 	return false;
 }
@@ -1225,16 +1224,16 @@ vy_is_committed(struct vy_env *env, struct space *space)
 	if (likely(env->status != VINYL_FINAL_RECOVERY_LOCAL))
 		return false;
 	for (uint32_t iid = 0; iid < space->index_count; iid++) {
-		struct vy_index *index = vy_index(space->index[iid]);
-		if (!vy_is_committed_one(env, index))
+		struct vy_lsm *lsm = vy_lsm(space->index[iid]);
+		if (!vy_is_committed_one(env, lsm))
 			return false;
 	}
 	return true;
 }
 
 /**
- * Get a vinyl tuple from the index by the key.
- * @param index       Index in which search.
+ * Get a vinyl tuple from the LSM tree by the key.
+ * @param lsm         LSM tree in which to search.
  * @param tx          Current transaction.
  * @param rv          Read view.
  * @param key         Key statement.
@@ -1245,7 +1244,7 @@ vy_is_committed(struct vy_env *env, struct space *space)
  * @retval -1 Memory error or read error.
  */
 static inline int
-vy_index_get(struct vy_index *index, struct vy_tx *tx,
+vy_lsm_get(struct vy_lsm *lsm, struct vy_tx *tx,
 	     const struct vy_read_view **rv,
 	     struct tuple *key, struct tuple **result)
 {
@@ -1255,14 +1254,14 @@ vy_index_get(struct vy_index *index, struct vy_tx *tx,
 	 */
 	assert(tx == NULL || tx->state == VINYL_TX_READY);
 
-	if (tuple_field_count(key) >= index->cmp_def->part_count) {
-		if (tx != NULL && vy_tx_track_point(tx, index, key) != 0)
+	if (tuple_field_count(key) >= lsm->cmp_def->part_count) {
+		if (tx != NULL && vy_tx_track_point(tx, lsm, key) != 0)
 			return -1;
-		return vy_point_lookup(index, tx, rv, key, result);
+		return vy_point_lookup(lsm, tx, rv, key, result);
 	}
 
 	struct vy_read_iterator itr;
-	vy_read_iterator_open(&itr, index, tx, ITER_EQ, key, rv);
+	vy_read_iterator_open(&itr, lsm, tx, ITER_EQ, key, rv);
 	int rc = vy_read_iterator_next(&itr, result);
 	if (*result != NULL)
 		tuple_ref(*result);
@@ -1271,12 +1270,12 @@ vy_index_get(struct vy_index *index, struct vy_tx *tx,
 }
 
 /**
- * Check if the index contains the key. If true, then set
+ * Check if the LSM tree contains the key. If true, then set
  * a duplicate key error in the diagnostics area.
  * @param env        Vinyl environment.
  * @param tx         Current transaction.
  * @param space      Target space.
- * @param index      Index in which to search.
+ * @param lsm        LSM tree in which to search.
  * @param key        Key statement.
  *
  * @retval  0 Success, the key isn't found.
@@ -1284,7 +1283,7 @@ vy_index_get(struct vy_index *index, struct vy_tx *tx,
  */
 static inline int
 vy_check_is_unique(struct vy_env *env, struct vy_tx *tx, struct space *space,
-		   struct vy_index *index, struct tuple *key)
+		   struct vy_lsm *lsm, struct tuple *key)
 {
 	struct tuple *found;
 	/*
@@ -1293,13 +1292,13 @@ vy_check_is_unique(struct vy_env *env, struct vy_tx *tx, struct space *space,
 	 */
 	if (env->status != VINYL_ONLINE)
 		return 0;
-	if (vy_index_get(index, tx, vy_tx_read_view(tx), key, &found))
+	if (vy_lsm_get(lsm, tx, vy_tx_read_view(tx), key, &found))
 		return -1;
 
 	if (found) {
 		tuple_unref(found);
 		diag_set(ClientError, ER_TUPLE_FOUND,
-			 index_name_by_id(space, index->index_id),
+			 index_name_by_id(space, lsm->index_id),
 			 space_name(space));
 		return -1;
 	}
@@ -1307,11 +1306,11 @@ vy_check_is_unique(struct vy_env *env, struct vy_tx *tx, struct space *space,
 }
 
 /**
- * Insert a tuple in a primary index.
+ * Insert a tuple in a primary index LSM tree.
  * @param env   Vinyl environment.
  * @param tx    Current transaction.
  * @param space Target space.
- * @param pk    Primary vinyl index.
+ * @param pk    Primary index LSM tree.
  * @param stmt  Tuple to insert.
  *
  * @retval  0 Success.
@@ -1319,7 +1318,7 @@ vy_check_is_unique(struct vy_env *env, struct vy_tx *tx, struct space *space,
  */
 static inline int
 vy_insert_primary(struct vy_env *env, struct vy_tx *tx, struct space *space,
-		  struct vy_index *pk, struct tuple *stmt)
+		  struct vy_lsm *pk, struct tuple *stmt)
 {
 	assert(vy_stmt_type(stmt) == IPROTO_INSERT);
 	assert(tx != NULL && tx->state == VINYL_TX_READY);
@@ -1335,11 +1334,11 @@ vy_insert_primary(struct vy_env *env, struct vy_tx *tx, struct space *space,
 }
 
 /**
- * Insert a tuple in a secondary index.
+ * Insert a tuple in a secondary index LSM tree.
  * @param env       Vinyl environment.
  * @param tx        Current transaction.
  * @param space     Target space.
- * @param index     Secondary index.
+ * @param lsm       Secondary index LSM tree.
  * @param stmt      Tuple to replace.
  *
  * @retval  0 Success.
@@ -1347,38 +1346,38 @@ vy_insert_primary(struct vy_env *env, struct vy_tx *tx, struct space *space,
  */
 static int
 vy_insert_secondary(struct vy_env *env, struct vy_tx *tx, struct space *space,
-		    struct vy_index *index, struct tuple *stmt)
+		    struct vy_lsm *lsm, struct tuple *stmt)
 {
 	assert(vy_stmt_type(stmt) == IPROTO_INSERT ||
 	       vy_stmt_type(stmt) == IPROTO_REPLACE);
 	assert(tx != NULL && tx->state == VINYL_TX_READY);
-	assert(index->index_id > 0);
+	assert(lsm->index_id > 0);
 	/*
 	 * If the index is unique then the new tuple must not
 	 * conflict with existing tuples. If the index is not
 	 * unique a conflict is impossible.
 	 */
-	if (index->check_is_unique &&
-	    !key_update_can_be_skipped(index->key_def->column_mask,
+	if (lsm->check_is_unique &&
+	    !key_update_can_be_skipped(lsm->key_def->column_mask,
 				       vy_stmt_column_mask(stmt)) &&
-	    (!index->key_def->is_nullable ||
-	     !vy_tuple_key_contains_null(stmt, index->key_def))) {
-		struct tuple *key = vy_stmt_extract_key(stmt, index->key_def,
-							index->env->key_format);
+	    (!lsm->key_def->is_nullable ||
+	     !vy_tuple_key_contains_null(stmt, lsm->key_def))) {
+		struct tuple *key = vy_stmt_extract_key(stmt, lsm->key_def,
+							lsm->env->key_format);
 		if (key == NULL)
 			return -1;
-		int rc = vy_check_is_unique(env, tx, space, index, key);
+		int rc = vy_check_is_unique(env, tx, space, lsm, key);
 		tuple_unref(key);
 		if (rc != 0)
 			return -1;
 	}
 	/*
 	 * We must always append the statement to transaction write set
-	 * of each index, even if operation itself does not update
-	 * the index, e.g. it's an UPDATE, to ensure we read our
+	 * of each LSM tree, even if the operation itself does not update
+	 * the LSM tree, e.g. it's an UPDATE, to ensure we read our
 	 * own writes.
 	 */
-	return vy_tx_set(tx, index, stmt);
+	return vy_tx_set(tx, lsm, stmt);
 }
 
 /**
@@ -1403,7 +1402,7 @@ vy_replace_one(struct vy_env *env, struct vy_tx *tx, struct space *space,
 {
 	(void)env;
 	assert(tx != NULL && tx->state == VINYL_TX_READY);
-	struct vy_index *pk = vy_index(space->index[0]);
+	struct vy_lsm *pk = vy_lsm(space->index[0]);
 	assert(pk->index_id == 0);
 	if (tuple_validate_raw(pk->mem_format, request->tuple))
 		return -1;
@@ -1417,8 +1416,8 @@ vy_replace_one(struct vy_env *env, struct vy_tx *tx, struct space *space,
 	 * old tuple to pass it to the trigger.
 	 */
 	if (stmt != NULL && !rlist_empty(&space->on_replace)) {
-		if (vy_index_get(pk, tx, vy_tx_read_view(tx),
-				 new_tuple, &stmt->old_tuple) != 0)
+		if (vy_lsm_get(pk, tx, vy_tx_read_view(tx),
+			       new_tuple, &stmt->old_tuple) != 0)
 			goto error_unref;
 	}
 	if (vy_tx_set(tx, pk, new_tuple))
@@ -1459,7 +1458,7 @@ vy_replace_impl(struct vy_env *env, struct vy_tx *tx, struct space *space,
 	struct tuple *old_stmt = NULL;
 	struct tuple *new_stmt = NULL;
 	struct tuple *delete = NULL;
-	struct vy_index *pk = vy_index_find(space, 0);
+	struct vy_lsm *pk = vy_lsm_find(space, 0);
 	if (pk == NULL) /* space has no primary key */
 		return -1;
 	/* Primary key is dumped last. */
@@ -1473,7 +1472,7 @@ vy_replace_impl(struct vy_env *env, struct vy_tx *tx, struct space *space,
 		return -1;
 
 	/* Get full tuple from the primary index. */
-	if (vy_index_get(pk, tx, vy_tx_read_view(tx),
+	if (vy_lsm_get(pk, tx, vy_tx_read_view(tx),
 			 new_stmt, &old_stmt) != 0)
 		goto error;
 
@@ -1500,9 +1499,8 @@ vy_replace_impl(struct vy_env *env, struct vy_tx *tx, struct space *space,
 
 	/* Update secondary keys, avoid duplicates. */
 	for (uint32_t iid = 1; iid < space->index_count; ++iid) {
-		struct vy_index *index;
-		index = vy_index(space->index[iid]);
-		if (vy_is_committed_one(env, index))
+		struct vy_lsm *lsm = vy_lsm(space->index[iid]);
+		if (vy_is_committed_one(env, lsm))
 			continue;
 		/*
 		 * Delete goes first, so if old and new keys
@@ -1510,10 +1508,10 @@ vy_replace_impl(struct vy_env *env, struct vy_tx *tx, struct space *space,
 		 * transaction index.
 		 */
 		if (old_stmt != NULL) {
-			if (vy_tx_set(tx, index, delete) != 0)
+			if (vy_tx_set(tx, lsm, delete) != 0)
 				goto error;
 		}
-		if (vy_insert_secondary(env, tx, space, index, new_stmt) != 0)
+		if (vy_insert_secondary(env, tx, space, lsm, new_stmt) != 0)
 			goto error;
 	}
 	if (delete != NULL)
@@ -1538,8 +1536,9 @@ error:
 }
 
 /**
- * Check that the key can be used for search in a unique index.
- * @param  index      Index for checking.
+ * Check that the key can be used for search in a unique index
+ * LSM tree.
+ * @param  lsm        LSM tree for checking.
  * @param  key        MessagePack'ed data, the array without a
  *                    header.
  * @param  part_count Part count of the key.
@@ -1549,34 +1548,34 @@ error:
  *            in the diagnostics area.
  */
 static inline int
-vy_unique_key_validate(struct vy_index *index, const char *key,
+vy_unique_key_validate(struct vy_lsm *lsm, const char *key,
 		       uint32_t part_count)
 {
-	assert(index->opts.is_unique);
+	assert(lsm->opts.is_unique);
 	assert(key != NULL || part_count == 0);
 	/*
-	 * The index contains tuples with concatenation of
+	 * The LSM tree contains tuples with concatenation of
 	 * secondary and primary key fields, while the key
 	 * supplied by the user only contains the secondary key
 	 * fields. Use the correct key def to validate the key.
-	 * The key can be used to look up in the index since the
-	 * supplied key parts uniquely identify the tuple, as long
-	 * as the index is unique.
+	 * The key can be used to look up in the LSM tree since
+	 * the supplied key parts uniquely identify the tuple,
+	 * as long as the index is unique.
 	 */
-	uint32_t original_part_count = index->key_def->part_count;
+	uint32_t original_part_count = lsm->key_def->part_count;
 	if (original_part_count != part_count) {
 		diag_set(ClientError, ER_EXACT_MATCH,
 			 original_part_count, part_count);
 		return -1;
 	}
-	return key_validate_parts(index->cmp_def, key, part_count, false);
+	return key_validate_parts(lsm->cmp_def, key, part_count, false);
 }
 
 /**
- * Find a tuple in the primary index by the key of the specified
- * index.
- * @param index       Index for which the key is specified. Can be
- *                    both primary and secondary.
+ * Find a tuple in the primary index LSM tree by the key of the
+ * specified LSM tree.
+ * @param lsm         LSM tree for which the key is specified.
+ *                    Can be both primary and secondary.
  * @param tx          Current transaction.
  * @param rv          Read view.
  * @param key_raw     MessagePack'ed data, the array without a
@@ -1589,36 +1588,36 @@ vy_unique_key_validate(struct vy_index *index, const char *key,
  * @retval -1 Memory error.
  */
 static inline int
-vy_index_full_by_key(struct vy_index *index, struct vy_tx *tx,
-		     const struct vy_read_view **rv,
-		     const char *key_raw, uint32_t part_count,
-		     struct tuple **result)
+vy_lsm_full_by_key(struct vy_lsm *lsm, struct vy_tx *tx,
+		   const struct vy_read_view **rv,
+		   const char *key_raw, uint32_t part_count,
+		   struct tuple **result)
 {
 	int rc;
-	struct tuple *key = vy_stmt_new_select(index->env->key_format,
+	struct tuple *key = vy_stmt_new_select(lsm->env->key_format,
 					       key_raw, part_count);
 	if (key == NULL)
 		return -1;
 	struct tuple *found;
-	rc = vy_index_get(index, tx, rv, key, &found);
+	rc = vy_lsm_get(lsm, tx, rv, key, &found);
 	tuple_unref(key);
 	if (rc != 0)
 		return -1;
-	if (index->index_id == 0 || found == NULL) {
+	if (lsm->index_id == 0 || found == NULL) {
 		*result = found;
 		return 0;
 	}
 	/*
 	 * No need in vy_tx_track() as the tuple is already
-	 * tracked in the secondary index.
+	 * tracked in the secondary index LSM tree.
 	 */
-	rc = vy_point_lookup(index->pk, tx, rv, found, result);
+	rc = vy_point_lookup(lsm->pk, tx, rv, found, result);
 	tuple_unref(found);
 	return rc;
 }
 
 /**
- * Delete the tuple from all indexes of the vinyl space.
+ * Delete the tuple from all LSM trees of the vinyl space.
  * @param env        Vinyl environment.
  * @param tx         Current transaction.
  * @param space      Vinyl space.
@@ -1631,7 +1630,7 @@ static inline int
 vy_delete_impl(struct vy_env *env, struct vy_tx *tx, struct space *space,
 	       const struct tuple *tuple)
 {
-	struct vy_index *pk = vy_index_find(space, 0);
+	struct vy_lsm *pk = vy_lsm_find(space, 0);
 	if (pk == NULL)
 		return -1;
 	/* Primary key is dumped last. */
@@ -1644,12 +1643,11 @@ vy_delete_impl(struct vy_env *env, struct vy_tx *tx, struct space *space,
 		goto error;
 
 	/* Second, delete from secondary indexes. */
-	struct vy_index *index;
 	for (uint32_t i = 1; i < space->index_count; ++i) {
-		index = vy_index(space->index[i]);
-		if (vy_is_committed_one(env, index))
+		struct vy_lsm *lsm = vy_lsm(space->index[i]);
+		if (vy_is_committed_one(env, lsm))
 			continue;
-		if (vy_tx_set(tx, index, delete) != 0)
+		if (vy_tx_set(tx, lsm, delete) != 0)
 			goto error;
 	}
 	tuple_unref(delete);
@@ -1678,16 +1676,16 @@ vy_delete(struct vy_env *env, struct vy_tx *tx, struct txn_stmt *stmt,
 {
 	if (vy_is_committed(env, space))
 		return 0;
-	struct vy_index *pk = vy_index_find(space, 0);
+	struct vy_lsm *pk = vy_lsm_find(space, 0);
 	if (pk == NULL)
 		return -1;
-	struct vy_index *index = vy_index_find_unique(space, request->index_id);
-	if (index == NULL)
+	struct vy_lsm *lsm = vy_lsm_find_unique(space, request->index_id);
+	if (lsm == NULL)
 		return -1;
 	bool has_secondary = space->index_count > 1;
 	const char *key = request->key;
 	uint32_t part_count = mp_decode_array(&key);
-	if (vy_unique_key_validate(index, key, part_count))
+	if (vy_unique_key_validate(lsm, key, part_count))
 		return -1;
 	/*
 	 * There are two cases when we need to get the full tuple
@@ -1700,7 +1698,7 @@ vy_delete(struct vy_env *env, struct vy_tx *tx, struct txn_stmt *stmt,
 	 *   and pass them to indexes for deletion.
 	 */
 	if (has_secondary || !rlist_empty(&space->on_replace)) {
-		if (vy_index_full_by_key(index, tx, vy_tx_read_view(tx),
+		if (vy_lsm_full_by_key(lsm, tx, vy_tx_read_view(tx),
 				key, part_count, &stmt->old_tuple) != 0)
 			return -1;
 		if (stmt->old_tuple == NULL)
@@ -1710,7 +1708,7 @@ vy_delete(struct vy_env *env, struct vy_tx *tx, struct txn_stmt *stmt,
 		assert(stmt->old_tuple != NULL);
 		return vy_delete_impl(env, tx, space, stmt->old_tuple);
 	} else { /* Primary is the single index in the space. */
-		assert(index->index_id == 0);
+		assert(lsm->index_id == 0);
 		struct tuple *delete =
 			vy_stmt_new_surrogate_delete_from_key(request->key,
 							      pk->key_def,
@@ -1730,9 +1728,7 @@ vy_delete(struct vy_env *env, struct vy_tx *tx, struct txn_stmt *stmt,
  * primary key of a tuple, which is prohibited, to avoid funny
  * effects during replication.
  *
- * @param pk         Primary index.
- * @param index_name Name of the index which was updated - it may
- *                   be not the primary index.
+ * @param pk         Primary index LSM tree.
  * @param old_tuple  The tuple before update.
  * @param new_tuple  The tuple after update.
  * @param column_mask Bitmask of the update operation.
@@ -1742,7 +1738,7 @@ vy_delete(struct vy_env *env, struct vy_tx *tx, struct txn_stmt *stmt,
  * @retval -1 Attempt to modify the primary key.
  */
 static inline int
-vy_check_update(struct space *space, const struct vy_index *pk,
+vy_check_update(struct space *space, const struct vy_lsm *pk,
 		const struct tuple *old_tuple, const struct tuple *new_tuple,
 		uint64_t column_mask)
 {
@@ -1776,23 +1772,23 @@ vy_update(struct vy_env *env, struct vy_tx *tx, struct txn_stmt *stmt,
 	assert(tx != NULL && tx->state == VINYL_TX_READY);
 	if (vy_is_committed(env, space))
 		return 0;
-	struct vy_index *index = vy_index_find_unique(space, request->index_id);
-	if (index == NULL)
+	struct vy_lsm *lsm = vy_lsm_find_unique(space, request->index_id);
+	if (lsm == NULL)
 		return -1;
 	const char *key = request->key;
 	uint32_t part_count = mp_decode_array(&key);
-	if (vy_unique_key_validate(index, key, part_count))
+	if (vy_unique_key_validate(lsm, key, part_count))
 		return -1;
 
-	if (vy_index_full_by_key(index, tx, vy_tx_read_view(tx),
-				 key, part_count, &stmt->old_tuple) != 0)
+	if (vy_lsm_full_by_key(lsm, tx, vy_tx_read_view(tx),
+			       key, part_count, &stmt->old_tuple) != 0)
 		return -1;
 	/* Nothing to update. */
 	if (stmt->old_tuple == NULL)
 		return 0;
 
 	/* Apply update operations. */
-	struct vy_index *pk = vy_index(space->index[0]);
+	struct vy_lsm *pk = vy_lsm(space->index[0]);
 	assert(pk != NULL);
 	assert(pk->index_id == 0);
 	/* Primary key is dumped last. */
@@ -1849,12 +1845,12 @@ vy_update(struct vy_env *env, struct vy_tx *tx, struct txn_stmt *stmt,
 	vy_stmt_set_column_mask(delete, column_mask);
 
 	for (uint32_t i = 1; i < space->index_count; ++i) {
-		index = vy_index(space->index[i]);
-		if (vy_is_committed_one(env, index))
+		lsm = vy_lsm(space->index[i]);
+		if (vy_is_committed_one(env, lsm))
 			continue;
-		if (vy_tx_set(tx, index, delete) != 0)
+		if (vy_tx_set(tx, lsm, delete) != 0)
 			goto error;
-		if (vy_insert_secondary(env, tx, space, index, stmt->new_tuple))
+		if (vy_insert_secondary(env, tx, space, lsm, stmt->new_tuple))
 			goto error;
 	}
 	tuple_unref(delete);
@@ -1882,14 +1878,13 @@ vy_insert_first_upsert(struct vy_env *env, struct vy_tx *tx,
 	assert(tx != NULL && tx->state == VINYL_TX_READY);
 	assert(space->index_count > 0);
 	assert(vy_stmt_type(stmt) == IPROTO_INSERT);
-	struct vy_index *pk = vy_index(space->index[0]);
+	struct vy_lsm *pk = vy_lsm(space->index[0]);
 	assert(pk->index_id == 0);
 	if (vy_tx_set(tx, pk, stmt) != 0)
 		return -1;
-	struct vy_index *index;
 	for (uint32_t i = 1; i < space->index_count; ++i) {
-		index = vy_index(space->index[i]);
-		if (vy_insert_secondary(env, tx, space, index, stmt) != 0)
+		struct vy_lsm *lsm = vy_lsm(space->index[i]);
+		if (vy_insert_secondary(env, tx, space, lsm, stmt) != 0)
 			return -1;
 	}
 	return 0;
@@ -1898,7 +1893,7 @@ vy_insert_first_upsert(struct vy_env *env, struct vy_tx *tx,
 /**
  * Insert UPSERT into the write set of the transaction.
  * @param tx        Transaction which deletes.
- * @param index     Index in which \p tx deletes.
+ * @param lsm       LSM tree in which \p tx deletes.
  * @param tuple     MessagePack array.
  * @param tuple_end End of the tuple.
  * @param expr      MessagePack array of update operations.
@@ -1908,7 +1903,7 @@ vy_insert_first_upsert(struct vy_env *env, struct vy_tx *tx,
  * @retval -1 Memory error.
  */
 static int
-vy_index_upsert(struct vy_tx *tx, struct vy_index *index,
+vy_lsm_upsert(struct vy_tx *tx, struct vy_lsm *lsm,
 	  const char *tuple, const char *tuple_end,
 	  const char *expr, const char *expr_end)
 {
@@ -1917,12 +1912,12 @@ vy_index_upsert(struct vy_tx *tx, struct vy_index *index,
 	struct iovec operations[1];
 	operations[0].iov_base = (void *)expr;
 	operations[0].iov_len = expr_end - expr;
-	vystmt = vy_stmt_new_upsert(index->upsert_format, tuple, tuple_end,
+	vystmt = vy_stmt_new_upsert(lsm->upsert_format, tuple, tuple_end,
 				    operations, 1);
 	if (vystmt == NULL)
 		return -1;
 	assert(vy_stmt_type(vystmt) == IPROTO_UPSERT);
-	int rc = vy_tx_set(tx, index, vystmt);
+	int rc = vy_tx_set(tx, lsm, vystmt);
 	tuple_unref(vystmt);
 	return rc;
 }
@@ -2030,7 +2025,7 @@ vy_upsert(struct vy_env *env, struct vy_tx *tx, struct txn_stmt *stmt,
 	const char *tuple_end = request->tuple_end;
 	const char *ops = request->ops;
 	const char *ops_end = request->ops_end;
-	struct vy_index *pk = vy_index_find(space, 0);
+	struct vy_lsm *pk = vy_lsm_find(space, 0);
 	if (pk == NULL)
 		return -1;
 	/* Primary key is dumped last. */
@@ -2039,7 +2034,7 @@ vy_upsert(struct vy_env *env, struct vy_tx *tx, struct txn_stmt *stmt,
 		return -1;
 
 	if (space->index_count == 1 && rlist_empty(&space->on_replace))
-		return vy_index_upsert(tx, pk, tuple, tuple_end, ops, ops_end);
+		return vy_lsm_upsert(tx, pk, tuple, tuple_end, ops, ops_end);
 
 	const char *old_tuple, *old_tuple_end;
 	const char *new_tuple, *new_tuple_end;
@@ -2059,7 +2054,7 @@ vy_upsert(struct vy_env *env, struct vy_tx *tx, struct txn_stmt *stmt,
 					pk->key_def, pk->env->key_format);
 	if (key == NULL)
 		return -1;
-	int rc = vy_index_get(pk, tx, vy_tx_read_view(tx),
+	int rc = vy_lsm_get(pk, tx, vy_tx_read_view(tx),
 			      key, &stmt->old_tuple);
 	tuple_unref(key);
 	if (rc != 0)
@@ -2111,7 +2106,7 @@ vy_upsert(struct vy_env *env, struct vy_tx *tx, struct txn_stmt *stmt,
 		diag_log();
 		/*
 		 * Upsert is skipped, to match the semantics of
-		 * vy_index_upsert().
+		 * vy_lsm_upsert().
 		 */
 		return 0;
 	}
@@ -2121,7 +2116,6 @@ vy_upsert(struct vy_env *env, struct vy_tx *tx, struct txn_stmt *stmt,
 		return 0;
 
 	/* Replace in secondary indexes works as delete insert. */
-	struct vy_index *index;
 	struct tuple *delete = vy_stmt_new_surrogate_delete(mask_format,
 							    stmt->old_tuple);
 	if (delete == NULL)
@@ -2129,12 +2123,12 @@ vy_upsert(struct vy_env *env, struct vy_tx *tx, struct txn_stmt *stmt,
 	vy_stmt_set_column_mask(delete, column_mask);
 
 	for (uint32_t i = 1; i < space->index_count; ++i) {
-		index = vy_index(space->index[i]);
-		if (vy_is_committed_one(env, index))
+		struct vy_lsm *lsm = vy_lsm(space->index[i]);
+		if (vy_is_committed_one(env, lsm))
 			continue;
-		if (vy_tx_set(tx, index, delete) != 0)
+		if (vy_tx_set(tx, lsm, delete) != 0)
 			goto error;
-		if (vy_insert_secondary(env, tx, space, index,
+		if (vy_insert_secondary(env, tx, space, lsm,
 					stmt->new_tuple) != 0)
 			goto error;
 	}
@@ -2164,7 +2158,7 @@ vy_insert(struct vy_env *env, struct vy_tx *tx, struct txn_stmt *stmt,
 	  struct space *space, struct request *request)
 {
 	assert(stmt != NULL);
-	struct vy_index *pk = vy_index_find(space, 0);
+	struct vy_lsm *pk = vy_lsm_find(space, 0);
 	if (pk == NULL)
 		/* The space has no primary index. */
 		return -1;
@@ -2182,10 +2176,10 @@ vy_insert(struct vy_env *env, struct vy_tx *tx, struct txn_stmt *stmt,
 		return -1;
 
 	for (uint32_t iid = 1; iid < space->index_count; ++iid) {
-		struct vy_index *index = vy_index(space->index[iid]);
-		if (vy_is_committed_one(env, index))
+		struct vy_lsm *lsm = vy_lsm(space->index[iid]);
+		if (vy_is_committed_one(env, lsm))
 			continue;
-		if (vy_insert_secondary(env, tx, space, index,
+		if (vy_insert_secondary(env, tx, space, lsm,
 					stmt->new_tuple) != 0)
 			return -1;
 	}
@@ -2544,7 +2538,7 @@ vy_squash_queue_new(void);
 static void
 vy_squash_queue_delete(struct vy_squash_queue *q);
 static void
-vy_squash_schedule(struct vy_index *index, struct tuple *stmt,
+vy_squash_schedule(struct vy_lsm *lsm, struct tuple *stmt,
 		   void /* struct vy_env */ *arg);
 
 static struct vy_env *
@@ -2610,10 +2604,10 @@ vy_env_new(const char *path, size_t memory,
 			    vy_env_dump_complete_cb,
 			    &e->run_env, &e->xm->read_views);
 
-	if (vy_index_env_create(&e->index_env, e->path,
-				&e->scheduler.generation,
-				vy_squash_schedule, e) != 0)
-		goto error_index_env;
+	if (vy_lsm_env_create(&e->lsm_env, e->path,
+			      &e->scheduler.generation,
+			      vy_squash_schedule, e) != 0)
+		goto error_lsm_env;
 
 	struct slab_cache *slab_cache = cord_slab_cache();
 	mempool_create(&e->iterator_pool, slab_cache,
@@ -2627,7 +2621,7 @@ vy_env_new(const char *path, size_t memory,
 	vy_run_env_create(&e->run_env);
 	vy_log_init(e->path);
 	return e;
-error_index_env:
+error_lsm_env:
 	vy_mem_env_destroy(&e->mem_env);
 	vy_scheduler_destroy(&e->scheduler);
 	vy_squash_queue_delete(e->squash_queue);
@@ -2653,7 +2647,7 @@ vy_env_delete(struct vy_env *e)
 	histogram_delete(e->dump_bw);
 	mempool_destroy(&e->iterator_pool);
 	vy_run_env_destroy(&e->run_env);
-	vy_index_env_destroy(&e->index_env);
+	vy_lsm_env_destroy(&e->lsm_env);
 	vy_mem_env_destroy(&e->mem_env);
 	vy_cache_env_destroy(&e->cache_env);
 	vy_quota_destroy(&e->quota);
@@ -2719,7 +2713,7 @@ vinyl_engine_set_too_long_threshold(struct vinyl_engine *vinyl,
 				    double too_long_threshold)
 {
 	vinyl->env->quota.too_long_threshold = too_long_threshold;
-	vinyl->env->index_env.too_long_threshold = too_long_threshold;
+	vinyl->env->lsm_env.too_long_threshold = too_long_threshold;
 }
 
 /** }}} Environment */
@@ -2857,11 +2851,11 @@ vinyl_engine_end_recovery(struct engine *engine)
 		unreachable();
 	}
 	/*
-	 * Do not start reader threads if no Vinyl index was
-	 * recovered. The threads will be started lazily upon
-	 * the first index creation, see vy_index_open().
+	 * Do not start reader threads if no LSM tree was recovered.
+	 * The threads will be started lazily upon the first LSM tree
+	 * creation, see vinyl_index_open().
 	 */
-	if (e->index_env.index_count > 0)
+	if (e->lsm_env.lsm_count > 0)
 		vy_run_env_enable_coio(&e->run_env, e->read_threads);
 	return 0;
 }
@@ -2888,14 +2882,14 @@ struct vy_join_ctx {
 	/** ID of the space currently being relayed. */
 	uint32_t space_id;
 	/**
-	 * Index key definition, as defined by the user.
+	 * LSM tree key definition, as defined by the user.
 	 * We only send the primary key, so the definition
 	 * provided by the user is correct for compare.
 	 */
 	struct key_def *key_def;
-	/** Index format used for REPLACE and DELETE statements. */
+	/** LSM tree format used for REPLACE and DELETE statements. */
 	struct tuple_format *format;
-	/** Index format used for UPSERT statements. */
+	/** LSM tree format used for UPSERT statements. */
 	struct tuple_format *upsert_format;
 	/**
 	 * Write iterator for merging runs before sending
@@ -2930,13 +2924,13 @@ vy_prepare_send_slice(struct vy_join_ctx *ctx,
 		goto out;
 
 	if (slice_info->begin != NULL) {
-		begin = vy_key_from_msgpack(ctx->env->index_env.key_format,
+		begin = vy_key_from_msgpack(ctx->env->lsm_env.key_format,
 					    slice_info->begin);
 		if (begin == NULL)
 			goto out;
 	}
 	if (slice_info->end != NULL) {
-		end = vy_key_from_msgpack(ctx->env->index_env.key_format,
+		end = vy_key_from_msgpack(ctx->env->lsm_env.key_format,
 					  slice_info->end);
 		if (end == NULL)
 			goto out;
@@ -3043,28 +3037,27 @@ out:
 	return rc;
 }
 
-/** Send all tuples stored in the given index. */
+/** Send all tuples stored in the given LSM tree. */
 static int
-vy_send_index(struct vy_join_ctx *ctx,
-	      struct vy_index_recovery_info *index_info)
+vy_send_lsm(struct vy_join_ctx *ctx, struct vy_lsm_recovery_info *lsm_info)
 {
 	int rc = -1;
 
-	if (index_info->is_dropped)
+	if (lsm_info->is_dropped)
 		return 0;
 
 	/*
-	 * We are only interested in the primary index.
+	 * We are only interested in the primary index LSM tree.
 	 * Secondary keys will be rebuilt on the destination.
 	 */
-	if (index_info->index_id != 0)
+	if (lsm_info->index_id != 0)
 		return 0;
 
-	ctx->space_id = index_info->space_id;
+	ctx->space_id = lsm_info->space_id;
 
 	/* Create key definition and tuple format. */
-	ctx->key_def = key_def_new_with_parts(index_info->key_parts,
-					      index_info->key_part_count);
+	ctx->key_def = key_def_new_with_parts(lsm_info->key_parts,
+					      lsm_info->key_part_count);
 	if (ctx->key_def == NULL)
 		goto out;
 	ctx->format = tuple_format_new(&vy_tuple_format_vtab, &ctx->key_def,
@@ -3079,8 +3072,8 @@ vy_send_index(struct vy_join_ctx *ctx,
 
 	/* Send ranges. */
 	struct vy_range_recovery_info *range_info;
-	assert(!rlist_empty(&index_info->ranges));
-	rlist_foreach_entry(range_info, &index_info->ranges, in_index) {
+	assert(!rlist_empty(&lsm_info->ranges));
+	rlist_foreach_entry(range_info, &lsm_info->ranges, in_lsm) {
 		rc = vy_send_range(ctx, range_info);
 		if (rc != 0)
 			break;
@@ -3156,9 +3149,9 @@ vinyl_engine_join(struct engine *engine, struct vclock *vclock,
 		goto out_join_cord;
 	}
 	rc = 0;
-	struct vy_index_recovery_info *index_info;
-	rlist_foreach_entry(index_info, &recovery->indexes, in_recovery) {
-		rc = vy_send_index(ctx, index_info);
+	struct vy_lsm_recovery_info *lsm_info;
+	rlist_foreach_entry(lsm_info, &recovery->lsms, in_recovery) {
+		rc = vy_send_lsm(ctx, lsm_info);
 		if (rc != 0)
 			break;
 	}
@@ -3251,7 +3244,7 @@ vinyl_space_apply_initial_join_row(struct space *space, struct request *request)
  */
 static void
 vy_gc_run(struct vy_env *env,
-	  struct vy_index_recovery_info *index_info,
+	  struct vy_lsm_recovery_info *lsm_info,
 	  struct vy_run_recovery_info *run_info)
 {
 	ERROR_INJECT(ERRINJ_VY_GC,
@@ -3259,8 +3252,8 @@ vy_gc_run(struct vy_env *env,
 				(long long)run_info->id); return;});
 
 	/* Try to delete files. */
-	if (vy_run_remove_files(env->path, index_info->space_id,
-				index_info->index_id, run_info->id) != 0)
+	if (vy_run_remove_files(env->path, lsm_info->space_id,
+				lsm_info->index_id, run_info->id) != 0)
 		return;
 
 	/* Forget the run on success. */
@@ -3287,16 +3280,16 @@ vy_gc(struct vy_env *env, struct vy_recovery *recovery,
       unsigned int gc_mask, int64_t gc_lsn)
 {
 	int loops = 0;
-	struct vy_index_recovery_info *index_info;
-	rlist_foreach_entry(index_info, &recovery->indexes, in_recovery) {
+	struct vy_lsm_recovery_info *lsm_info;
+	rlist_foreach_entry(lsm_info, &recovery->lsms, in_recovery) {
 		struct vy_run_recovery_info *run_info;
-		rlist_foreach_entry(run_info, &index_info->runs, in_index) {
+		rlist_foreach_entry(run_info, &lsm_info->runs, in_lsm) {
 			if ((run_info->is_dropped &&
 			     run_info->gc_lsn < gc_lsn &&
 			     (gc_mask & VY_GC_DROPPED) != 0) ||
 			    (run_info->is_incomplete &&
 			     (gc_mask & VY_GC_INCOMPLETE) != 0)) {
-				vy_gc_run(env, index_info, run_info);
+				vy_gc_run(env, lsm_info, run_info);
 			}
 			if (loops % VY_YIELD_LOOPS == 0)
 				fiber_sleep(0);
@@ -3350,20 +3343,20 @@ vinyl_engine_backup(struct engine *engine, struct vclock *vclock,
 	}
 	int rc = 0;
 	int loops = 0;
-	struct vy_index_recovery_info *index_info;
-	rlist_foreach_entry(index_info, &recovery->indexes, in_recovery) {
-		if (index_info->is_dropped)
+	struct vy_lsm_recovery_info *lsm_info;
+	rlist_foreach_entry(lsm_info, &recovery->lsms, in_recovery) {
+		if (lsm_info->is_dropped)
 			continue;
 		struct vy_run_recovery_info *run_info;
-		rlist_foreach_entry(run_info, &index_info->runs, in_index) {
+		rlist_foreach_entry(run_info, &lsm_info->runs, in_lsm) {
 			if (run_info->is_dropped || run_info->is_incomplete)
 				continue;
 			char path[PATH_MAX];
 			for (int type = 0; type < vy_file_MAX; type++) {
 				vy_run_snprint_path(path, sizeof(path),
 						    env->path,
-						    index_info->space_id,
-						    index_info->index_id,
+						    lsm_info->space_id,
+						    lsm_info->index_id,
 						    run_info->id, type);
 				rc = cb(path, cb_arg);
 				if (rc != 0)
@@ -3390,8 +3383,8 @@ struct vy_squash {
 	struct stailq_entry next;
 	/** Vinyl environment. */
 	struct vy_env *env;
-	/** Index this request is for. */
-	struct vy_index *index;
+	/** LSM tree this request is for. */
+	struct vy_lsm *lsm;
 	/** Key to squash upserts for. */
 	struct tuple *stmt;
 };
@@ -3409,15 +3402,15 @@ struct vy_squash_queue {
 
 static struct vy_squash *
 vy_squash_new(struct mempool *pool, struct vy_env *env,
-	      struct vy_index *index, struct tuple *stmt)
+	      struct vy_lsm *lsm, struct tuple *stmt)
 {
 	struct vy_squash *squash;
 	squash = mempool_alloc(pool);
 	if (squash == NULL)
 		return NULL;
 	squash->env = env;
-	vy_index_ref(index);
-	squash->index = index;
+	vy_lsm_ref(lsm);
+	squash->lsm = lsm;
 	tuple_ref(stmt);
 	squash->stmt = stmt;
 	return squash;
@@ -3426,7 +3419,7 @@ vy_squash_new(struct mempool *pool, struct vy_env *env,
 static void
 vy_squash_delete(struct mempool *pool, struct vy_squash *squash)
 {
-	vy_index_unref(squash->index);
+	vy_lsm_unref(squash->lsm);
 	tuple_unref(squash->stmt);
 	mempool_free(pool, squash);
 }
@@ -3438,23 +3431,23 @@ vy_squash_process(struct vy_squash *squash)
 	if (inj != NULL && inj->dparam > 0)
 		fiber_sleep(inj->dparam);
 
-	struct vy_index *index = squash->index;
+	struct vy_lsm *lsm = squash->lsm;
 	struct vy_env *env = squash->env;
 	/*
 	 * vy_apply_upsert() is used for primary key only,
-	 * so this is the same as index->key_def
+	 * so this is the same as lsm->key_def
 	 */
-	struct key_def *def = index->cmp_def;
+	struct key_def *def = lsm->cmp_def;
 
-	/* Upserts enabled only in the primary index. */
-	assert(index->index_id == 0);
+	/* Upserts enabled only in the primary index LSM tree. */
+	assert(lsm->index_id == 0);
 
 	/*
 	 * Use the committed read view to avoid squashing
 	 * prepared, but not committed statements.
 	 */
 	struct tuple *result;
-	if (vy_point_lookup(index, NULL, &env->xm->p_committed_read_view,
+	if (vy_point_lookup(lsm, NULL, &env->xm->p_committed_read_view,
 			    squash->stmt, &result) != 0)
 		return -1;
 	if (result == NULL)
@@ -3465,7 +3458,7 @@ vy_squash_process(struct vy_squash *squash)
 	 * have been inserted into the in-memory tree. Apply them to
 	 * the result.
 	 */
-	struct vy_mem *mem = index->mem;
+	struct vy_mem *mem = lsm->mem;
 	struct tree_mem_key tree_key = {
 		.stmt = result,
 		.lsn = vy_stmt_lsn(result),
@@ -3501,7 +3494,7 @@ vy_squash_process(struct vy_squash *squash)
 	 * lsns = {1, 2, 3} are squashed. But now the n_upsert
 	 * values in the prepared statements are not correct.
 	 * If we do not update these values, then
-	 * vy_index_commit_upsert will not be able to squash them.
+	 * vy_lsm_commit_upsert will not be able to squash them.
 	 *
 	 * So after squashing it is necessary to update the
 	 * n_upsert values in the prepared statements:
@@ -3542,11 +3535,11 @@ vy_squash_process(struct vy_squash *squash)
 			tuple_unref(result);
 			return 0;
 		}
-		assert(index->index_id == 0);
+		assert(lsm->index_id == 0);
 		struct tuple *applied =
 			vy_apply_upsert(mem_stmt, result, def, mem->format,
 					mem->upsert_format, true);
-		index->stat.upsert.applied++;
+		lsm->stat.upsert.applied++;
 		tuple_unref(result);
 		if (applied == NULL)
 			return -1;
@@ -3584,7 +3577,7 @@ vy_squash_process(struct vy_squash *squash)
 		}
 	}
 
-	index->stat.upsert.squashed++;
+	lsm->stat.upsert.squashed++;
 
 	/*
 	 * Insert the resulting REPLACE statement to the mem
@@ -3592,7 +3585,7 @@ vy_squash_process(struct vy_squash *squash)
 	 */
 	size_t mem_used_before = lsregion_used(&env->mem_env.allocator);
 	const struct tuple *region_stmt = NULL;
-	int rc = vy_index_set(index, mem, result, &region_stmt);
+	int rc = vy_lsm_set(lsm, mem, result, &region_stmt);
 	tuple_unref(result);
 	size_t mem_used_after = lsregion_used(&env->mem_env.allocator);
 	assert(mem_used_after >= mem_used_before);
@@ -3661,13 +3654,13 @@ vy_squash_queue_f(va_list va)
  * statement after it. Done in a background fiber.
  */
 static void
-vy_squash_schedule(struct vy_index *index, struct tuple *stmt, void *arg)
+vy_squash_schedule(struct vy_lsm *lsm, struct tuple *stmt, void *arg)
 {
 	struct vy_env *env = arg;
 	struct vy_squash_queue *sq = env->squash_queue;
 
 	say_verbose("%s: schedule upsert optimization for %s",
-		    vy_index_name(index), vy_stmt_str(stmt));
+		    vy_lsm_name(lsm), vy_stmt_str(stmt));
 
 	/* Start the upsert squashing fiber on demand. */
 	if (sq->fiber == NULL) {
@@ -3677,7 +3670,7 @@ vy_squash_schedule(struct vy_index *index, struct tuple *stmt, void *arg)
 		fiber_start(sq->fiber, sq);
 	}
 
-	struct vy_squash *squash = vy_squash_new(&sq->pool, env, index, stmt);
+	struct vy_squash *squash = vy_squash_new(&sq->pool, env, lsm, stmt);
 	if (squash == NULL)
 		goto fail;
 
@@ -3712,8 +3705,8 @@ static void
 vinyl_iterator_close(struct vinyl_iterator *it)
 {
 	vy_read_iterator_close(&it->iterator);
-	vy_index_unref(it->index);
-	it->index = NULL;
+	vy_lsm_unref(it->lsm);
+	it->lsm = NULL;
 	tuple_unref(it->key);
 	it->key = NULL;
 	if (it->tx == &it->tx_autocommit) {
@@ -3736,7 +3729,7 @@ vinyl_iterator_primary_next(struct iterator *base, struct tuple **ret)
 {
 	assert(base->next == vinyl_iterator_primary_next);
 	struct vinyl_iterator *it = (struct vinyl_iterator *)base;
-	assert(it->index->index_id == 0);
+	assert(it->lsm->index_id == 0);
 	struct tuple *tuple;
 
 	if (it->tx == NULL) {
@@ -3770,7 +3763,7 @@ vinyl_iterator_secondary_next(struct iterator *base, struct tuple **ret)
 {
 	assert(base->next == vinyl_iterator_secondary_next);
 	struct vinyl_iterator *it = (struct vinyl_iterator *)base;
-	assert(it->index->index_id > 0);
+	assert(it->lsm->index_id > 0);
 	struct tuple *tuple;
 
 	if (it->tx == NULL) {
@@ -3805,7 +3798,7 @@ vinyl_iterator_secondary_next(struct iterator *base, struct tuple **ret)
 	 * Note, there's no need for vy_tx_track() as the
 	 * tuple is already tracked in the secondary index.
 	 */
-	if (vy_point_lookup(it->index->pk, it->tx, vy_tx_read_view(it->tx),
+	if (vy_point_lookup(it->lsm->pk, it->tx, vy_tx_read_view(it->tx),
 			    tuple, &tuple) != 0)
 		goto fail;
 	*ret = tuple_bless(tuple);
@@ -3831,7 +3824,7 @@ static struct iterator *
 vinyl_index_create_iterator(struct index *base, enum iterator_type type,
 			    const char *key, uint32_t part_count)
 {
-	struct vy_index *index = vy_index(base);
+	struct vy_lsm *lsm = vy_lsm(base);
 	struct vy_env *env = vy_env(base->engine);
 
 	if (type > ITER_GT) {
@@ -3846,22 +3839,22 @@ vinyl_index_create_iterator(struct index *base, enum iterator_type type,
 			 "mempool", "struct vinyl_iterator");
 		return NULL;
 	}
-	it->key = vy_stmt_new_select(index->env->key_format, key, part_count);
+	it->key = vy_stmt_new_select(lsm->env->key_format, key, part_count);
 	if (it->key == NULL) {
 		mempool_free(&env->iterator_pool, it);
 		return NULL;
 	}
 
 	iterator_create(&it->base, base);
-	if (index->index_id == 0)
+	if (lsm->index_id == 0)
 		it->base.next = vinyl_iterator_primary_next;
 	else
 		it->base.next = vinyl_iterator_secondary_next;
 	it->base.free = vinyl_iterator_free;
 
 	it->env = env;
-	it->index = index;
-	vy_index_ref(index);
+	it->lsm = lsm;
+	vy_lsm_ref(lsm);
 
 	struct vy_tx *tx = in_txn() ? in_txn()->engine_tx : NULL;
 	assert(tx == NULL || tx->state == VINYL_TX_READY);
@@ -3879,26 +3872,26 @@ vinyl_index_create_iterator(struct index *base, enum iterator_type type,
 	}
 	it->tx = tx;
 
-	vy_read_iterator_open(&it->iterator, index, tx, type, it->key,
+	vy_read_iterator_open(&it->iterator, lsm, tx, type, it->key,
 			      (const struct vy_read_view **)&tx->read_view);
 	return (struct iterator *)it;
 }
 
 static int
-vinyl_index_get(struct index *base, const char *key,
+vinyl_index_get(struct index *index, const char *key,
 		uint32_t part_count, struct tuple **ret)
 {
-	assert(base->def->opts.is_unique);
-	assert(base->def->key_def->part_count == part_count);
+	assert(index->def->opts.is_unique);
+	assert(index->def->key_def->part_count == part_count);
 
-	struct vy_index *index = vy_index(base);
-	struct vy_env *env = vy_env(base->engine);
+	struct vy_lsm *lsm = vy_lsm(index);
+	struct vy_env *env = vy_env(index->engine);
 	struct vy_tx *tx = in_txn() ? in_txn()->engine_tx : NULL;
 	const struct vy_read_view **rv = (tx != NULL ? vy_tx_read_view(tx) :
 					  &env->xm->p_global_read_view);
 
 	struct tuple *tuple;
-	if (vy_index_full_by_key(index, tx, rv, key, part_count, &tuple) != 0)
+	if (vy_lsm_full_by_key(lsm, tx, rv, key, part_count, &tuple) != 0)
 		return -1;
 
 	if (tuple != NULL) {
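
The bulk of the vinyl.c changes are this mechanical substitution; a
typical call site goes from

	struct vy_index *index = vy_index(base);
	vy_index_ref(index);

to

	struct vy_lsm *lsm = vy_lsm(base);
	vy_lsm_ref(lsm);

with no behavioral change.
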
diff --git a/src/box/vy_cache.h b/src/box/vy_cache.h
index fceda5a2..54fd2892 100644
--- a/src/box/vy_cache.h
+++ b/src/box/vy_cache.h
@@ -150,7 +150,7 @@ void
 vy_cache_env_set_quota(struct vy_cache_env *e, size_t quota);
 
 /**
- * Tuple cache (of one particular index)
+ * Tuple cache (of one particular LSM tree)
  */
 struct vy_cache {
 	/**
diff --git a/src/box/vy_log.c b/src/box/vy_log.c
index 0a5dd26e..a9fe68cc 100644
--- a/src/box/vy_log.c
+++ b/src/box/vy_log.c
@@ -68,13 +68,13 @@
  * Used for packing a record in MsgPack.
  */
 enum vy_log_key {
-	VY_LOG_KEY_INDEX_ID		= 0,
+	VY_LOG_KEY_LSM_ID		= 0,
 	VY_LOG_KEY_RANGE_ID		= 1,
 	VY_LOG_KEY_RUN_ID		= 2,
 	VY_LOG_KEY_BEGIN		= 3,
 	VY_LOG_KEY_END			= 4,
-	VY_LOG_KEY_INDEX_DEF_ID		= 5,
-	VY_LOG_KEY_SPACE_DEF_ID		= 6,
+	VY_LOG_KEY_INDEX_ID		= 5,
+	VY_LOG_KEY_SPACE_ID		= 6,
 	VY_LOG_KEY_DEF			= 7,
 	VY_LOG_KEY_SLICE_ID		= 8,
 	VY_LOG_KEY_DUMP_LSN		= 9,
@@ -85,13 +85,13 @@ enum vy_log_key {
 
 /** vy_log_key -> human readable name. */
 static const char *vy_log_key_name[] = {
-	[VY_LOG_KEY_INDEX_ID]		= "index_id",
+	[VY_LOG_KEY_LSM_ID]		= "lsm_id",
 	[VY_LOG_KEY_RANGE_ID]		= "range_id",
 	[VY_LOG_KEY_RUN_ID]		= "run_id",
 	[VY_LOG_KEY_BEGIN]		= "begin",
 	[VY_LOG_KEY_END]		= "end",
-	[VY_LOG_KEY_INDEX_DEF_ID]	= "index_def_id",
-	[VY_LOG_KEY_SPACE_DEF_ID]	= "space_def_id",
+	[VY_LOG_KEY_INDEX_ID]		= "index_id",
+	[VY_LOG_KEY_SPACE_ID]		= "space_id",
 	[VY_LOG_KEY_DEF]		= "key_def",
 	[VY_LOG_KEY_SLICE_ID]		= "slice_id",
 	[VY_LOG_KEY_DUMP_LSN]		= "dump_lsn",
@@ -102,8 +102,8 @@ static const char *vy_log_key_name[] = {
 
 /** vy_log_type -> human readable name. */
 static const char *vy_log_type_name[] = {
-	[VY_LOG_CREATE_INDEX]		= "create_index",
-	[VY_LOG_DROP_INDEX]		= "drop_index",
+	[VY_LOG_CREATE_LSM]		= "create_lsm",
+	[VY_LOG_DROP_LSM]		= "drop_lsm",
 	[VY_LOG_INSERT_RANGE]		= "insert_range",
 	[VY_LOG_DELETE_RANGE]		= "delete_range",
 	[VY_LOG_PREPARE_RUN]		= "prepare_run",
@@ -112,9 +112,9 @@ static const char *vy_log_type_name[] = {
 	[VY_LOG_FORGET_RUN]		= "forget_run",
 	[VY_LOG_INSERT_SLICE]		= "insert_slice",
 	[VY_LOG_DELETE_SLICE]		= "delete_slice",
-	[VY_LOG_DUMP_INDEX]		= "dump_index",
+	[VY_LOG_DUMP_LSM]		= "dump_lsm",
 	[VY_LOG_SNAPSHOT]		= "snapshot",
-	[VY_LOG_TRUNCATE_INDEX]		= "truncate_index",
+	[VY_LOG_TRUNCATE_LSM]		= "truncate_lsm",
 };
 
 /** Metadata log object. */
@@ -209,10 +209,9 @@ vy_log_record_snprint(char *buf, int size, const struct vy_log_record *record)
 	assert(record->type < vy_log_record_type_MAX);
 	SNPRINT(total, snprintf, buf, size, "%s{",
 		vy_log_type_name[record->type]);
-	if (record->index_id > 0)
+	if (record->lsm_id > 0)
 		SNPRINT(total, snprintf, buf, size, "%s=%"PRIi64", ",
-			vy_log_key_name[VY_LOG_KEY_INDEX_ID],
-			record->index_id);
+			vy_log_key_name[VY_LOG_KEY_LSM_ID], record->lsm_id);
 	if (record->range_id > 0)
 		SNPRINT(total, snprintf, buf, size, "%s=%"PRIi64", ",
 			vy_log_key_name[VY_LOG_KEY_RANGE_ID],
@@ -233,14 +232,12 @@ vy_log_record_snprint(char *buf, int size, const struct vy_log_record *record)
 		SNPRINT(total, mp_snprint, buf, size, record->end);
 		SNPRINT(total, snprintf, buf, size, ", ");
 	}
-	if (record->index_def_id > 0)
+	if (record->index_id > 0)
 		SNPRINT(total, snprintf, buf, size, "%s=%"PRIu32", ",
-			vy_log_key_name[VY_LOG_KEY_INDEX_DEF_ID],
-			record->index_def_id);
-	if (record->space_def_id > 0)
+			vy_log_key_name[VY_LOG_KEY_INDEX_ID], record->index_id);
+	if (record->space_id > 0)
 		SNPRINT(total, snprintf, buf, size, "%s=%"PRIu32", ",
-			vy_log_key_name[VY_LOG_KEY_SPACE_DEF_ID],
-			record->space_def_id);
+			vy_log_key_name[VY_LOG_KEY_SPACE_ID], record->space_id);
 	if (record->key_parts != NULL) {
 		SNPRINT(total, snprintf, buf, size, "%s=",
 			vy_log_key_name[VY_LOG_KEY_DEF]);
@@ -307,9 +304,9 @@ vy_log_record_encode(const struct vy_log_record *record,
 	size += mp_sizeof_array(2);
 	size += mp_sizeof_uint(record->type);
 	size_t n_keys = 0;
-	if (record->index_id > 0) {
-		size += mp_sizeof_uint(VY_LOG_KEY_INDEX_ID);
-		size += mp_sizeof_uint(record->index_id);
+	if (record->lsm_id > 0) {
+		size += mp_sizeof_uint(VY_LOG_KEY_LSM_ID);
+		size += mp_sizeof_uint(record->lsm_id);
 		n_keys++;
 	}
 	if (record->range_id > 0) {
@@ -338,14 +335,14 @@ vy_log_record_encode(const struct vy_log_record *record,
 		size += p - record->end;
 		n_keys++;
 	}
-	if (record->index_def_id > 0) {
-		size += mp_sizeof_uint(VY_LOG_KEY_INDEX_DEF_ID);
-		size += mp_sizeof_uint(record->index_def_id);
+	if (record->index_id > 0) {
+		size += mp_sizeof_uint(VY_LOG_KEY_INDEX_ID);
+		size += mp_sizeof_uint(record->index_id);
 		n_keys++;
 	}
-	if (record->space_def_id > 0) {
-		size += mp_sizeof_uint(VY_LOG_KEY_SPACE_DEF_ID);
-		size += mp_sizeof_uint(record->space_def_id);
+	if (record->space_id > 0) {
+		size += mp_sizeof_uint(VY_LOG_KEY_SPACE_ID);
+		size += mp_sizeof_uint(record->space_id);
 		n_keys++;
 	}
 	if (record->key_parts != NULL) {
@@ -389,9 +386,9 @@ vy_log_record_encode(const struct vy_log_record *record,
 	pos = mp_encode_array(pos, 2);
 	pos = mp_encode_uint(pos, record->type);
 	pos = mp_encode_map(pos, n_keys);
-	if (record->index_id > 0) {
-		pos = mp_encode_uint(pos, VY_LOG_KEY_INDEX_ID);
-		pos = mp_encode_uint(pos, record->index_id);
+	if (record->lsm_id > 0) {
+		pos = mp_encode_uint(pos, VY_LOG_KEY_LSM_ID);
+		pos = mp_encode_uint(pos, record->lsm_id);
 	}
 	if (record->range_id > 0) {
 		pos = mp_encode_uint(pos, VY_LOG_KEY_RANGE_ID);
@@ -415,13 +412,13 @@ vy_log_record_encode(const struct vy_log_record *record,
 		memcpy(pos, record->end, p - record->end);
 		pos += p - record->end;
 	}
-	if (record->index_def_id > 0) {
-		pos = mp_encode_uint(pos, VY_LOG_KEY_INDEX_DEF_ID);
-		pos = mp_encode_uint(pos, record->index_def_id);
+	if (record->index_id > 0) {
+		pos = mp_encode_uint(pos, VY_LOG_KEY_INDEX_ID);
+		pos = mp_encode_uint(pos, record->index_id);
 	}
-	if (record->space_def_id > 0) {
-		pos = mp_encode_uint(pos, VY_LOG_KEY_SPACE_DEF_ID);
-		pos = mp_encode_uint(pos, record->space_def_id);
+	if (record->space_id > 0) {
+		pos = mp_encode_uint(pos, VY_LOG_KEY_SPACE_ID);
+		pos = mp_encode_uint(pos, record->space_id);
 	}
 	if (record->key_parts != NULL) {
 		pos = mp_encode_uint(pos, VY_LOG_KEY_DEF);
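
The encoder keeps msgpuck's usual two-pass discipline: first sum the
exact byte size with mp_sizeof_*(), then emit with mp_encode_*(). A
stripped-down sketch of the pattern for a record carrying only lsm_id
(hypothetical helper, buffer allocation elided):

	#include <msgpuck.h>

	static size_t
	encode_lsm_id(char *buf, int64_t lsm_id)
	{
		/* Pass 1: size - must mirror the encode pass exactly. */
		size_t size = mp_sizeof_map(1) +
			      mp_sizeof_uint(VY_LOG_KEY_LSM_ID) +
			      mp_sizeof_uint(lsm_id);
		(void)size;	/* the caller allocates this many bytes */
		/* Pass 2: encode. */
		char *pos = buf;
		pos = mp_encode_map(pos, 1);
		pos = mp_encode_uint(pos, VY_LOG_KEY_LSM_ID);
		pos = mp_encode_uint(pos, lsm_id);
		return pos - buf;
	}
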
@@ -504,8 +501,8 @@ vy_log_record_decode(struct vy_log_record *record,
 	for (uint32_t i = 0; i < n_keys; i++) {
 		uint32_t key = mp_decode_uint(&pos);
 		switch (key) {
-		case VY_LOG_KEY_INDEX_ID:
-			record->index_id = mp_decode_uint(&pos);
+		case VY_LOG_KEY_LSM_ID:
+			record->lsm_id = mp_decode_uint(&pos);
 			break;
 		case VY_LOG_KEY_RANGE_ID:
 			record->range_id = mp_decode_uint(&pos);
@@ -523,11 +520,11 @@ vy_log_record_decode(struct vy_log_record *record,
 			record->end = mp_decode_array(&tmp) > 0 ? pos : NULL;
 			mp_next(&pos);
 			break;
-		case VY_LOG_KEY_INDEX_DEF_ID:
-			record->index_def_id = mp_decode_uint(&pos);
+		case VY_LOG_KEY_INDEX_ID:
+			record->index_id = mp_decode_uint(&pos);
 			break;
-		case VY_LOG_KEY_SPACE_DEF_ID:
-			record->space_def_id = mp_decode_uint(&pos);
+		case VY_LOG_KEY_SPACE_ID:
+			record->space_id = mp_decode_uint(&pos);
 			break;
 		case VY_LOG_KEY_DEF: {
 			uint32_t part_count = mp_decode_array(&pos);
@@ -573,16 +570,16 @@ vy_log_record_decode(struct vy_log_record *record,
 			goto fail;
 		}
 	}
-	if (record->type == VY_LOG_CREATE_INDEX && record->commit_lsn == 0) {
+	if (record->type == VY_LOG_CREATE_LSM && record->commit_lsn == 0) {
 		/*
-		 * We used to use LSN as unique index identifier
+		 * We used to use LSN as unique LSM tree identifier
 		 * and didn't store LSN separately so if there's
 		 * no 'commit_lsn' field in the record, we are
 		 * recovering from an old vylog and 'id' is in
 		 * fact the LSN of the WAL record that committed
-		 * the index.
+		 * the LSM tree.
 		 */
-		record->commit_lsn = record->index_id;
+		record->commit_lsn = record->lsm_id;
 	}
 	return 0;
 fail:
@@ -1034,7 +1031,7 @@ vy_log_tx_do_commit(bool no_discard)
 
 	/*
 	 * During recovery, we may replay records we failed to commit
-	 * before restart (e.g. drop index). Since the log isn't open
+	 * before restart (e.g. drop LSM tree). Since the log isn't open
 	 * yet, simply leave them in the tx buffer to be flushed upon
 	 * recovery completion.
 	 */
@@ -1109,10 +1106,10 @@ vy_recovery_index_id_hash(uint32_t space_id, uint32_t index_id)
 	return ((uint64_t)space_id << 32) + index_id;
 }
 
-/** Lookup a vinyl index in vy_recovery::index_id_hash map. */
-struct vy_index_recovery_info *
-vy_recovery_index_by_id(struct vy_recovery *recovery,
-			uint32_t space_id, uint32_t index_id)
+/** Lookup an LSM tree in vy_recovery::index_id_hash map. */
+struct vy_lsm_recovery_info *
+vy_recovery_lsm_by_index_id(struct vy_recovery *recovery,
+			    uint32_t space_id, uint32_t index_id)
 {
 	int64_t key = vy_recovery_index_id_hash(space_id, index_id);
 	struct mh_i64ptr_t *h = recovery->index_id_hash;
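
The key used by index_id_hash is simply the two 32-bit ids packed into
one 64-bit integer, so the mapping is trivially reversible. Illustrative
helpers (hypothetical, not part of the patch; assumes <stdint.h>):

	/* space_id in the high 32 bits, index_id in the low 32 bits,
	 * exactly as vy_recovery_index_id_hash() computes it. */
	static inline int64_t
	index_id_hash_key(uint32_t space_id, uint32_t index_id)
	{
		return ((uint64_t)space_id << 32) + index_id;
	}

	static inline uint32_t
	key_to_space_id(int64_t key)
	{
		return (uint64_t)key >> 32;
	}

	static inline uint32_t
	key_to_index_id(int64_t key)
	{
		return (uint32_t)key;
	}
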
@@ -1122,11 +1119,11 @@ vy_recovery_index_by_id(struct vy_recovery *recovery,
 	return mh_i64ptr_node(h, k)->val;
 }
 
-/** Lookup a vinyl index in vy_recovery::index_hash map. */
-static struct vy_index_recovery_info *
-vy_recovery_lookup_index(struct vy_recovery *recovery, int64_t id)
+/** Lookup an LSM tree in vy_recovery::index_hash map. */
+static struct vy_lsm_recovery_info *
+vy_recovery_lookup_lsm(struct vy_recovery *recovery, int64_t id)
 {
-	struct mh_i64ptr_t *h = recovery->index_hash;
+	struct mh_i64ptr_t *h = recovery->lsm_hash;
 	mh_int_t k = mh_i64ptr_find(h, id, NULL);
 	if (k == mh_end(h))
 		return NULL;
@@ -1167,18 +1164,18 @@ vy_recovery_lookup_slice(struct vy_recovery *recovery, int64_t slice_id)
 }
 
 /**
- * Handle a VY_LOG_CREATE_INDEX log record.
- * This function allocates a new vinyl index with ID @id
+ * Handle a VY_LOG_CREATE_LSM log record.
+ * This function allocates a new vinyl LSM tree with ID @id
  * and inserts it into the hash.
  * Return 0 on success, -1 on failure (ID collision or OOM).
  */
 static int
-vy_recovery_create_index(struct vy_recovery *recovery, int64_t id,
-			 uint32_t space_id, uint32_t index_id,
-			 const struct key_part_def *key_parts,
-			 uint32_t key_part_count, int64_t commit_lsn)
+vy_recovery_create_lsm(struct vy_recovery *recovery, int64_t id,
+		       uint32_t space_id, uint32_t index_id,
+		       const struct key_part_def *key_parts,
+		       uint32_t key_part_count, int64_t commit_lsn)
 {
-	struct vy_index_recovery_info *index;
+	struct vy_lsm_recovery_info *lsm;
 	struct key_part_def *key_parts_copy;
 	struct mh_i64ptr_node_t node;
 	struct mh_i64ptr_t *h;
@@ -1186,11 +1183,11 @@ vy_recovery_create_index(struct vy_recovery *recovery, int64_t id,
 
 	/*
 	 * Make a copy of the key definition to be used for
-	 * the new index incarnation.
+	 * the new LSM tree incarnation.
 	 */
 	if (key_parts == NULL) {
 		diag_set(ClientError, ER_INVALID_VYLOG_FILE,
-			 tt_sprintf("Missing key definition for index %lld",
+			 tt_sprintf("Missing key definition for LSM tree %lld",
 				    (long long)id));
 		return -1;
 	}
@@ -1203,77 +1200,77 @@ vy_recovery_create_index(struct vy_recovery *recovery, int64_t id,
 	memcpy(key_parts_copy, key_parts, sizeof(*key_parts) * key_part_count);
 
 	/*
-	 * Look up the index in the hash.
+	 * Look up the LSM tree in the hash.
 	 */
 	h = recovery->index_id_hash;
 	node.key = vy_recovery_index_id_hash(space_id, index_id);
 	k = mh_i64ptr_find(h, node.key, NULL);
-	index = (k != mh_end(h)) ? mh_i64ptr_node(h, k)->val : NULL;
+	lsm = (k != mh_end(h)) ? mh_i64ptr_node(h, k)->val : NULL;
 
-	if (index == NULL) {
+	if (lsm == NULL) {
 		/*
-		 * This is the first time the index is created
+		 * This is the first time the LSM tree is created
 		 * (there's no previous incarnation in the context).
-		 * Allocate a node for the index and add it to
+		 * Allocate a node for the LSM tree and add it to
 		 * the hash.
 		 */
-		index = malloc(sizeof(*index));
-		if (index == NULL) {
-			diag_set(OutOfMemory, sizeof(*index),
-				 "malloc", "struct vy_index_recovery_info");
+		lsm = malloc(sizeof(*lsm));
+		if (lsm == NULL) {
+			diag_set(OutOfMemory, sizeof(*lsm),
+				 "malloc", "struct vy_lsm_recovery_info");
 			free(key_parts_copy);
 			return -1;
 		}
-		index->index_id = index_id;
-		index->space_id = space_id;
-		rlist_create(&index->ranges);
-		rlist_create(&index->runs);
+		lsm->index_id = index_id;
+		lsm->space_id = space_id;
+		rlist_create(&lsm->ranges);
+		rlist_create(&lsm->runs);
 
-		node.val = index;
+		node.val = lsm;
 		if (mh_i64ptr_put(h, &node, NULL, NULL) == mh_end(h)) {
 			diag_set(OutOfMemory, 0, "mh_i64ptr_put",
 				 "mh_i64ptr_node_t");
 			free(key_parts_copy);
-			free(index);
+			free(lsm);
 			return -1;
 		}
-		rlist_add_entry(&recovery->indexes, index, in_recovery);
+		rlist_add_entry(&recovery->lsms, lsm, in_recovery);
 	} else {
 		/*
-		 * The index was dropped and recreated with the
+		 * The LSM tree was dropped and recreated with the
 		 * same ID. Update its key definition (because it
 		 * could have changed since the last time it was
 		 * used) and reset its state.
 		 */
-		if (!index->is_dropped) {
+		if (!lsm->is_dropped) {
 			diag_set(ClientError, ER_INVALID_VYLOG_FILE,
-				 tt_sprintf("Index %u/%u created twice",
+				 tt_sprintf("LSM tree %u/%u created twice",
 					    (unsigned)space_id,
 					    (unsigned)index_id));
 			free(key_parts_copy);
 			return -1;
 		}
-		assert(index->index_id == index_id);
-		assert(index->space_id == space_id);
-		free(index->key_parts);
+		assert(lsm->index_id == index_id);
+		assert(lsm->space_id == space_id);
+		free(lsm->key_parts);
 	}
 
-	index->id = id;
-	index->key_parts = key_parts_copy;
-	index->key_part_count = key_part_count;
-	index->is_dropped = false;
-	index->commit_lsn = commit_lsn;
-	index->dump_lsn = -1;
+	lsm->id = id;
+	lsm->key_parts = key_parts_copy;
+	lsm->key_part_count = key_part_count;
+	lsm->is_dropped = false;
+	lsm->commit_lsn = commit_lsn;
+	lsm->dump_lsn = -1;
 
 	/*
-	 * Add the index to the hash.
+	 * Add the LSM tree to the hash.
 	 */
-	h = recovery->index_hash;
+	h = recovery->lsm_hash;
 	node.key = id;
-	node.val = index;
+	node.val = lsm;
 	if (mh_i64ptr_find(h, id, NULL) != mh_end(h)) {
 		diag_set(ClientError, ER_INVALID_VYLOG_FILE,
-			 tt_sprintf("Duplicate index id %lld",
+			 tt_sprintf("Duplicate LSM tree id %lld",
 				    (long long)id));
 		return -1;
 	}
@@ -1290,72 +1287,72 @@ vy_recovery_create_index(struct vy_recovery *recovery, int64_t id,
 }
 
 /**
- * Handle a VY_LOG_DROP_INDEX log record.
- * This function marks the vinyl index with ID @id as dropped.
- * All ranges and runs of the index must have been deleted by now.
- * Returns 0 on success, -1 if ID not found or index is already marked.
+ * Handle a VY_LOG_DROP_LSM log record.
+ * This function marks the LSM tree with ID @id as dropped.
+ * All ranges and runs of the LSM tree must have been deleted by now.
+ * Returns 0 on success, -1 if ID not found or LSM tree is already dropped.
  */
 static int
-vy_recovery_drop_index(struct vy_recovery *recovery, int64_t id)
+vy_recovery_drop_lsm(struct vy_recovery *recovery, int64_t id)
 {
-	struct vy_index_recovery_info *index;
-	index = vy_recovery_lookup_index(recovery, id);
-	if (index == NULL) {
+	struct vy_lsm_recovery_info *lsm;
+	lsm = vy_recovery_lookup_lsm(recovery, id);
+	if (lsm == NULL) {
 		diag_set(ClientError, ER_INVALID_VYLOG_FILE,
-			 tt_sprintf("Index %lld deleted but not registered",
+			 tt_sprintf("LSM tree %lld deleted but not registered",
 				    (long long)id));
 		return -1;
 	}
-	if (index->is_dropped) {
+	if (lsm->is_dropped) {
 		diag_set(ClientError, ER_INVALID_VYLOG_FILE,
-			 tt_sprintf("Index %lld deleted twice",
+			 tt_sprintf("LSM tree %lld deleted twice",
 				    (long long)id));
 		return -1;
 	}
-	if (!rlist_empty(&index->ranges)) {
+	if (!rlist_empty(&lsm->ranges)) {
 		diag_set(ClientError, ER_INVALID_VYLOG_FILE,
-			 tt_sprintf("Dropped index %lld has ranges",
+			 tt_sprintf("Dropped LSM tree %lld has ranges",
 				    (long long)id));
 		return -1;
 	}
 	struct vy_run_recovery_info *run;
-	rlist_foreach_entry(run, &index->runs, in_index) {
+	rlist_foreach_entry(run, &lsm->runs, in_lsm) {
 		if (!run->is_dropped && !run->is_incomplete) {
 			diag_set(ClientError, ER_INVALID_VYLOG_FILE,
-				 tt_sprintf("Dropped index %lld has active "
+				 tt_sprintf("Dropped LSM tree %lld has active "
 					    "runs", (long long)id));
 			return -1;
 		}
 	}
-	index->is_dropped = true;
+	lsm->is_dropped = true;
 	return 0;
 }
 
 /**
- * Handle a VY_LOG_DUMP_INDEX log record.
- * This function updates LSN of the last dump of the vinyl index
+ * Handle a VY_LOG_DUMP_LSM log record.
+ * This function updates LSN of the last dump of the LSM tree
  * with ID @id.
- * Returns 0 on success, -1 if ID not found or index is dropped.
+ * Returns 0 on success, -1 if ID not found or LSM tree is dropped.
  */
 static int
-vy_recovery_dump_index(struct vy_recovery *recovery,
-		       int64_t id, int64_t dump_lsn)
+vy_recovery_dump_lsm(struct vy_recovery *recovery,
+		     int64_t id, int64_t dump_lsn)
 {
-	struct vy_index_recovery_info *index;
-	index = vy_recovery_lookup_index(recovery, id);
-	if (index == NULL) {
+	struct vy_lsm_recovery_info *lsm;
+	lsm = vy_recovery_lookup_lsm(recovery, id);
+	if (lsm == NULL) {
 		diag_set(ClientError, ER_INVALID_VYLOG_FILE,
-			 tt_sprintf("Dump of unregistered index %lld",
+			 tt_sprintf("Dump of unregistered LSM tree %lld",
 				    (long long)id));
 		return -1;
 	}
-	if (index->is_dropped) {
+	if (lsm->is_dropped) {
 		diag_set(ClientError, ER_INVALID_VYLOG_FILE,
-			 tt_sprintf("Dump of deleted index %lld",
+			 tt_sprintf("Dump of deleted LSM tree %lld",
 				    (long long)id));
 		return -1;
 	}
-	index->dump_lsn = dump_lsn;
+	lsm->dump_lsn = dump_lsn;
 	return 0;
 }
 
@@ -1387,7 +1384,7 @@ vy_recovery_do_create_run(struct vy_recovery *recovery, int64_t run_id)
 	run->is_incomplete = false;
 	run->is_dropped = false;
 	run->data = NULL;
-	rlist_create(&run->in_index);
+	rlist_create(&run->in_lsm);
 	if (recovery->max_id < run_id)
 		recovery->max_id = run_id;
 	return run;
@@ -1396,21 +1393,21 @@ vy_recovery_do_create_run(struct vy_recovery *recovery, int64_t run_id)
 /**
  * Handle a VY_LOG_PREPARE_RUN log record.
  * This function creates a new incomplete vinyl run with ID @run_id
- * and adds it to the list of runs of the index with ID @index_id.
- * Return 0 on success, -1 if run already exists, index not found,
+ * and adds it to the list of runs of the LSM tree with ID @lsm_id.
+ * Return 0 on success, -1 if run already exists, LSM tree not found,
  * or OOM.
  */
 static int
-vy_recovery_prepare_run(struct vy_recovery *recovery, int64_t index_id,
+vy_recovery_prepare_run(struct vy_recovery *recovery, int64_t lsm_id,
 			int64_t run_id)
 {
-	struct vy_index_recovery_info *index;
-	index = vy_recovery_lookup_index(recovery, index_id);
-	if (index == NULL) {
+	struct vy_lsm_recovery_info *lsm;
+	lsm = vy_recovery_lookup_lsm(recovery, lsm_id);
+	if (lsm == NULL) {
 		diag_set(ClientError, ER_INVALID_VYLOG_FILE,
 			 tt_sprintf("Run %lld created for unregistered "
-				    "index %lld", (long long)run_id,
-				    (long long)index_id));
+				    "LSM tree %lld", (long long)run_id,
+				    (long long)lsm_id));
 		return -1;
 	}
 	if (vy_recovery_lookup_run(recovery, run_id) != NULL) {
@@ -1424,36 +1421,36 @@ vy_recovery_prepare_run(struct vy_recovery *recovery, int64_t index_id,
 	if (run == NULL)
 		return -1;
 	run->is_incomplete = true;
-	rlist_add_entry(&index->runs, run, in_index);
+	rlist_add_entry(&lsm->runs, run, in_lsm);
 	return 0;
 }
 
 /**
  * Handle a VY_LOG_CREATE_RUN log record.
  * This function adds the vinyl run with ID @run_id to the list
- * of runs of the index with ID @index_id and marks it committed.
+ * of runs of the LSM tree with ID @sm_id and marks it committed.
  * If the run does not exist, it will be created.
- * Return 0 on success, -1 if index not found, run or index
+ * Return 0 on success, -1 if LSM tree not found, run or LSM tree
  * is dropped, or OOM.
  */
 static int
-vy_recovery_create_run(struct vy_recovery *recovery, int64_t index_id,
+vy_recovery_create_run(struct vy_recovery *recovery, int64_t lsm_id,
 		       int64_t run_id, int64_t dump_lsn)
 {
-	struct vy_index_recovery_info *index;
-	index = vy_recovery_lookup_index(recovery, index_id);
-	if (index == NULL) {
+	struct vy_lsm_recovery_info *lsm;
+	lsm = vy_recovery_lookup_lsm(recovery, lsm_id);
+	if (lsm == NULL) {
 		diag_set(ClientError, ER_INVALID_VYLOG_FILE,
 			 tt_sprintf("Run %lld created for unregistered "
-				    "index %lld", (long long)run_id,
-				    (long long)index_id));
+				    "LSM tree %lld", (long long)run_id,
+				    (long long)lsm_id));
 		return -1;
 	}
-	if (index->is_dropped) {
+	if (lsm->is_dropped) {
 		diag_set(ClientError, ER_INVALID_VYLOG_FILE,
 			 tt_sprintf("Run %lld created for deleted "
-				    "index %lld", (long long)run_id,
-				    (long long)index_id));
+				    "LSM tree %lld", (long long)run_id,
+				    (long long)lsm_id));
 		return -1;
 	}
 	struct vy_run_recovery_info *run;
@@ -1471,7 +1468,7 @@ vy_recovery_create_run(struct vy_recovery *recovery, int64_t index_id,
 	}
 	run->dump_lsn = dump_lsn;
 	run->is_incomplete = false;
-	rlist_move_entry(&index->runs, run, in_index);
+	rlist_move_entry(&lsm->runs, run, in_lsm);
 	return 0;
 }
 
@@ -1523,7 +1520,7 @@ vy_recovery_forget_run(struct vy_recovery *recovery, int64_t run_id)
 	}
 	struct vy_run_recovery_info *run = mh_i64ptr_node(h, k)->val;
 	mh_i64ptr_del(h, k, NULL);
-	rlist_del_entry(run, in_index);
+	rlist_del_entry(run, in_lsm);
 	free(run);
 	return 0;
 }
@@ -1532,11 +1529,11 @@ vy_recovery_forget_run(struct vy_recovery *recovery, int64_t run_id)
  * Handle a VY_LOG_INSERT_RANGE log record.
  * This function allocates a new vinyl range with ID @range_id,
  * inserts it to the hash, and adds it to the list of ranges of the
- * index with ID @index_id.
+ * LSM tree with ID @lsm_id.
  * Return 0 on success, -1 on failure (ID collision or OOM).
  */
 static int
-vy_recovery_insert_range(struct vy_recovery *recovery, int64_t index_id,
+vy_recovery_insert_range(struct vy_recovery *recovery, int64_t lsm_id,
 			 int64_t range_id, const char *begin, const char *end)
 {
 	if (vy_recovery_lookup_range(recovery, range_id) != NULL) {
@@ -1545,13 +1542,13 @@ vy_recovery_insert_range(struct vy_recovery *recovery, int64_t index_id,
 				    (long long)range_id));
 		return -1;
 	}
-	struct vy_index_recovery_info *index;
-	index = vy_recovery_lookup_index(recovery, index_id);
-	if (index == NULL) {
+	struct vy_lsm_recovery_info *lsm;
+	lsm = vy_recovery_lookup_lsm(recovery, lsm_id);
+	if (lsm == NULL) {
 		diag_set(ClientError, ER_INVALID_VYLOG_FILE,
 			 tt_sprintf("Range %lld created for unregistered "
-				    "index %lld", (long long)range_id,
-				    (long long)index_id));
+				    "LSM tree %lld", (long long)range_id,
+				    (long long)lsm_id));
 		return -1;
 	}
 
@@ -1593,7 +1590,7 @@ vy_recovery_insert_range(struct vy_recovery *recovery, int64_t index_id,
 	} else
 		range->end = NULL;
 	rlist_create(&range->slices);
-	rlist_add_entry(&index->ranges, range, in_index);
+	rlist_add_entry(&lsm->ranges, range, in_lsm);
 	if (recovery->max_id < range_id)
 		recovery->max_id = range_id;
 	return 0;
@@ -1624,7 +1621,7 @@ vy_recovery_delete_range(struct vy_recovery *recovery, int64_t range_id)
 		return -1;
 	}
 	mh_i64ptr_del(h, k, NULL);
-	rlist_del_entry(range, in_index);
+	rlist_del_entry(range, in_lsm);
 	free(range);
 	return 0;
 }
@@ -1757,28 +1754,28 @@ vy_recovery_process_record(struct vy_recovery *recovery,
 {
 	int rc;
 	switch (record->type) {
-	case VY_LOG_CREATE_INDEX:
-		rc = vy_recovery_create_index(recovery, record->index_id,
-				record->space_def_id, record->index_def_id,
+	case VY_LOG_CREATE_LSM:
+		rc = vy_recovery_create_lsm(recovery, record->lsm_id,
+				record->space_id, record->index_id,
 				record->key_parts, record->key_part_count,
 				record->commit_lsn);
 		break;
-	case VY_LOG_DROP_INDEX:
-		rc = vy_recovery_drop_index(recovery, record->index_id);
+	case VY_LOG_DROP_LSM:
+		rc = vy_recovery_drop_lsm(recovery, record->lsm_id);
 		break;
 	case VY_LOG_INSERT_RANGE:
-		rc = vy_recovery_insert_range(recovery, record->index_id,
+		rc = vy_recovery_insert_range(recovery, record->lsm_id,
 				record->range_id, record->begin, record->end);
 		break;
 	case VY_LOG_DELETE_RANGE:
 		rc = vy_recovery_delete_range(recovery, record->range_id);
 		break;
 	case VY_LOG_PREPARE_RUN:
-		rc = vy_recovery_prepare_run(recovery, record->index_id,
+		rc = vy_recovery_prepare_run(recovery, record->lsm_id,
 					     record->run_id);
 		break;
 	case VY_LOG_CREATE_RUN:
-		rc = vy_recovery_create_run(recovery, record->index_id,
+		rc = vy_recovery_create_run(recovery, record->lsm_id,
 					    record->run_id, record->dump_lsn);
 		break;
 	case VY_LOG_DROP_RUN:
@@ -1796,11 +1793,11 @@ vy_recovery_process_record(struct vy_recovery *recovery,
 	case VY_LOG_DELETE_SLICE:
 		rc = vy_recovery_delete_slice(recovery, record->slice_id);
 		break;
-	case VY_LOG_DUMP_INDEX:
-		rc = vy_recovery_dump_index(recovery, record->index_id,
+	case VY_LOG_DUMP_LSM:
+		rc = vy_recovery_dump_lsm(recovery, record->lsm_id,
 					    record->dump_lsn);
 		break;
-	case VY_LOG_TRUNCATE_INDEX:
+	case VY_LOG_TRUNCATE_LSM:
 		/* Not used anymore, ignore. */
 		rc = 0;
 		break;
@@ -1829,21 +1826,21 @@ vy_recovery_new_f(va_list ap)
 		goto fail;
 	}
 
-	rlist_create(&recovery->indexes);
+	rlist_create(&recovery->lsms);
 	recovery->index_id_hash = NULL;
-	recovery->index_hash = NULL;
+	recovery->lsm_hash = NULL;
 	recovery->range_hash = NULL;
 	recovery->run_hash = NULL;
 	recovery->slice_hash = NULL;
 	recovery->max_id = -1;
 
 	recovery->index_id_hash = mh_i64ptr_new();
-	recovery->index_hash = mh_i64ptr_new();
+	recovery->lsm_hash = mh_i64ptr_new();
 	recovery->range_hash = mh_i64ptr_new();
 	recovery->run_hash = mh_i64ptr_new();
 	recovery->slice_hash = mh_i64ptr_new();
 	if (recovery->index_id_hash == NULL ||
-	    recovery->index_hash == NULL ||
+	    recovery->lsm_hash == NULL ||
 	    recovery->range_hash == NULL ||
 	    recovery->run_hash == NULL ||
 	    recovery->slice_hash == NULL) {
@@ -1961,16 +1958,16 @@ vy_recovery_delete(struct vy_recovery *recovery)
 	if (recovery->index_id_hash != NULL) {
 		mh_int_t i;
 		mh_foreach(recovery->index_id_hash, i) {
-			struct vy_index_recovery_info *index;
-			index = mh_i64ptr_node(recovery->index_id_hash, i)->val;
-			free(index->key_parts);
-			free(index);
+			struct vy_lsm_recovery_info *lsm;
+			lsm = mh_i64ptr_node(recovery->index_id_hash, i)->val;
+			free(lsm->key_parts);
+			free(lsm);
 		}
 		mh_i64ptr_delete(recovery->index_id_hash);
 	}
-	if (recovery->index_hash != NULL) {
+	if (recovery->lsm_hash != NULL) {
 		/* Hash entries were deleted along with index_id_hash. */
-		mh_i64ptr_delete(recovery->index_hash);
+		mh_i64ptr_delete(recovery->lsm_hash);
 	}
 	if (recovery->range_hash != NULL)
 		vy_recovery_delete_hash(recovery->range_hash);
@@ -1996,9 +1993,9 @@ vy_log_append_record(struct xlog *xlog, struct vy_log_record *record)
 	return 0;
 }
 
-/** Write all records corresponding to an index to vylog. */
+/** Write all records corresponding to an LSM tree to vylog. */
 static int
-vy_log_append_index(struct xlog *xlog, struct vy_index_recovery_info *index)
+vy_log_append_lsm(struct xlog *xlog, struct vy_lsm_recovery_info *lsm)
 {
 	struct vy_range_recovery_info *range;
 	struct vy_slice_recovery_info *slice;
@@ -2006,25 +2003,25 @@ vy_log_append_index(struct xlog *xlog, struct vy_index_recovery_info *index)
 	struct vy_log_record record;
 
 	vy_log_record_init(&record);
-	record.type = VY_LOG_CREATE_INDEX;
-	record.index_id = index->id;
-	record.index_def_id = index->index_id;
-	record.space_def_id = index->space_id;
-	record.key_parts = index->key_parts;
-	record.key_part_count = index->key_part_count;
+	record.type = VY_LOG_CREATE_LSM;
+	record.lsm_id = lsm->id;
+	record.index_id = lsm->index_id;
+	record.space_id = lsm->space_id;
+	record.key_parts = lsm->key_parts;
+	record.key_part_count = lsm->key_part_count;
 	if (vy_log_append_record(xlog, &record) != 0)
 		return -1;
 
-	if (index->dump_lsn >= 0) {
+	if (lsm->dump_lsn >= 0) {
 		vy_log_record_init(&record);
-		record.type = VY_LOG_DUMP_INDEX;
-		record.index_id = index->id;
-		record.dump_lsn = index->dump_lsn;
+		record.type = VY_LOG_DUMP_LSM;
+		record.lsm_id = lsm->id;
+		record.dump_lsn = lsm->dump_lsn;
 		if (vy_log_append_record(xlog, &record) != 0)
 			return -1;
 	}
 
-	rlist_foreach_entry(run, &index->runs, in_index) {
+	rlist_foreach_entry(run, &lsm->runs, in_lsm) {
 		vy_log_record_init(&record);
 		if (run->is_incomplete) {
 			record.type = VY_LOG_PREPARE_RUN;
@@ -2032,7 +2029,7 @@ vy_log_append_index(struct xlog *xlog, struct vy_index_recovery_info *index)
 			record.type = VY_LOG_CREATE_RUN;
 			record.dump_lsn = run->dump_lsn;
 		}
-		record.index_id = index->id;
+		record.lsm_id = lsm->id;
 		record.run_id = run->id;
 		if (vy_log_append_record(xlog, &record) != 0)
 			return -1;
@@ -2048,10 +2045,10 @@ vy_log_append_index(struct xlog *xlog, struct vy_index_recovery_info *index)
 			return -1;
 	}
 
-	rlist_foreach_entry(range, &index->ranges, in_index) {
+	rlist_foreach_entry(range, &lsm->ranges, in_lsm) {
 		vy_log_record_init(&record);
 		record.type = VY_LOG_INSERT_RANGE;
-		record.index_id = index->id;
+		record.lsm_id = lsm->id;
 		record.range_id = range->id;
 		record.begin = range->begin;
 		record.end = range->end;
@@ -2075,10 +2072,10 @@ vy_log_append_index(struct xlog *xlog, struct vy_index_recovery_info *index)
 		}
 	}
 
-	if (index->is_dropped) {
+	if (lsm->is_dropped) {
 		vy_log_record_init(&record);
-		record.type = VY_LOG_DROP_INDEX;
-		record.index_id = index->id;
+		record.type = VY_LOG_DROP_LSM;
+		record.lsm_id = lsm->id;
 		if (vy_log_append_record(xlog, &record) != 0)
 			return -1;
 	}
@@ -2100,14 +2097,14 @@ vy_log_create(const struct vclock *vclock, struct vy_recovery *recovery)
 
 	mh_int_t i;
 	mh_foreach(recovery->index_id_hash, i) {
-		struct vy_index_recovery_info *index;
-		index = mh_i64ptr_node(recovery->index_id_hash, i)->val;
+		struct vy_lsm_recovery_info *lsm;
+		lsm = mh_i64ptr_node(recovery->index_id_hash, i)->val;
 		/*
-		 * Purge dropped indexes that are not referenced by runs
+		 * Purge dropped LSM trees that are not referenced by runs
 		 * (and thus not needed for garbage collection) from the
 		 * log on rotation.
 		 */
-		if (index->is_dropped && rlist_empty(&index->runs))
+		if (lsm->is_dropped && rlist_empty(&lsm->runs))
 			continue;
 
 		/* Create the log file on the first write. */
@@ -2115,7 +2112,7 @@ vy_log_create(const struct vclock *vclock, struct vy_recovery *recovery)
 		    xdir_create_xlog(&vy_log.dir, &xlog, vclock) != 0)
 			goto err_create_xlog;
 
-		if (vy_log_append_index(&xlog, index) != 0)
+		if (vy_log_append_lsm(&xlog, lsm) != 0)
 			goto err_write_xlog;
 	}
 	if (!xlog_is_open(&xlog))
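
For orientation, rotating the log for one live LSM tree now emits a
record sequence roughly like the following (as rendered by
vy_log_record_snprint(); ids and LSNs are illustrative):

	create_lsm{lsm_id=10, index_id=1, space_id=512, key_def=[...], }
	dump_lsm{lsm_id=10, dump_lsn=100, }
	create_run{lsm_id=10, run_id=3, dump_lsn=100, }
	insert_range{lsm_id=10, range_id=5, }
	insert_slice{range_id=5, run_id=3, slice_id=7, }

A dropped tree that still has runs pending garbage collection is kept
and additionally closed with drop_lsm{lsm_id=10, }.
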
diff --git a/src/box/vy_log.h b/src/box/vy_log.h
index 8e1a2a1d..2dad4ee9 100644
--- a/src/box/vy_log.h
+++ b/src/box/vy_log.h
@@ -64,19 +64,19 @@ struct mh_i64ptr_t;
 /** Type of a metadata log record. */
 enum vy_log_record_type {
 	/**
-	 * Create a new vinyl index.
-	 * Requires vy_log_record::index_id, index_def_id, space_def_id,
+	 * Create a new LSM tree.
+	 * Requires vy_log_record::lsm_id, index_id, space_id,
 	 * key_def (with primary key parts), commit_lsn.
 	 */
-	VY_LOG_CREATE_INDEX		= 0,
+	VY_LOG_CREATE_LSM		= 0,
 	/**
-	 * Drop an index.
-	 * Requires vy_log_record::index_id.
+	 * Drop an LSM tree.
+	 * Requires vy_log_record::lsm_id.
 	 */
-	VY_LOG_DROP_INDEX		= 1,
+	VY_LOG_DROP_LSM			= 1,
 	/**
-	 * Insert a new range into a vinyl index.
-	 * Requires vy_log_record::index_id, range_id, begin, end.
+	 * Insert a new range into an LSM tree.
+	 * Requires vy_log_record::lsm_id, range_id, begin, end.
 	 */
 	VY_LOG_INSERT_RANGE		= 2,
 	/**
@@ -86,7 +86,7 @@ enum vy_log_record_type {
 	VY_LOG_DELETE_RANGE		= 3,
 	/**
 	 * Prepare a vinyl run file.
-	 * Requires vy_log_record::index_id, run_id.
+	 * Requires vy_log_record::lsm_id, run_id.
 	 *
 	 * Record of this type is written before creating a run file.
 	 * It is needed to keep track of run files left unfinished
 	 * due to errors, so that they can be removed after recovery.
@@ -95,7 +95,7 @@ enum vy_log_record_type {
 	VY_LOG_PREPARE_RUN		= 4,
 	/**
 	 * Commit a vinyl run file creation.
-	 * Requires vy_log_record::index_id, run_id, dump_lsn.
+	 * Requires vy_log_record::lsm_id, run_id, dump_lsn.
 	 *
 	 * Written after a run file was successfully created.
 	 */
@@ -135,10 +135,10 @@ enum vy_log_record_type {
 	 */
 	VY_LOG_DELETE_SLICE		= 9,
 	/**
-	 * Update LSN of the last index dump.
-	 * Requires vy_log_record::index_id, dump_lsn.
+	 * Update LSN of the last LSM tree dump.
+	 * Requires vy_log_record::lsm_id, dump_lsn.
 	 */
-	VY_LOG_DUMP_INDEX		= 10,
+	VY_LOG_DUMP_LSM			= 10,
 	/**
 	 * We don't split vylog into snapshot and log - all records
 	 * are written to the same file. Since we need to load a
@@ -151,19 +151,19 @@ enum vy_log_record_type {
 	 */
 	VY_LOG_SNAPSHOT			= 11,
 	/**
-	 * When we used LSN for identifying indexes in vylog, we
-	 * couldn't simply recreate an index on space truncation,
-	 * because in case the space had more than one index, we
+	 * When we used LSN for identifying LSM trees in vylog, we
+	 * couldn't simply recreate an LSM tree on space truncation,
+	 * because in case the space had more than one LSM tree, we
 	 * wouldn't be able to distinguish them after truncation.
 	 * So we wrote a special 'truncate' record.
 	 *
-	 * Now, we assign a unique id to each index and so we don't
+	 * Now, we assign a unique id to each LSM tree and so we don't
 	 * need a special record type for space truncation. If we
 	 * are recovering from an old vylog, we simply ignore all
 	 * 'truncate' records - this will result in replay of all
 	 * WAL records written after truncation.
 	 */
-	VY_LOG_TRUNCATE_INDEX		= 12,
+	VY_LOG_TRUNCATE_LSM		= 12,
 
 	vy_log_record_type_MAX
 };
@@ -172,8 +172,8 @@ enum vy_log_record_type {
 struct vy_log_record {
 	/** Type of the record. */
 	enum vy_log_record_type type;
-	/** Unique ID of the vinyl index. */
-	int64_t index_id;
+	/** Unique ID of the vinyl LSM tree. */
+	int64_t lsm_id;
 	/** Unique ID of the vinyl range. */
 	int64_t range_id;
 	/** Unique ID of the vinyl run. */
@@ -191,9 +191,9 @@ struct vy_log_record {
 	 */
 	const char *end;
 	/** Ordinal index number in the space. */
-	uint32_t index_def_id;
+	uint32_t index_id;
 	/** Space ID. */
-	uint32_t space_def_id;
+	uint32_t space_id;
 	/** Index key definition, as defined by the user. */
 	const struct key_def *key_def;
 	/** Array of key part definitions. */
@@ -216,14 +216,14 @@ struct vy_log_record {
 /** Recovery context. */
 struct vy_recovery {
 	/**
-	 * List of all indexes stored in the recovery context,
-	 * linked by vy_index_recovery_info::in_recovery.
+	 * List of all LSM trees stored in the recovery context,
+	 * linked by vy_lsm_recovery_info::in_recovery.
 	 */
-	struct rlist indexes;
-	/** space_id, index_id -> vy_index_recovery_info. */
+	struct rlist lsms;
+	/** space_id, index_id -> vy_lsm_recovery_info. */
 	struct mh_i64ptr_t *index_id_hash;
-	/** ID -> vy_index_recovery_info. */
-	struct mh_i64ptr_t *index_hash;
+	/** ID -> vy_lsm_recovery_info. */
+	struct mh_i64ptr_t *lsm_hash;
 	/** ID -> vy_range_recovery_info. */
 	struct mh_i64ptr_t *range_hash;
 	/** ID -> vy_run_recovery_info. */
@@ -237,11 +237,11 @@ struct vy_recovery {
 	int64_t max_id;
 };
 
-/** Vinyl index info stored in a recovery context. */
-struct vy_index_recovery_info {
-	/** Link in vy_recovery::indexes. */
+/** LSM tree info stored in a recovery context. */
+struct vy_lsm_recovery_info {
+	/** Link in vy_recovery::lsms. */
 	struct rlist in_recovery;
-	/** ID of the index. */
+	/** ID of the LSM tree. */
 	int64_t id;
 	/** Ordinal index number in the space. */
 	uint32_t index_id;
@@ -251,29 +251,29 @@ struct vy_index_recovery_info {
 	struct key_part_def *key_parts;
 	/** Number of key parts. */
 	uint32_t key_part_count;
-	/** True if the index was dropped. */
+	/** True if the LSM tree was dropped. */
 	bool is_dropped;
-	/** LSN of the WAL row that committed the index. */
+	/** LSN of the WAL row that committed the LSM tree. */
 	int64_t commit_lsn;
-	/** LSN of the last index dump. */
+	/** LSN of the last LSM tree dump. */
 	int64_t dump_lsn;
 	/**
-	 * List of all ranges in the index, linked by
-	 * vy_range_recovery_info::in_index.
+	 * List of all ranges in the LSM tree, linked by
+	 * vy_range_recovery_info::in_lsm.
 	 */
 	struct rlist ranges;
 	/**
-	 * List of all runs created for the index
+	 * List of all runs created for the LSM tree
 	 * (both committed and not), linked by
-	 * vy_run_recovery_info::in_index.
+	 * vy_run_recovery_info::in_lsm.
 	 */
 	struct rlist runs;
 };
 
 /** Vinyl range info stored in a recovery context. */
 struct vy_range_recovery_info {
-	/** Link in vy_index_recovery_info::ranges. */
-	struct rlist in_index;
+	/** Link in vy_lsm_recovery_info::ranges. */
+	struct rlist in_lsm;
 	/** ID of the range. */
 	int64_t id;
 	/** Start of the range, stored in MsgPack array. */
@@ -291,8 +291,8 @@ struct vy_range_recovery_info {
 
 /** Run info stored in a recovery context. */
 struct vy_run_recovery_info {
-	/** Link in vy_index_recovery_info::runs. */
-	struct rlist in_index;
+	/** Link in vy_lsm_recovery_info::runs. */
+	struct rlist in_lsm;
 	/** ID of the run. */
 	int64_t id;
 	/** Max LSN stored on disk. */
@@ -356,10 +356,10 @@ vy_log_open(struct xlog *xlog);
 /**
  * Rotate the metadata log. This function creates a new
  * xlog file in the log directory having vclock @vclock
- * and writes records required to recover active indexes.
+ * and writes records required to recover active LSM trees.
  * The goal of log rotation is to compact the log file by
  * discarding records cancelling each other and records left
- * from dropped indexes.
+ * from dropped LSM trees.
  *
  * Returns 0 on success, -1 on failure.
  */
@@ -439,7 +439,7 @@ vy_log_bootstrap(void);
  * Prepare the metadata log for recovery from the file having
  * vclock @vclock and return the recovery context.
  *
- * After this function is called, vinyl indexes may be recovered from
+ * After this function is called, LSM trees may be recovered from
  * the log using vy_recovery methods. When recovery is complete,
  * one must call vy_log_end_recovery(). After that the recovery context
  * may be deleted with vy_recovery_delete().
@@ -481,13 +481,13 @@ void
 vy_recovery_delete(struct vy_recovery *recovery);
 
 /**
- * Look up the last incarnation of an index stored in a recovery context.
+ * Look up the last incarnation of an LSM tree stored in a recovery context.
  *
- * Returns NULL if the index was not found.
+ * Returns NULL if the LSM tree was not found.
  */
-struct vy_index_recovery_info *
-vy_recovery_index_by_id(struct vy_recovery *recovery,
-			uint32_t space_id, uint32_t index_id);
+struct vy_lsm_recovery_info *
+vy_recovery_lsm_by_index_id(struct vy_recovery *recovery,
+			    uint32_t space_id, uint32_t index_id);
 
 /**
  * Initialize a log record with default values.
@@ -500,42 +500,42 @@ vy_log_record_init(struct vy_log_record *record)
 	memset(record, 0, sizeof(*record));
 }
 
-/** Helper to log a vinyl index creation. */
+/** Helper to log a vinyl LSM tree creation. */
 static inline void
-vy_log_create_index(int64_t id, uint32_t space_id, uint32_t index_id,
-		    const struct key_def *key_def, int64_t commit_lsn)
+vy_log_create_lsm(int64_t id, uint32_t space_id, uint32_t index_id,
+		  const struct key_def *key_def, int64_t commit_lsn)
 {
 	struct vy_log_record record;
 	vy_log_record_init(&record);
-	record.type = VY_LOG_CREATE_INDEX;
-	record.index_id = id;
-	record.space_def_id = space_id;
-	record.index_def_id = index_id;
+	record.type = VY_LOG_CREATE_LSM;
+	record.lsm_id = id;
+	record.space_id = space_id;
+	record.index_id = index_id;
 	record.key_def = key_def;
 	record.commit_lsn = commit_lsn;
 	vy_log_write(&record);
 }
 
-/** Helper to log a vinyl index drop. */
+/** Helper to log a vinyl LSM tree drop. */
 static inline void
-vy_log_drop_index(int64_t id)
+vy_log_drop_lsm(int64_t id)
 {
 	struct vy_log_record record;
 	vy_log_record_init(&record);
-	record.type = VY_LOG_DROP_INDEX;
-	record.index_id = id;
+	record.type = VY_LOG_DROP_LSM;
+	record.lsm_id = id;
 	vy_log_write(&record);
 }
 
 /** Helper to log a vinyl range insertion. */
 static inline void
-vy_log_insert_range(int64_t index_id, int64_t range_id,
+vy_log_insert_range(int64_t lsm_id, int64_t range_id,
 		    const char *begin, const char *end)
 {
 	struct vy_log_record record;
 	vy_log_record_init(&record);
 	record.type = VY_LOG_INSERT_RANGE;
-	record.index_id = index_id;
+	record.lsm_id = lsm_id;
 	record.range_id = range_id;
 	record.begin = begin;
 	record.end = end;
@@ -555,24 +555,24 @@ vy_log_delete_range(int64_t range_id)
 
 /** Helper to log a vinyl run file creation. */
 static inline void
-vy_log_prepare_run(int64_t index_id, int64_t run_id)
+vy_log_prepare_run(int64_t lsm_id, int64_t run_id)
 {
 	struct vy_log_record record;
 	vy_log_record_init(&record);
 	record.type = VY_LOG_PREPARE_RUN;
-	record.index_id = index_id;
+	record.lsm_id = lsm_id;
 	record.run_id = run_id;
 	vy_log_write(&record);
 }
 
 /** Helper to log a vinyl run creation. */
 static inline void
-vy_log_create_run(int64_t index_id, int64_t run_id, int64_t dump_lsn)
+vy_log_create_run(int64_t lsm_id, int64_t run_id, int64_t dump_lsn)
 {
 	struct vy_log_record record;
 	vy_log_record_init(&record);
 	record.type = VY_LOG_CREATE_RUN;
-	record.index_id = index_id;
+	record.lsm_id = lsm_id;
 	record.run_id = run_id;
 	record.dump_lsn = dump_lsn;
 	vy_log_write(&record);
@@ -628,14 +628,14 @@ vy_log_delete_slice(int64_t slice_id)
 	vy_log_write(&record);
 }
 
-/** Helper to log index dump. */
+/** Helper to log LSM tree dump. */
 static inline void
-vy_log_dump_index(int64_t id, int64_t dump_lsn)
+vy_log_dump_lsm(int64_t id, int64_t dump_lsn)
 {
 	struct vy_log_record record;
 	vy_log_record_init(&record);
-	record.type = VY_LOG_DUMP_INDEX;
-	record.index_id = id;
+	record.type = VY_LOG_DUMP_LSM;
+	record.lsm_id = id;
 	record.dump_lsn = dump_lsn;
 	vy_log_write(&record);
 }
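
These helpers are called inside a vylog transaction; assuming the
existing vy_log_tx_begin()/vy_log_tx_commit() API, a typical creation
sequence after this patch reads (a sketch, arguments illustrative):

	vy_log_tx_begin();
	vy_log_create_lsm(lsm->id, lsm->space_id, lsm->index_id,
			  lsm->key_def, commit_lsn);
	vy_log_insert_range(lsm->id, range->id, NULL, NULL);
	if (vy_log_tx_commit() != 0)
		return -1;	/* vylog write failed */
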
diff --git a/src/box/vy_index.c b/src/box/vy_lsm.c
similarity index 56%
rename from src/box/vy_index.c
rename to src/box/vy_lsm.c
index 09894982..88a75e32 100644
--- a/src/box/vy_index.c
+++ b/src/box/vy_lsm.c
@@ -28,7 +28,7 @@
  * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  */
-#include "vy_index.h"
+#include "vy_lsm.h"
 
 #include "trivia/util.h"
 #include <stdbool.h>
@@ -53,36 +53,36 @@
 #include "vy_read_set.h"
 
 void
-vy_index_validate_formats(const struct vy_index *index)
+vy_lsm_validate_formats(const struct vy_lsm *lsm)
 {
-	(void) index;
-	assert(index->disk_format != NULL);
-	assert(index->mem_format != NULL);
-	assert(index->mem_format_with_colmask != NULL);
-	assert(index->upsert_format != NULL);
-	uint32_t index_field_count = index->mem_format->index_field_count;
+	(void) lsm;
+	assert(lsm->disk_format != NULL);
+	assert(lsm->mem_format != NULL);
+	assert(lsm->mem_format_with_colmask != NULL);
+	assert(lsm->upsert_format != NULL);
+	uint32_t index_field_count = lsm->mem_format->index_field_count;
 	(void) index_field_count;
-	if (index->index_id == 0) {
-		assert(index->disk_format == index->mem_format);
-		assert(index->disk_format->index_field_count ==
+	if (lsm->index_id == 0) {
+		assert(lsm->disk_format == lsm->mem_format);
+		assert(lsm->disk_format->index_field_count ==
 		       index_field_count);
-		assert(index->mem_format_with_colmask->index_field_count ==
+		assert(lsm->mem_format_with_colmask->index_field_count ==
 		       index_field_count);
 	} else {
-		assert(index->disk_format != index->mem_format);
-		assert(index->disk_format->index_field_count <=
+		assert(lsm->disk_format != lsm->mem_format);
+		assert(lsm->disk_format->index_field_count <=
 		       index_field_count);
 	}
-	assert(index->upsert_format->index_field_count == index_field_count);
-	assert(index->mem_format_with_colmask->index_field_count ==
+	assert(lsm->upsert_format->index_field_count == index_field_count);
+	assert(lsm->mem_format_with_colmask->index_field_count ==
 	       index_field_count);
 }
 
 int
-vy_index_env_create(struct vy_index_env *env, const char *path,
-		    int64_t *p_generation,
-		    vy_upsert_thresh_cb upsert_thresh_cb,
-		    void *upsert_thresh_arg)
+vy_lsm_env_create(struct vy_lsm_env *env, const char *path,
+		  int64_t *p_generation,
+		  vy_upsert_thresh_cb upsert_thresh_cb,
+		  void *upsert_thresh_arg)
 {
 	env->key_format = tuple_format_new(&vy_tuple_format_vtab,
 					   NULL, 0, 0, NULL, 0, NULL);
@@ -99,40 +99,40 @@ vy_index_env_create(struct vy_index_env *env, const char *path,
 	env->upsert_thresh_cb = upsert_thresh_cb;
 	env->upsert_thresh_arg = upsert_thresh_arg;
 	env->too_long_threshold = TIMEOUT_INFINITY;
-	env->index_count = 0;
+	env->lsm_count = 0;
 	return 0;
 }
 
 void
-vy_index_env_destroy(struct vy_index_env *env)
+vy_lsm_env_destroy(struct vy_lsm_env *env)
 {
 	tuple_unref(env->empty_key);
 	tuple_format_unref(env->key_format);
 }
 
 const char *
-vy_index_name(struct vy_index *index)
+vy_lsm_name(struct vy_lsm *lsm)
 {
 	char *buf = tt_static_buf();
 	snprintf(buf, TT_STATIC_BUF_LEN, "%u/%u",
-		 (unsigned)index->space_id, (unsigned)index->index_id);
+		 (unsigned)lsm->space_id, (unsigned)lsm->index_id);
 	return buf;
 }
 
 size_t
-vy_index_mem_tree_size(struct vy_index *index)
+vy_lsm_mem_tree_size(struct vy_lsm *lsm)
 {
 	struct vy_mem *mem;
-	size_t size = index->mem->tree_extent_size;
-	rlist_foreach_entry(mem, &index->sealed, in_sealed)
+	size_t size = lsm->mem->tree_extent_size;
+	rlist_foreach_entry(mem, &lsm->sealed, in_sealed)
 		size += mem->tree_extent_size;
 	return size;
 }
 
-struct vy_index *
-vy_index_new(struct vy_index_env *index_env, struct vy_cache_env *cache_env,
+struct vy_lsm *
+vy_lsm_new(struct vy_lsm_env *lsm_env, struct vy_cache_env *cache_env,
 	     struct vy_mem_env *mem_env, struct index_def *index_def,
-	     struct tuple_format *format, struct vy_index *pk)
+	     struct tuple_format *format, struct vy_lsm *pk)
 {
 	static int64_t run_buckets[] = {
 		0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 15, 20, 25, 50, 100,
@@ -141,17 +141,17 @@ vy_index_new(struct vy_index_env *index_env, struct vy_cache_env *cache_env,
 	assert(index_def->key_def->part_count > 0);
 	assert(index_def->iid == 0 || pk != NULL);
 
-	struct vy_index *index = calloc(1, sizeof(struct vy_index));
-	if (index == NULL) {
-		diag_set(OutOfMemory, sizeof(struct vy_index),
-			 "calloc", "struct vy_index");
+	struct vy_lsm *lsm = calloc(1, sizeof(struct vy_lsm));
+	if (lsm == NULL) {
+		diag_set(OutOfMemory, sizeof(struct vy_lsm),
+			 "calloc", "struct vy_lsm");
 		goto fail;
 	}
-	index->env = index_env;
+	lsm->env = lsm_env;
 
-	index->tree = malloc(sizeof(*index->tree));
-	if (index->tree == NULL) {
-		diag_set(OutOfMemory, sizeof(*index->tree),
+	lsm->tree = malloc(sizeof(*lsm->tree));
+	if (lsm->tree == NULL) {
+		diag_set(OutOfMemory, sizeof(*lsm->tree),
 			 "malloc", "vy_range_tree_t");
 		goto fail_tree;
 	}
@@ -164,105 +164,105 @@ vy_index_new(struct vy_index_env *index_env, struct vy_cache_env *cache_env,
 	if (cmp_def == NULL)
 		goto fail_cmp_def;
 
-	index->cmp_def = cmp_def;
-	index->key_def = key_def;
+	lsm->cmp_def = cmp_def;
+	lsm->key_def = key_def;
 	if (index_def->iid == 0) {
 		/*
 		 * Disk tuples can be returned to a user from the
 		 * primary key, so they must have the same field
 		 * definitions as space->format tuples.
 		 */
-		index->disk_format = format;
+		lsm->disk_format = format;
 		tuple_format_ref(format);
 	} else {
-		index->disk_format = tuple_format_new(&vy_tuple_format_vtab,
-						      &cmp_def, 1, 0, NULL, 0,
-						      NULL);
-		if (index->disk_format == NULL)
+		lsm->disk_format = tuple_format_new(&vy_tuple_format_vtab,
+						    &cmp_def, 1, 0, NULL, 0,
+						    NULL);
+		if (lsm->disk_format == NULL)
 			goto fail_format;
 		for (uint32_t i = 0; i < cmp_def->part_count; ++i) {
 			uint32_t fieldno = cmp_def->parts[i].fieldno;
-			index->disk_format->fields[fieldno].is_nullable =
+			lsm->disk_format->fields[fieldno].is_nullable =
 				format->fields[fieldno].is_nullable;
 		}
 	}
-	tuple_format_ref(index->disk_format);
+	tuple_format_ref(lsm->disk_format);
 
 	if (index_def->iid == 0) {
-		index->upsert_format =
+		lsm->upsert_format =
 			vy_tuple_format_new_upsert(format);
-		if (index->upsert_format == NULL)
+		if (lsm->upsert_format == NULL)
 			goto fail_upsert_format;
-		tuple_format_ref(index->upsert_format);
+		tuple_format_ref(lsm->upsert_format);
 
-		index->mem_format_with_colmask =
+		lsm->mem_format_with_colmask =
 			vy_tuple_format_new_with_colmask(format);
-		if (index->mem_format_with_colmask == NULL)
+		if (lsm->mem_format_with_colmask == NULL)
 			goto fail_mem_format_with_colmask;
-		tuple_format_ref(index->mem_format_with_colmask);
+		tuple_format_ref(lsm->mem_format_with_colmask);
 	} else {
-		index->mem_format_with_colmask = pk->mem_format_with_colmask;
-		index->upsert_format = pk->upsert_format;
-		tuple_format_ref(index->mem_format_with_colmask);
-		tuple_format_ref(index->upsert_format);
+		lsm->mem_format_with_colmask = pk->mem_format_with_colmask;
+		lsm->upsert_format = pk->upsert_format;
+		tuple_format_ref(lsm->mem_format_with_colmask);
+		tuple_format_ref(lsm->upsert_format);
 	}
 
-	if (vy_index_stat_create(&index->stat) != 0)
+	if (vy_lsm_stat_create(&lsm->stat) != 0)
 		goto fail_stat;
 
-	index->run_hist = histogram_new(run_buckets, lengthof(run_buckets));
-	if (index->run_hist == NULL)
+	lsm->run_hist = histogram_new(run_buckets, lengthof(run_buckets));
+	if (lsm->run_hist == NULL)
 		goto fail_run_hist;
 
-	index->mem = vy_mem_new(mem_env, *index->env->p_generation,
-				cmp_def, format, index->mem_format_with_colmask,
-				index->upsert_format, schema_version);
-	if (index->mem == NULL)
+	lsm->mem = vy_mem_new(mem_env, *lsm->env->p_generation,
+			      cmp_def, format, lsm->mem_format_with_colmask,
+			      lsm->upsert_format, schema_version);
+	if (lsm->mem == NULL)
 		goto fail_mem;
 
-	index->id = -1;
-	index->refs = 1;
-	index->dump_lsn = -1;
-	vy_cache_create(&index->cache, cache_env, cmp_def);
-	rlist_create(&index->sealed);
-	vy_range_tree_new(index->tree);
-	vy_range_heap_create(&index->range_heap);
-	rlist_create(&index->runs);
-	index->pk = pk;
+	lsm->id = -1;
+	lsm->refs = 1;
+	lsm->dump_lsn = -1;
+	vy_cache_create(&lsm->cache, cache_env, cmp_def);
+	rlist_create(&lsm->sealed);
+	vy_range_tree_new(lsm->tree);
+	vy_range_heap_create(&lsm->range_heap);
+	rlist_create(&lsm->runs);
+	lsm->pk = pk;
 	if (pk != NULL)
-		vy_index_ref(pk);
-	index->mem_format = format;
-	tuple_format_ref(index->mem_format);
-	index->in_dump.pos = UINT32_MAX;
-	index->in_compact.pos = UINT32_MAX;
-	index->space_id = index_def->space_id;
-	index->index_id = index_def->iid;
-	index->opts = index_def->opts;
-	index->check_is_unique = index->opts.is_unique;
-	vy_index_read_set_new(&index->read_set);
-
-	index_env->index_count++;
-	vy_index_validate_formats(index);
-	return index;
+		vy_lsm_ref(pk);
+	lsm->mem_format = format;
+	tuple_format_ref(lsm->mem_format);
+	lsm->in_dump.pos = UINT32_MAX;
+	lsm->in_compact.pos = UINT32_MAX;
+	lsm->space_id = index_def->space_id;
+	lsm->index_id = index_def->iid;
+	lsm->opts = index_def->opts;
+	lsm->check_is_unique = lsm->opts.is_unique;
+	vy_lsm_read_set_new(&lsm->read_set);
+
+	lsm_env->lsm_count++;
+	vy_lsm_validate_formats(lsm);
+	return lsm;
 
 fail_mem:
-	histogram_delete(index->run_hist);
+	histogram_delete(lsm->run_hist);
 fail_run_hist:
-	vy_index_stat_destroy(&index->stat);
+	vy_lsm_stat_destroy(&lsm->stat);
 fail_stat:
-	tuple_format_unref(index->mem_format_with_colmask);
+	tuple_format_unref(lsm->mem_format_with_colmask);
 fail_mem_format_with_colmask:
-	tuple_format_unref(index->upsert_format);
+	tuple_format_unref(lsm->upsert_format);
 fail_upsert_format:
-	tuple_format_unref(index->disk_format);
+	tuple_format_unref(lsm->disk_format);
 fail_format:
 	key_def_delete(cmp_def);
 fail_cmp_def:
 	key_def_delete(key_def);
 fail_key_def:
-	free(index->tree);
+	free(lsm->tree);
 fail_tree:
-	free(index);
+	free(lsm);
 fail:
 	return NULL;
 }
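
vy_lsm_new() above follows the goto-unwind error handling used throughout
vinyl: every allocation gets a cleanup label, and a failure jumps to the
label that releases everything acquired so far, in reverse order. A minimal
standalone sketch of the idiom, with purely illustrative names (thing_new
and friends are not from the Tarantool source):

    #include <stdlib.h>

    struct thing {
    	int *a;
    	int *b;
    };

    static struct thing *
    thing_new(void)
    {
    	struct thing *t = calloc(1, sizeof(*t));
    	if (t == NULL)
    		goto fail;
    	t->a = malloc(sizeof(*t->a));
    	if (t->a == NULL)
    		goto fail_a;
    	t->b = malloc(sizeof(*t->b));
    	if (t->b == NULL)
    		goto fail_b;
    	return t;
    fail_b:
    	free(t->a);	/* undo the second allocation */
    fail_a:
    	free(t);	/* undo the first allocation */
    fail:
    	return NULL;
    }
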
@@ -280,67 +280,67 @@ vy_range_tree_free_cb(vy_range_tree_t *t, struct vy_range *range, void *arg)
 }
 
 void
-vy_index_delete(struct vy_index *index)
+vy_lsm_delete(struct vy_lsm *lsm)
 {
-	assert(index->refs == 0);
-	assert(index->in_dump.pos == UINT32_MAX);
-	assert(index->in_compact.pos == UINT32_MAX);
-	assert(vy_index_read_set_empty(&index->read_set));
-	assert(index->env->index_count > 0);
+	assert(lsm->refs == 0);
+	assert(lsm->in_dump.pos == UINT32_MAX);
+	assert(lsm->in_compact.pos == UINT32_MAX);
+	assert(vy_lsm_read_set_empty(&lsm->read_set));
+	assert(lsm->env->lsm_count > 0);
 
-	index->env->index_count--;
+	lsm->env->lsm_count--;
 
-	if (index->pk != NULL)
-		vy_index_unref(index->pk);
+	if (lsm->pk != NULL)
+		vy_lsm_unref(lsm->pk);
 
 	struct vy_mem *mem, *next_mem;
-	rlist_foreach_entry_safe(mem, &index->sealed, in_sealed, next_mem)
+	rlist_foreach_entry_safe(mem, &lsm->sealed, in_sealed, next_mem)
 		vy_mem_delete(mem);
-	vy_mem_delete(index->mem);
+	vy_mem_delete(lsm->mem);
 
 	struct vy_run *run, *next_run;
-	rlist_foreach_entry_safe(run, &index->runs, in_index, next_run)
-		vy_index_remove_run(index, run);
-
-	vy_range_tree_iter(index->tree, NULL, vy_range_tree_free_cb, NULL);
-	vy_range_heap_destroy(&index->range_heap);
-	tuple_format_unref(index->disk_format);
-	tuple_format_unref(index->mem_format_with_colmask);
-	tuple_format_unref(index->upsert_format);
-	key_def_delete(index->cmp_def);
-	key_def_delete(index->key_def);
-	histogram_delete(index->run_hist);
-	vy_index_stat_destroy(&index->stat);
-	vy_cache_destroy(&index->cache);
-	tuple_format_unref(index->mem_format);
-	free(index->tree);
-	TRASH(index);
-	free(index);
+	rlist_foreach_entry_safe(run, &lsm->runs, in_lsm, next_run)
+		vy_lsm_remove_run(lsm, run);
+
+	vy_range_tree_iter(lsm->tree, NULL, vy_range_tree_free_cb, NULL);
+	vy_range_heap_destroy(&lsm->range_heap);
+	tuple_format_unref(lsm->disk_format);
+	tuple_format_unref(lsm->mem_format_with_colmask);
+	tuple_format_unref(lsm->upsert_format);
+	key_def_delete(lsm->cmp_def);
+	key_def_delete(lsm->key_def);
+	histogram_delete(lsm->run_hist);
+	vy_lsm_stat_destroy(&lsm->stat);
+	vy_cache_destroy(&lsm->cache);
+	tuple_format_unref(lsm->mem_format);
+	free(lsm->tree);
+	TRASH(lsm);
+	free(lsm);
 }
 
-/** Initialize the range tree of a new index. */
+/** Initialize the range tree of a new LSM tree. */
 static int
-vy_index_init_range_tree(struct vy_index *index)
+vy_lsm_init_range_tree(struct vy_lsm *lsm)
 {
 	struct vy_range *range = vy_range_new(vy_log_next_id(), NULL, NULL,
-					      index->cmp_def);
+					      lsm->cmp_def);
 	if (range == NULL)
 		return -1;
 
-	assert(index->range_count == 0);
-	vy_index_add_range(index, range);
-	vy_index_acct_range(index, range);
+	assert(lsm->range_count == 0);
+	vy_lsm_add_range(lsm, range);
+	vy_lsm_acct_range(lsm, range);
 	return 0;
 }
 
 int
-vy_index_create(struct vy_index *index)
+vy_lsm_create(struct vy_lsm *lsm)
 {
-	/* Make index directory. */
+	/* Make LSM tree directory. */
 	int rc;
 	char path[PATH_MAX];
-	vy_index_snprint_path(path, sizeof(path), index->env->path,
-			      index->space_id, index->index_id);
+	vy_lsm_snprint_path(path, sizeof(path), lsm->env->path,
+			    lsm->space_id, lsm->index_id);
 	char *path_sep = path;
 	while (*path_sep == '/') {
 		/* Don't create root */
@@ -367,17 +367,16 @@ vy_index_create(struct vy_index *index)
 	}
 
 	/* Assign unique id. */
-	assert(index->id < 0);
-	index->id = vy_log_next_id();
+	assert(lsm->id < 0);
+	lsm->id = vy_log_next_id();
 
 	/* Allocate initial range. */
-	return vy_index_init_range_tree(index);
+	return vy_lsm_init_range_tree(lsm);
 }
 
 static struct vy_run *
-vy_index_recover_run(struct vy_index *index,
-		     struct vy_run_recovery_info *run_info,
-		     struct vy_run_env *run_env, bool force_recovery)
+vy_lsm_recover_run(struct vy_lsm *lsm, struct vy_run_recovery_info *run_info,
+		   struct vy_run_env *run_env, bool force_recovery)
 {
 	assert(!run_info->is_dropped);
 	assert(!run_info->is_incomplete);
@@ -392,18 +391,18 @@ vy_index_recover_run(struct vy_index *index,
 		return NULL;
 
 	run->dump_lsn = run_info->dump_lsn;
-	if (vy_run_recover(run, index->env->path,
-			   index->space_id, index->index_id) != 0 &&
+	if (vy_run_recover(run, lsm->env->path,
+			   lsm->space_id, lsm->index_id) != 0 &&
 	    (!force_recovery ||
-	     vy_run_rebuild_index(run, index->env->path,
-				  index->space_id, index->index_id,
-				  index->cmp_def, index->key_def,
-				  index->mem_format, index->upsert_format,
-				  &index->opts) != 0)) {
+	     vy_run_rebuild_index(run, lsm->env->path,
+				  lsm->space_id, lsm->index_id,
+				  lsm->cmp_def, lsm->key_def,
+				  lsm->mem_format, lsm->upsert_format,
+				  &lsm->opts) != 0)) {
 		vy_run_unref(run);
 		return NULL;
 	}
-	vy_index_add_run(index, run);
+	vy_lsm_add_run(lsm, run);
 
 	/*
 	 * The same run can be referenced by more than one slice
@@ -411,48 +410,48 @@ vy_index_recover_run(struct vy_index *index,
 	 * the same run multiple times.
 	 *
 	 * Runs are stored with their reference counters elevated.
-	 * We drop the extra references as soon as index recovery
-	 * is complete (see vy_index_recover()).
+	 * We drop the extra references as soon as LSM tree recovery
+	 * is complete (see vy_lsm_recover()).
 	 */
 	run_info->data = run;
 	return run;
 }
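
The run_info->data field set above acts as a one-shot cache: a run
referenced by several slices is materialized once, and subsequent slices
only take a reference to the cached object. A hedged sketch of the idea
with simplified, illustrative types (run_load, run_recover, and the
structs below are stand-ins, not the real API):

    #include <stdlib.h>

    struct run {
    	int refs;
    	long id;
    };

    struct run_info {
    	long id;
    	struct run *data;	/* in-memory run, NULL until loaded */
    };

    /* Stub standing in for reading the run files from disk. */
    static struct run *
    run_load(long id)
    {
    	struct run *run = calloc(1, sizeof(*run));
    	if (run != NULL) {
    		run->id = id;
    		run->refs = 1;
    	}
    	return run;
    }

    static struct run *
    run_recover(struct run_info *info)
    {
    	if (info->data != NULL) {
    		/* Already loaded for another slice: share it. */
    		struct run *run = info->data;
    		run->refs++;
    		return run;
    	}
    	struct run *run = run_load(info->id);
    	if (run == NULL)
    		return NULL;
    	info->data = run;	/* cache for the next slice */
    	return run;
    }
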
 
 static struct vy_slice *
-vy_index_recover_slice(struct vy_index *index, struct vy_range *range,
-		       struct vy_slice_recovery_info *slice_info,
-		       struct vy_run_env *run_env, bool force_recovery)
+vy_lsm_recover_slice(struct vy_lsm *lsm, struct vy_range *range,
+		     struct vy_slice_recovery_info *slice_info,
+		     struct vy_run_env *run_env, bool force_recovery)
 {
 	struct tuple *begin = NULL, *end = NULL;
 	struct vy_slice *slice = NULL;
 	struct vy_run *run;
 
 	if (slice_info->begin != NULL) {
-		begin = vy_key_from_msgpack(index->env->key_format,
+		begin = vy_key_from_msgpack(lsm->env->key_format,
 					    slice_info->begin);
 		if (begin == NULL)
 			goto out;
 	}
 	if (slice_info->end != NULL) {
-		end = vy_key_from_msgpack(index->env->key_format,
+		end = vy_key_from_msgpack(lsm->env->key_format,
 					  slice_info->end);
 		if (end == NULL)
 			goto out;
 	}
 	if (begin != NULL && end != NULL &&
-	    vy_key_compare(begin, end, index->cmp_def) >= 0) {
+	    vy_key_compare(begin, end, lsm->cmp_def) >= 0) {
 		diag_set(ClientError, ER_INVALID_VYLOG_FILE,
 			 tt_sprintf("begin >= end for slice %lld",
 				    (long long)slice_info->id));
 		goto out;
 	}
 
-	run = vy_index_recover_run(index, slice_info->run,
-				   run_env, force_recovery);
+	run = vy_lsm_recover_run(lsm, slice_info->run,
+				 run_env, force_recovery);
 	if (run == NULL)
 		goto out;
 
-	slice = vy_slice_new(slice_info->id, run, begin, end, index->cmp_def);
+	slice = vy_slice_new(slice_info->id, run, begin, end, lsm->cmp_def);
 	if (slice == NULL)
 		goto out;
 
@@ -466,34 +465,34 @@ out:
 }
 
 static struct vy_range *
-vy_index_recover_range(struct vy_index *index,
-		       struct vy_range_recovery_info *range_info,
-		       struct vy_run_env *run_env, bool force_recovery)
+vy_lsm_recover_range(struct vy_lsm *lsm,
+		     struct vy_range_recovery_info *range_info,
+		     struct vy_run_env *run_env, bool force_recovery)
 {
 	struct tuple *begin = NULL, *end = NULL;
 	struct vy_range *range = NULL;
 
 	if (range_info->begin != NULL) {
-		begin = vy_key_from_msgpack(index->env->key_format,
+		begin = vy_key_from_msgpack(lsm->env->key_format,
 					    range_info->begin);
 		if (begin == NULL)
 			goto out;
 	}
 	if (range_info->end != NULL) {
-		end = vy_key_from_msgpack(index->env->key_format,
+		end = vy_key_from_msgpack(lsm->env->key_format,
 					  range_info->end);
 		if (end == NULL)
 			goto out;
 	}
 	if (begin != NULL && end != NULL &&
-	    vy_key_compare(begin, end, index->cmp_def) >= 0) {
+	    vy_key_compare(begin, end, lsm->cmp_def) >= 0) {
 		diag_set(ClientError, ER_INVALID_VYLOG_FILE,
 			 tt_sprintf("begin >= end for range %lld",
 				    (long long)range_info->id));
 		goto out;
 	}
 
-	range = vy_range_new(range_info->id, begin, end, index->cmp_def);
+	range = vy_range_new(range_info->id, begin, end, lsm->cmp_def);
 	if (range == NULL)
 		goto out;
 
@@ -504,14 +503,14 @@ vy_index_recover_range(struct vy_index *index,
 	 */
 	struct vy_slice_recovery_info *slice_info;
 	rlist_foreach_entry_reverse(slice_info, &range_info->slices, in_range) {
-		if (vy_index_recover_slice(index, range, slice_info,
-					   run_env, force_recovery) == NULL) {
+		if (vy_lsm_recover_slice(lsm, range, slice_info,
+					 run_env, force_recovery) == NULL) {
 			vy_range_delete(range);
 			range = NULL;
 			goto out;
 		}
 	}
-	vy_index_add_range(index, range);
+	vy_lsm_add_range(lsm, range);
 out:
 	if (begin != NULL)
 		tuple_unref(begin);
@@ -521,13 +520,13 @@ out:
 }
 
 int
-vy_index_recover(struct vy_index *index, struct vy_recovery *recovery,
+vy_lsm_recover(struct vy_lsm *lsm, struct vy_recovery *recovery,
 		 struct vy_run_env *run_env, int64_t lsn,
 		 bool is_checkpoint_recovery, bool force_recovery)
 {
-	assert(index->id < 0);
-	assert(!index->is_committed);
-	assert(index->range_count == 0);
+	assert(lsm->id < 0);
+	assert(!lsm->is_committed);
+	assert(lsm->range_count == 0);
 
 	/*
 	 * Backward compatibility fixup: historically, we used
 	 * box.info.signature for LSN of index creation, which
 	 * lags behind the LSN of the record that created the
 	 * index by 1. So for legacy indexes use the LSN from
 	 * index by 1. So for legacy indexes use the LSN from
 	 * index options.
 	 */
-	if (index->opts.lsn != 0)
-		lsn = index->opts.lsn;
+	if (lsm->opts.lsn != 0)
+		lsn = lsm->opts.lsn;
 
 	/*
-	 * Look up the last incarnation of the index in vylog.
+	 * Look up the last incarnation of the LSM tree in vylog.
 	 */
-	struct vy_index_recovery_info *index_info;
-	index_info = vy_recovery_index_by_id(recovery,
-			index->space_id, index->index_id);
+	struct vy_lsm_recovery_info *lsm_info;
+	lsm_info = vy_recovery_lsm_by_index_id(recovery,
+			lsm->space_id, lsm->index_id);
 	if (is_checkpoint_recovery) {
-		if (index_info == NULL) {
+		if (lsm_info == NULL) {
 			/*
-			 * All indexes created from snapshot rows must
+			 * All LSM trees created from snapshot rows must
 			 * be present in vylog, because a snapshot can
 			 * only succeed if vylog has been successfully
 			 * flushed.
 			 */
 			diag_set(ClientError, ER_INVALID_VYLOG_FILE,
-				 tt_sprintf("Index %u/%u not found",
-					    (unsigned)index->space_id,
-					    (unsigned)index->index_id));
+				 tt_sprintf("LSM tree %u/%u not found",
+					    (unsigned)lsm->space_id,
+					    (unsigned)lsm->index_id));
 			return -1;
 		}
-		if (lsn > index_info->commit_lsn) {
+		if (lsn > lsm_info->commit_lsn) {
 			/*
-			 * The last incarnation of the index was created
-			 * before the last checkpoint, load it now.
+			 * The last incarnation of the LSM tree was
+			 * created before the last checkpoint, load
+			 * it now.
 			 */
-			lsn = index_info->commit_lsn;
+			lsn = lsm_info->commit_lsn;
 		}
 	}
 
-	if (index_info == NULL || lsn > index_info->commit_lsn) {
+	if (lsm_info == NULL || lsn > lsm_info->commit_lsn) {
 		/*
-		 * If we failed to log index creation before restart,
-		 * we won't find it in the log on recovery. This is
-		 * OK as the index doesn't have any runs in this case.
-		 * We will retry to log index in vy_index_commit_create().
+		 * If we failed to log LSM tree creation before restart,
+		 * we won't find it in the log on recovery. This is OK as
+		 * the LSM tree doesn't have any runs in this case. We will
+		 * retry to log the LSM tree in vinyl_index_commit_create().
 		 * For now, just create the initial range and assign id.
 		 */
-		index->id = vy_log_next_id();
-		return vy_index_init_range_tree(index);
+		lsm->id = vy_log_next_id();
+		return vy_lsm_init_range_tree(lsm);
 	}
 
-	index->id = index_info->id;
-	index->is_committed = true;
+	lsm->id = lsm_info->id;
+	lsm->is_committed = true;
 
-	if (lsn < index_info->commit_lsn || index_info->is_dropped) {
+	if (lsn < lsm_info->commit_lsn || lsm_info->is_dropped) {
 		/*
-		 * Loading a past incarnation of the index, i.e.
-		 * the index is going to dropped during final
+		 * Loading a past incarnation of the LSM tree, i.e.
+		 * the LSM tree is going to be dropped during final
 		 * recovery. Mark it as such.
 		 */
-		index->is_dropped = true;
+		lsm->is_dropped = true;
 		/*
-		 * We need range tree initialized for all indexes,
+		 * We need range tree initialized for all LSM trees,
 		 * even for dropped ones.
 		 */
-		return vy_index_init_range_tree(index);
+		return vy_lsm_init_range_tree(lsm);
 	}
 
 	/*
-	 * Loading the last incarnation of the index from vylog.
+	 * Loading the last incarnation of the LSM tree from vylog.
 	 */
-	index->dump_lsn = index_info->dump_lsn;
+	lsm->dump_lsn = lsm_info->dump_lsn;
 
 	int rc = 0;
 	struct vy_range_recovery_info *range_info;
-	rlist_foreach_entry(range_info, &index_info->ranges, in_index) {
-		if (vy_index_recover_range(index, range_info,
-					   run_env, force_recovery) == NULL) {
+	rlist_foreach_entry(range_info, &lsm_info->ranges, in_lsm) {
+		if (vy_lsm_recover_range(lsm, range_info, run_env,
+					 force_recovery) == NULL) {
 			rc = -1;
 			break;
 		}
 	}
 
 	/*
-	 * vy_index_recover_run() elevates reference counter
+	 * vy_lsm_recover_run() elevates reference counter
 	 * of each recovered run. We need to drop the extra
 	 * references once we are done.
 	 */
 	struct vy_run *run;
-	rlist_foreach_entry(run, &index->runs, in_index) {
+	rlist_foreach_entry(run, &lsm->runs, in_lsm) {
 		assert(run->refs > 1);
 		vy_run_unref(run);
 	}
@@ -627,12 +627,12 @@ vy_index_recover(struct vy_index *index, struct vy_recovery *recovery,
 		return -1;
 
 	/*
-	 * Account ranges to the index and check that the range tree
+	 * Account ranges to the LSM tree and check that the range tree
 	 * does not have holes or overlaps.
 	 */
 	struct vy_range *range, *prev = NULL;
-	for (range = vy_range_tree_first(index->tree); range != NULL;
-	     prev = range, range = vy_range_tree_next(index->tree, range)) {
+	for (range = vy_range_tree_first(lsm->tree); range != NULL;
+	     prev = range, range = vy_range_tree_next(lsm->tree, range)) {
 		if (prev == NULL && range->begin != NULL) {
 			diag_set(ClientError, ER_INVALID_VYLOG_FILE,
 				 tt_sprintf("Range %lld is leftmost but "
@@ -644,7 +644,7 @@ vy_index_recover(struct vy_index *index, struct vy_recovery *recovery,
 		if (prev != NULL &&
 		    (prev->end == NULL || range->begin == NULL ||
 		     (cmp = vy_key_compare(prev->end, range->begin,
-					   index->cmp_def)) != 0)) {
+					   lsm->cmp_def)) != 0)) {
 			const char *errmsg = cmp > 0 ?
 				"Nearby ranges %lld and %lld overlap" :
 				"Keys between ranges %lld and %lld not spanned";
@@ -654,12 +654,12 @@ vy_index_recover(struct vy_index *index, struct vy_recovery *recovery,
 					    (long long)range->id));
 			return -1;
 		}
-		vy_index_acct_range(index, range);
+		vy_lsm_acct_range(lsm, range);
 	}
 	if (prev == NULL) {
 		diag_set(ClientError, ER_INVALID_VYLOG_FILE,
-			 tt_sprintf("Index %lld has empty range tree",
-				    (long long)index->id));
+			 tt_sprintf("LSM tree %lld has empty range tree",
+				    (long long)lsm->id));
 		return -1;
 	}
 	if (prev->end != NULL) {
@@ -673,17 +673,17 @@ vy_index_recover(struct vy_index *index, struct vy_recovery *recovery,
 }
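
The validation loop above checks that the recovered ranges tile the whole
key space: the leftmost range starts at minus infinity, the rightmost ends
at plus infinity, and each range begins exactly where its predecessor ends.
A standalone sketch of the same invariant, assuming integer keys in place
of tuples (ranges_are_contiguous is illustrative, not from the source):

    #include <stdbool.h>
    #include <stddef.h>

    struct range {
    	const int *begin;	/* NULL means -inf */
    	const int *end;		/* NULL means +inf */
    };

    /* r[] is sorted by begin, as the range tree would keep it. */
    static bool
    ranges_are_contiguous(const struct range *r, size_t count)
    {
    	if (count == 0)
    		return false;	/* an empty range tree is invalid */
    	if (r[0].begin != NULL || r[count - 1].end != NULL)
    		return false;	/* must span -inf..+inf */
    	for (size_t i = 1; i < count; i++) {
    		if (r[i - 1].end == NULL || r[i].begin == NULL ||
    		    *r[i - 1].end != *r[i].begin)
    			return false;	/* hole or overlap */
    	}
    	return true;
    }
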
 
 int64_t
-vy_index_generation(struct vy_index *index)
+vy_lsm_generation(struct vy_lsm *lsm)
 {
-	struct vy_mem *oldest = rlist_empty(&index->sealed) ? index->mem :
-		rlist_last_entry(&index->sealed, struct vy_mem, in_sealed);
+	struct vy_mem *oldest = rlist_empty(&lsm->sealed) ? lsm->mem :
+		rlist_last_entry(&lsm->sealed, struct vy_mem, in_sealed);
 	return oldest->generation;
 }
 
 int
-vy_index_compact_priority(struct vy_index *index)
+vy_lsm_compact_priority(struct vy_lsm *lsm)
 {
-	struct heap_node *n = vy_range_heap_top(&index->range_heap);
+	struct heap_node *n = vy_range_heap_top(&lsm->range_heap);
 	if (n == NULL)
 		return 0;
 	struct vy_range *range = container_of(n, struct vy_range, heap_node);
@@ -691,98 +691,98 @@ vy_index_compact_priority(struct vy_index *index)
 }
 
 void
-vy_index_add_run(struct vy_index *index, struct vy_run *run)
+vy_lsm_add_run(struct vy_lsm *lsm, struct vy_run *run)
 {
-	assert(rlist_empty(&run->in_index));
-	rlist_add_entry(&index->runs, run, in_index);
-	index->run_count++;
-	vy_disk_stmt_counter_add(&index->stat.disk.count, &run->count);
+	assert(rlist_empty(&run->in_lsm));
+	rlist_add_entry(&lsm->runs, run, in_lsm);
+	lsm->run_count++;
+	vy_disk_stmt_counter_add(&lsm->stat.disk.count, &run->count);
 
-	index->bloom_size += vy_run_bloom_size(run);
-	index->page_index_size += run->page_index_size;
+	lsm->bloom_size += vy_run_bloom_size(run);
+	lsm->page_index_size += run->page_index_size;
 
-	index->env->bloom_size += vy_run_bloom_size(run);
-	index->env->page_index_size += run->page_index_size;
+	lsm->env->bloom_size += vy_run_bloom_size(run);
+	lsm->env->page_index_size += run->page_index_size;
 }
 
 void
-vy_index_remove_run(struct vy_index *index, struct vy_run *run)
+vy_lsm_remove_run(struct vy_lsm *lsm, struct vy_run *run)
 {
-	assert(index->run_count > 0);
-	assert(!rlist_empty(&run->in_index));
-	rlist_del_entry(run, in_index);
-	index->run_count--;
-	vy_disk_stmt_counter_sub(&index->stat.disk.count, &run->count);
+	assert(lsm->run_count > 0);
+	assert(!rlist_empty(&run->in_lsm));
+	rlist_del_entry(run, in_lsm);
+	lsm->run_count--;
+	vy_disk_stmt_counter_sub(&lsm->stat.disk.count, &run->count);
 
-	index->bloom_size -= vy_run_bloom_size(run);
-	index->page_index_size -= run->page_index_size;
+	lsm->bloom_size -= vy_run_bloom_size(run);
+	lsm->page_index_size -= run->page_index_size;
 
-	index->env->bloom_size -= vy_run_bloom_size(run);
-	index->env->page_index_size -= run->page_index_size;
+	lsm->env->bloom_size -= vy_run_bloom_size(run);
+	lsm->env->page_index_size -= run->page_index_size;
 }
 
 void
-vy_index_add_range(struct vy_index *index, struct vy_range *range)
+vy_lsm_add_range(struct vy_lsm *lsm, struct vy_range *range)
 {
 	assert(range->heap_node.pos == UINT32_MAX);
-	vy_range_heap_insert(&index->range_heap, &range->heap_node);
-	vy_range_tree_insert(index->tree, range);
-	index->range_count++;
+	vy_range_heap_insert(&lsm->range_heap, &range->heap_node);
+	vy_range_tree_insert(lsm->tree, range);
+	lsm->range_count++;
 }
 
 void
-vy_index_remove_range(struct vy_index *index, struct vy_range *range)
+vy_lsm_remove_range(struct vy_lsm *lsm, struct vy_range *range)
 {
 	assert(range->heap_node.pos != UINT32_MAX);
-	vy_range_heap_delete(&index->range_heap, &range->heap_node);
-	vy_range_tree_remove(index->tree, range);
-	index->range_count--;
+	vy_range_heap_delete(&lsm->range_heap, &range->heap_node);
+	vy_range_tree_remove(lsm->tree, range);
+	lsm->range_count--;
 }
 
 void
-vy_index_acct_range(struct vy_index *index, struct vy_range *range)
+vy_lsm_acct_range(struct vy_lsm *lsm, struct vy_range *range)
 {
-	histogram_collect(index->run_hist, range->slice_count);
+	histogram_collect(lsm->run_hist, range->slice_count);
 }
 
 void
-vy_index_unacct_range(struct vy_index *index, struct vy_range *range)
+vy_lsm_unacct_range(struct vy_lsm *lsm, struct vy_range *range)
 {
-	histogram_discard(index->run_hist, range->slice_count);
+	histogram_discard(lsm->run_hist, range->slice_count);
 }
 
 int
-vy_index_rotate_mem(struct vy_index *index)
+vy_lsm_rotate_mem(struct vy_lsm *lsm)
 {
 	struct vy_mem *mem;
 
-	assert(index->mem != NULL);
-	mem = vy_mem_new(index->mem->env, *index->env->p_generation,
-			 index->cmp_def, index->mem_format,
-			 index->mem_format_with_colmask,
-			 index->upsert_format, schema_version);
+	assert(lsm->mem != NULL);
+	mem = vy_mem_new(lsm->mem->env, *lsm->env->p_generation,
+			 lsm->cmp_def, lsm->mem_format,
+			 lsm->mem_format_with_colmask,
+			 lsm->upsert_format, schema_version);
 	if (mem == NULL)
 		return -1;
 
-	rlist_add_entry(&index->sealed, index->mem, in_sealed);
-	index->mem = mem;
-	index->mem_list_version++;
+	rlist_add_entry(&lsm->sealed, lsm->mem, in_sealed);
+	lsm->mem = mem;
+	lsm->mem_list_version++;
 	return 0;
 }
 
 void
-vy_index_delete_mem(struct vy_index *index, struct vy_mem *mem)
+vy_lsm_delete_mem(struct vy_lsm *lsm, struct vy_mem *mem)
 {
 	assert(!rlist_empty(&mem->in_sealed));
 	rlist_del_entry(mem, in_sealed);
-	vy_stmt_counter_sub(&index->stat.memory.count, &mem->count);
+	vy_stmt_counter_sub(&lsm->stat.memory.count, &mem->count);
 	vy_mem_delete(mem);
-	index->mem_list_version++;
+	lsm->mem_list_version++;
 }
 
 int
-vy_index_set(struct vy_index *index, struct vy_mem *mem,
-	     const struct tuple *stmt, const struct tuple **region_stmt)
+vy_lsm_set(struct vy_lsm *lsm, struct vy_mem *mem,
+	   const struct tuple *stmt, const struct tuple **region_stmt)
 {
 	assert(vy_stmt_is_refable(stmt));
 	assert(*region_stmt == NULL || !vy_stmt_is_refable(*region_stmt));
@@ -796,7 +796,7 @@ vy_index_set(struct vy_index *index, struct vy_mem *mem,
 	}
 
 	/* We can't free region_stmt below, so let's add it to the stats */
-	index->stat.memory.count.bytes += tuple_size(stmt);
+	lsm->stat.memory.count.bytes += tuple_size(stmt);
 
 	uint32_t format_id = stmt->format_id;
 	if (vy_stmt_type(*region_stmt) != IPROTO_UPSERT) {
@@ -820,15 +820,15 @@ vy_index_set(struct vy_index *index, struct vy_mem *mem,
 /**
  * Calculate and record the number of sequential upserts, squash
  * immediately or schedule upsert process if needed.
- * Additional handler used in vy_index_commit_stmt() for UPSERT
+ * Additional handler used in vy_lsm_commit_stmt() for UPSERT
  * statements.
  *
- * @param index Index the statement was committed to.
+ * @param lsm   LSM tree the statement was committed to.
  * @param mem   In-memory tree where the statement was saved.
  * @param stmt  UPSERT statement to squash.
  */
 static void
-vy_index_commit_upsert(struct vy_index *index, struct vy_mem *mem,
+vy_lsm_commit_upsert(struct vy_lsm *lsm, struct vy_mem *mem,
 		       const struct tuple *stmt)
 {
 	assert(vy_stmt_type(stmt) == IPROTO_UPSERT);
@@ -837,7 +837,7 @@ vy_index_commit_upsert(struct vy_index *index, struct vy_mem *mem,
 	 * UPSERT is enabled only for the spaces with the single
 	 * index.
 	 */
-	assert(index->index_id == 0);
+	assert(lsm->index_id == 0);
 
 	const struct tuple *older;
 	int64_t lsn = vy_stmt_lsn(stmt);
@@ -870,15 +870,15 @@ vy_index_commit_upsert(struct vy_index *index, struct vy_mem *mem,
 		assert(older != NULL && vy_stmt_type(older) == IPROTO_UPSERT &&
 		       vy_stmt_n_upserts(older) == VY_UPSERT_THRESHOLD - 1);
 #endif
-		if (index->env->upsert_thresh_cb == NULL) {
+		if (lsm->env->upsert_thresh_cb == NULL) {
 			/* Squash callback is not installed. */
 			return;
 		}
 
-		struct tuple *dup = vy_stmt_dup(stmt, index->upsert_format);
+		struct tuple *dup = vy_stmt_dup(stmt, lsm->upsert_format);
 		if (dup != NULL) {
-			index->env->upsert_thresh_cb(index, dup,
-					index->env->upsert_thresh_arg);
+			lsm->env->upsert_thresh_cb(lsm, dup,
+					lsm->env->upsert_thresh_arg);
 			tuple_unref(dup);
 		}
 		/*
@@ -893,15 +893,15 @@ vy_index_commit_upsert(struct vy_index *index, struct vy_mem *mem,
 	 * then we can turn the UPSERT into the REPLACE.
 	 */
 	if (n_upserts == 0 &&
-	    index->stat.memory.count.rows == index->mem->count.rows &&
-	    index->run_count == 0) {
+	    lsm->stat.memory.count.rows == lsm->mem->count.rows &&
+	    lsm->run_count == 0) {
 		older = vy_mem_older_lsn(mem, stmt);
 		assert(older == NULL || vy_stmt_type(older) != IPROTO_UPSERT);
 		struct tuple *upserted =
-			vy_apply_upsert(stmt, older, index->cmp_def,
-					index->mem_format,
-					index->upsert_format, false);
-		index->stat.upsert.applied++;
+			vy_apply_upsert(stmt, older, lsm->cmp_def,
+					lsm->mem_format,
+					lsm->upsert_format, false);
+		lsm->stat.upsert.applied++;
 
 		if (upserted == NULL) {
 			/* OOM */
@@ -934,53 +934,53 @@ vy_index_commit_upsert(struct vy_index *index, struct vy_mem *mem,
 			return;
 		}
 
-		int rc = vy_index_set(index, mem, upserted, &region_stmt);
+		int rc = vy_lsm_set(lsm, mem, upserted, &region_stmt);
 		/**
 		 * Since we have already allocated the mem statement
 		 * and are now replacing one statement with another,
-		 * vy_index_set() cannot fail.
+		 * vy_lsm_set() cannot fail.
 		 */
 		assert(rc == 0); (void)rc;
 		tuple_unref(upserted);
 		vy_mem_commit_stmt(mem, region_stmt);
-		index->stat.upsert.squashed++;
+		lsm->stat.upsert.squashed++;
 	}
 }
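
The squash trigger above depends on every UPSERT carrying a counter of the
sequential UPSERTs for the same key beneath it. A simplified sketch of that
accounting, assuming a saturating counter so the callback fires exactly
once per chain (the names and the threshold value are illustrative, not
the vinyl constants):

    #include <stdint.h>

    enum { UPSERT_THRESHOLD = 128 };

    struct stmt {
    	uint8_t n_upserts;	/* older sequential UPSERTs */
    };

    typedef void (*squash_cb)(void *arg);

    static void
    account_upsert(struct stmt *new_stmt, const struct stmt *older,
    	       squash_cb cb, void *cb_arg)
    {
    	uint8_t n = 0;
    	if (older != NULL) {
    		n = older->n_upserts + 1;
    		/* Saturate so long chains are squashed only once. */
    		if (n > UPSERT_THRESHOLD + 1)
    			n = UPSERT_THRESHOLD + 1;
    	}
    	new_stmt->n_upserts = n;
    	if (n == UPSERT_THRESHOLD && cb != NULL)
    		cb(cb_arg);	/* schedule background squashing */
    }
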
 
 void
-vy_index_commit_stmt(struct vy_index *index, struct vy_mem *mem,
+vy_lsm_commit_stmt(struct vy_lsm *lsm, struct vy_mem *mem,
 		     const struct tuple *stmt)
 {
 	vy_mem_commit_stmt(mem, stmt);
 
-	index->stat.memory.count.rows++;
+	lsm->stat.memory.count.rows++;
 
 	if (vy_stmt_type(stmt) == IPROTO_UPSERT)
-		vy_index_commit_upsert(index, mem, stmt);
+		vy_lsm_commit_upsert(lsm, mem, stmt);
 
-	vy_stmt_counter_acct_tuple(&index->stat.put, stmt);
+	vy_stmt_counter_acct_tuple(&lsm->stat.put, stmt);
 
 	/* Invalidate cache element. */
-	vy_cache_on_write(&index->cache, stmt, NULL);
+	vy_cache_on_write(&lsm->cache, stmt, NULL);
 }
 
 void
-vy_index_rollback_stmt(struct vy_index *index, struct vy_mem *mem,
+vy_lsm_rollback_stmt(struct vy_lsm *lsm, struct vy_mem *mem,
 		       const struct tuple *stmt)
 {
 	vy_mem_rollback_stmt(mem, stmt);
 
 	/* Invalidate cache element. */
-	vy_cache_on_write(&index->cache, stmt, NULL);
+	vy_cache_on_write(&lsm->cache, stmt, NULL);
 }
 
 bool
-vy_index_split_range(struct vy_index *index, struct vy_range *range)
+vy_lsm_split_range(struct vy_lsm *lsm, struct vy_range *range)
 {
-	struct tuple_format *key_format = index->env->key_format;
+	struct tuple_format *key_format = lsm->env->key_format;
 
 	const char *split_key_raw;
-	if (!vy_range_needs_split(range, &index->opts, &split_key_raw))
+	if (!vy_range_needs_split(range, &lsm->opts, &split_key_raw))
 		return false;
 
 	/* Split a range in two parts. */
@@ -1007,7 +1007,7 @@ vy_index_split_range(struct vy_index *index, struct vy_range *range)
 	struct vy_range *part, *parts[2] = {NULL, };
 	for (int i = 0; i < n_parts; i++) {
 		part = vy_range_new(vy_log_next_id(), keys[i], keys[i + 1],
-				    index->cmp_def);
+				    lsm->cmp_def);
 		if (part == NULL)
 			goto fail;
 		parts[i] = part;
@@ -1018,7 +1018,7 @@ vy_index_split_range(struct vy_index *index, struct vy_range *range)
 		 */
 		rlist_foreach_entry_reverse(slice, &range->slices, in_range) {
 			if (vy_slice_cut(slice, vy_log_next_id(), part->begin,
-					 part->end, index->cmp_def,
+					 part->end, lsm->cmp_def,
 					 &new_slice) != 0)
 				goto fail;
 			if (new_slice != NULL)
@@ -1036,7 +1036,7 @@ vy_index_split_range(struct vy_index *index, struct vy_range *range)
 	vy_log_delete_range(range->id);
 	for (int i = 0; i < n_parts; i++) {
 		part = parts[i];
-		vy_log_insert_range(index->id, part->id,
+		vy_log_insert_range(lsm->id, part->id,
 				    tuple_data_or_null(part->begin),
 				    tuple_data_or_null(part->end));
 		rlist_foreach_entry(slice, &part->slices, in_range)
@@ -1048,19 +1048,19 @@ vy_index_split_range(struct vy_index *index, struct vy_range *range)
 		goto fail;
 
 	/*
-	 * Replace the old range in the index.
+	 * Replace the old range in the LSM tree.
 	 */
-	vy_index_unacct_range(index, range);
-	vy_index_remove_range(index, range);
+	vy_lsm_unacct_range(lsm, range);
+	vy_lsm_remove_range(lsm, range);
 
 	for (int i = 0; i < n_parts; i++) {
 		part = parts[i];
-		vy_index_add_range(index, part);
-		vy_index_acct_range(index, part);
+		vy_lsm_add_range(lsm, part);
+		vy_lsm_acct_range(lsm, part);
 	}
-	index->range_tree_version++;
+	lsm->range_tree_version++;
 
-	say_info("%s: split range %s by key %s", vy_index_name(index),
+	say_info("%s: split range %s by key %s", vy_lsm_name(lsm),
 		 vy_range_str(range), tuple_str(split_key));
 
 	rlist_foreach_entry(slice, &range->slices, in_range)
@@ -1078,34 +1078,34 @@ fail:
 
 	diag_log();
 	say_error("%s: failed to split range %s",
-		  vy_index_name(index), vy_range_str(range));
+		  vy_lsm_name(lsm), vy_range_str(range));
 	return false;
 }
 
 bool
-vy_index_coalesce_range(struct vy_index *index, struct vy_range *range)
+vy_lsm_coalesce_range(struct vy_lsm *lsm, struct vy_range *range)
 {
 	struct vy_range *first, *last;
-	if (!vy_range_needs_coalesce(range, index->tree, &index->opts,
+	if (!vy_range_needs_coalesce(range, lsm->tree, &lsm->opts,
 				     &first, &last))
 		return false;
 
 	struct vy_range *result = vy_range_new(vy_log_next_id(),
-			first->begin, last->end, index->cmp_def);
+			first->begin, last->end, lsm->cmp_def);
 	if (result == NULL)
 		goto fail_range;
 
 	struct vy_range *it;
-	struct vy_range *end = vy_range_tree_next(index->tree, last);
+	struct vy_range *end = vy_range_tree_next(lsm->tree, last);
 
 	/*
 	 * Log change in metadata.
 	 */
 	vy_log_tx_begin();
-	vy_log_insert_range(index->id, result->id,
+	vy_log_insert_range(lsm->id, result->id,
 			    tuple_data_or_null(result->begin),
 			    tuple_data_or_null(result->end));
-	for (it = first; it != end; it = vy_range_tree_next(index->tree, it)) {
+	for (it = first; it != end; it = vy_range_tree_next(lsm->tree, it)) {
 		struct vy_slice *slice;
 		rlist_foreach_entry(slice, &it->slices, in_range)
 			vy_log_delete_slice(slice->id);
@@ -1125,9 +1125,9 @@ vy_index_coalesce_range(struct vy_index *index, struct vy_range *range)
 	 */
 	it = first;
 	while (it != end) {
-		struct vy_range *next = vy_range_tree_next(index->tree, it);
-		vy_index_unacct_range(index, it);
-		vy_index_remove_range(index, it);
+		struct vy_range *next = vy_range_tree_next(lsm->tree, it);
+		vy_lsm_unacct_range(lsm, it);
+		vy_lsm_remove_range(lsm, it);
 		rlist_splice(&result->slices, &it->slices);
 		result->slice_count += it->slice_count;
 		vy_disk_stmt_counter_add(&result->count, &it->count);
@@ -1141,12 +1141,12 @@ vy_index_coalesce_range(struct vy_index *index, struct vy_range *range)
 	 * as soon as we can.
 	 */
 	result->compact_priority = result->slice_count;
-	vy_index_acct_range(index, result);
-	vy_index_add_range(index, result);
-	index->range_tree_version++;
+	vy_lsm_acct_range(lsm, result);
+	vy_lsm_add_range(lsm, result);
+	lsm->range_tree_version++;
 
 	say_info("%s: coalesced ranges %s",
-		 vy_index_name(index), vy_range_str(result));
+		 vy_lsm_name(lsm), vy_range_str(result));
 	return true;
 
 fail_commit:
@@ -1154,6 +1154,6 @@ fail_commit:
 fail_range:
 	diag_log();
 	say_error("%s: failed to coalesce range %s",
-		  vy_index_name(index), vy_range_str(range));
+		  vy_lsm_name(lsm), vy_range_str(range));
 	return false;
 }
diff --git a/src/box/vy_index.h b/src/box/vy_lsm.h
similarity index 59%
rename from src/box/vy_index.h
rename to src/box/vy_lsm.h
index 33d1da4a..dcfbcb7b 100644
--- a/src/box/vy_index.h
+++ b/src/box/vy_lsm.h
@@ -1,5 +1,5 @@
-#ifndef INCLUDES_TARANTOOL_BOX_VY_INDEX_H
-#define INCLUDES_TARANTOOL_BOX_VY_INDEX_H
+#ifndef INCLUDES_TARANTOOL_BOX_VY_LSM_H
+#define INCLUDES_TARANTOOL_BOX_VY_LSM_H
 /*
  * Copyright 2010-2017, Tarantool AUTHORS, please see AUTHORS file.
  *
@@ -52,7 +52,7 @@ extern "C" {
 struct histogram;
 struct tuple;
 struct tuple_format;
-struct vy_index;
+struct vy_lsm;
 struct vy_mem;
 struct vy_mem_env;
 struct vy_recovery;
@@ -60,10 +60,10 @@ struct vy_run;
 struct vy_run_env;
 
 typedef void
-(*vy_upsert_thresh_cb)(struct vy_index *index, struct tuple *stmt, void *arg);
+(*vy_upsert_thresh_cb)(struct vy_lsm *lsm, struct tuple *stmt, void *arg);
 
-/** Common index environment. */
-struct vy_index_env {
+/** Common LSM tree environment. */
+struct vy_lsm_env {
 	/** Path to the data directory. */
 	const char *path;
 	/** Memory generation counter. */
@@ -84,27 +84,29 @@ struct vy_index_env {
 	vy_upsert_thresh_cb upsert_thresh_cb;
 	/** Argument passed to upsert_thresh_cb. */
 	void *upsert_thresh_arg;
-	/** Number of indexes in this environment. */
-	int index_count;
+	/** Number of LSM trees in this environment. */
+	int lsm_count;
 	/** Size of memory used for bloom filters. */
 	size_t bloom_size;
 	/** Size of memory used for page index. */
 	size_t page_index_size;
 };
 
-/** Create a common index environment. */
+/** Create a common LSM tree environment. */
 int
-vy_index_env_create(struct vy_index_env *env, const char *path,
-		    int64_t *p_generation,
-		    vy_upsert_thresh_cb upsert_thresh_cb,
-		    void *upsert_thresh_arg);
+vy_lsm_env_create(struct vy_lsm_env *env, const char *path,
+		  int64_t *p_generation,
+		  vy_upsert_thresh_cb upsert_thresh_cb,
+		  void *upsert_thresh_arg);
 
-/** Destroy a common index environment. */
+/** Destroy a common LSM tree environment. */
 void
-vy_index_env_destroy(struct vy_index_env *env);
+vy_lsm_env_destroy(struct vy_lsm_env *env);
 
 /**
  * A struct for primary and secondary Vinyl indexes.
+ * Named after the data structure used for organizing
+ * data on disk: the log-structured merge-tree (LSM tree).
  *
  * Vinyl primary and secondary indexes work differently:
  *
@@ -125,7 +127,7 @@ vy_index_env_destroy(struct vy_index_env *env);
  * When a search in a secondary index is made, we first look up
  * the secondary index tuple, containing the primary key, and then
  * use this key to find the original tuple in the primary index.
-
+ *
  * While the primary index has only one key_def that is
  * used for validating and comparing tuples, secondary index needs
  * two:
@@ -140,19 +142,19 @@ vy_index_env_destroy(struct vy_index_env *env);
  *   parts concatenated together construe the tuple of the
  *   secondary key, i.e. the tuple stored. This is key_def.
  */
-struct vy_index {
-	/** Common index environment. */
-	struct vy_index_env *env;
+struct vy_lsm {
+	/** Common LSM tree environment. */
+	struct vy_lsm_env *env;
 	/**
-	 * Reference counter. Used to postpone index deletion
+	 * Reference counter. Used to postpone LSM tree deletion
 	 * until all pending operations have completed.
 	 */
 	int refs;
-	/** Unique ID of this index. */
+	/** Unique ID of this LSM tree. */
 	int64_t id;
-	/** Ordinal index number in the index array. */
+	/** ID of the index this LSM tree is for. */
 	uint32_t index_id;
-	/** ID of the space this index belongs to. */
+	/** ID of the space this LSM tree is for. */
 	uint32_t space_id;
 	/** Index options. */
 	struct index_opts opts;
@@ -161,14 +163,15 @@ struct vy_index {
 	/** Key definition passed by the user. */
 	struct key_def *key_def;
 	/**
-	 * If the following flag is set the index is unique and
-	 * it must be checked for duplicates on INSERT. Otherwise,
-	 * the check can be skipped, either because this index
-	 * is not unique or it is a part of another unique index.
+	 * If the following flag is set, the index this LSM tree
+	 * is created for is unique and it must be checked for
+	 * duplicates on INSERT. Otherwise, the check can be skipped,
+	 * either because the index is not unique or it is a part
+	 * of another unique index.
 	 */
 	bool check_is_unique;
 	/**
-	 * Tuple format for tuples of this index created when
+	 * Tuple format for tuples of this LSM tree created when
 	 * reading pages from disk.
 	 * Is distinct from mem_format only for secondary keys,
 	 * whose tuples have MP_NIL in all "gap" positions between
@@ -180,7 +183,7 @@ struct vy_index {
 	 * tuples.
 	 */
 	struct tuple_format *disk_format;
-	/** Tuple format of the space this index belongs to. */
+	/** Tuple format of the space this LSM tree belongs to. */
 	struct tuple_format *mem_format;
 	/**
 	 * Format for tuples of type REPLACE or DELETE which
@@ -196,14 +199,16 @@ struct vy_index {
 	 */
 	struct tuple_format *upsert_format;
 	/**
-	 * Primary index of the same space or NULL if this index
-	 * is primary. Referenced by each secondary index.
+	 * If this LSM tree is for a secondary index, the following
+	 * variable points to the LSM tree of the primary index of
+	 * the same space, otherwise it is set to NULL. Referenced
+	 * by each secondary index.
 	 */
-	struct vy_index *pk;
-	/** Index statistics. */
-	struct vy_index_stat stat;
+	struct vy_lsm *pk;
+	/** LSM tree statistics. */
+	struct vy_lsm_stat stat;
 	/**
-	 * Merge cache of this index. Contains hottest tuples
+	 * Merge cache of this LSM tree. Contains hottest tuples
 	 * with continuation markers.
 	 */
 	struct vy_cache cache;
@@ -216,23 +221,23 @@ struct vy_index {
 	 */
 	struct rlist sealed;
 	/**
-	 * Tree of all ranges of this index, linked by
+	 * Tree of all ranges of this LSM tree, linked by
 	 * vy_range->tree_node, ordered by vy_range->begin.
 	 */
 	vy_range_tree_t *tree;
-	/** Number of ranges in this index. */
+	/** Number of ranges in this LSM tree. */
 	int range_count;
 	/** Heap of ranges, prioritized by compact_priority. */
 	heap_t range_heap;
 	/**
-	 * List of all runs created for this index,
-	 * linked by vy_run->in_index.
+	 * List of all runs created for this LSM tree,
+	 * linked by vy_run->in_lsm.
 	 */
 	struct rlist runs;
 	/** Number of runs in this LSM tree. */
 	int run_count;
 	/**
-	 * Histogram accounting how many ranges of the index
+	 * Histogram accounting how many ranges of the LSM tree
 	 * have a particular number of runs.
 	 */
 	struct histogram *run_hist;
@@ -251,182 +256,182 @@ struct vy_index {
 	 */
 	uint32_t range_tree_version;
 	/**
-	 * LSN of the last dump or -1 if the index has not
+	 * LSN of the last dump or -1 if the LSM tree has not
 	 * been dumped yet.
 	 */
 	int64_t dump_lsn;
 	/*
-	 * This flag is set if the index creation was
+	 * This flag is set if the LSM tree creation was
 	 * committed to the metadata log.
 	 */
 	bool is_committed;
 	/**
-	 * This flag is set if the index was dropped.
-	 * It is also set on local recovery if the index
+	 * This flag is set if the LSM tree was dropped.
+	 * It is also set on local recovery if the LSM tree
 	 * will be dropped when WAL is replayed.
 	 */
 	bool is_dropped;
 	/**
-	 * If pin_count > 0 the index can't be scheduled for dump.
+	 * If pin_count > 0 the LSM tree can't be scheduled for dump.
 	 * Used to make sure that the primary index is dumped last.
 	 */
 	int pin_count;
-	/** Set if the index is currently being dumped. */
+	/** Set if the LSM tree is currently being dumped. */
 	bool is_dumping;
 	/** Link in vy_scheduler->dump_heap. */
 	struct heap_node in_dump;
 	/** Link in vy_scheduler->compact_heap. */
 	struct heap_node in_compact;
 	/**
-	 * Interval tree containing reads from this index done by all
-	 * active transactions. Linked by vy_tx_interval->in_index.
+	 * Interval tree containing reads from this LSM tree done by
+	 * all active transactions. Linked by vy_tx_interval->in_lsm.
 	 * Used to abort transactions that conflict with a write to
-	 * the index.
+	 * this LSM tree.
 	 */
-	vy_index_read_set_t read_set;
+	vy_lsm_read_set_t read_set;
 };
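
To make the key_def/cmp_def distinction described above concrete: for a
hypothetical space with a primary key over field 0 and a secondary index
over field 2, the secondary LSM tree compares statements by the indexed
field first and falls back to the primary-key field to order duplicates.
A toy sketch with field numbers standing in for real key parts (the types
below are simplified stand-ins, not the real key_def):

    #include <stddef.h>

    struct keydef {
    	size_t part_count;
    	int parts[2];		/* field numbers */
    };

    /* What the user declared: just the indexed field. */
    static const struct keydef key_def = { 1, { 2 } };

    /* What the LSM tree sorts by: indexed field, then primary key. */
    static const struct keydef cmp_def = { 2, { 2, 0 } };

    static int
    stmt_compare(const int *a, const int *b, const struct keydef *def)
    {
    	for (size_t i = 0; i < def->part_count; i++) {
    		int fieldno = def->parts[i];
    		if (a[fieldno] != b[fieldno])
    			return a[fieldno] < b[fieldno] ? -1 : 1;
    	}
    	return 0;
    }
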
 
 /**
- * Assert if an index formats are inconsistent.
- * @param index Index to validate.
+ * Assert if an LSM tree's formats are inconsistent.
+ * @param lsm LSM tree to validate.
  */
 void
-vy_index_validate_formats(const struct vy_index *index);
+vy_lsm_validate_formats(const struct vy_lsm *lsm);
 
-/** Return index name. Used for logging. */
+/** Return LSM tree name. Used for logging. */
 const char *
-vy_index_name(struct vy_index *index);
+vy_lsm_name(struct vy_lsm *lsm);
 
 /** Return the total size of memory tree extents. */
 size_t
-vy_index_mem_tree_size(struct vy_index *index);
+vy_lsm_mem_tree_size(struct vy_lsm *lsm);
 
-/** Allocate a new index object. */
-struct vy_index *
-vy_index_new(struct vy_index_env *index_env, struct vy_cache_env *cache_env,
+/** Allocate a new LSM tree object. */
+struct vy_lsm *
+vy_lsm_new(struct vy_lsm_env *lsm_env, struct vy_cache_env *cache_env,
 	     struct vy_mem_env *mem_env, struct index_def *index_def,
-	     struct tuple_format *format, struct vy_index *pk);
+	     struct tuple_format *format, struct vy_lsm *pk);
 
-/** Free an index object. */
+/** Free an LSM tree object. */
 void
-vy_index_delete(struct vy_index *index);
+vy_lsm_delete(struct vy_lsm *lsm);
 
 /**
- * Increment the reference counter of a vinyl index.
- * An index cannot be deleted if its reference counter
- * is elevated.
+ * Increment the reference counter of an LSM tree.
+ * An LSM tree cannot be deleted if its reference
+ * counter is elevated.
  */
 static inline void
-vy_index_ref(struct vy_index *index)
+vy_lsm_ref(struct vy_lsm *lsm)
 {
-	assert(index->refs >= 0);
-	index->refs++;
+	assert(lsm->refs >= 0);
+	lsm->refs++;
 }
 
 /**
- * Decrement the reference counter of a vinyl index.
- * If the reference counter reaches 0, the index is
- * deleted with vy_index_delete().
+ * Decrement the reference counter of an LSM tree.
+ * If the reference counter reaches 0, the LSM tree
+ * is deleted with vy_lsm_delete().
  */
 static inline void
-vy_index_unref(struct vy_index *index)
+vy_lsm_unref(struct vy_lsm *lsm)
 {
-	assert(index->refs > 0);
-	if (--index->refs == 0)
-		vy_index_delete(index);
+	assert(lsm->refs > 0);
+	if (--lsm->refs == 0)
+		vy_lsm_delete(lsm);
 }
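
A short usage sketch of this refcounting discipline, with a plain struct
in place of struct vy_lsm: the owner holds one reference, a background
task takes another before yielding, and the object is freed only when the
last reference goes away. All names here are illustrative:

    #include <assert.h>
    #include <stdlib.h>

    struct obj {
    	int refs;
    };

    static void
    obj_ref(struct obj *o)
    {
    	assert(o->refs >= 0);
    	o->refs++;
    }

    static void
    obj_unref(struct obj *o)
    {
    	assert(o->refs > 0);
    	if (--o->refs == 0)
    		free(o);	/* last reference: delete */
    }

    int
    main(void)
    {
    	struct obj *o = calloc(1, sizeof(*o));
    	if (o == NULL)
    		return 1;
    	o->refs = 1;	/* owner's reference */
    	obj_ref(o);	/* dump task pins the object */
    	obj_unref(o);	/* owner drops it: object survives */
    	obj_unref(o);	/* task finishes: freed here */
    	return 0;
    }
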
 
 /**
- * Create a new vinyl index.
+ * Create a new LSM tree.
  *
- * This function is called when an index is created after recovery
- * is complete or during remote recovery. It initializes the range
- * tree and makes the index directory.
+ * This function is called when an LSM tree is created
+ * after recovery is complete or during remote recovery.
+ * It initializes the range tree and makes the LSM tree
+ * directory.
  */
 int
-vy_index_create(struct vy_index *index);
+vy_lsm_create(struct vy_lsm *lsm);
 
 /**
- * Load a vinyl index from disk. Called on local recovery.
+ * Load an LSM tree from disk. Called on local recovery.
  *
- * This function retrieves the index structure from the
- * metadata log, rebuilds the range tree, and opens run
- * files.
+ * This function retrieves the LSM tree structure from the
+ * metadata log, rebuilds the range tree, and opens run files.
  *
- * If @is_checkpoint_recovery is set, the index is recovered from
- * the last snapshot. In particular, this means that the index
- * must have been logged in the metadata log and so if the
+ * If @is_checkpoint_recovery is set, the LSM tree is recovered
+ * from the last snapshot. In particular, this means that the LSM
+ * tree must have been logged in the metadata log and so if the
  * function does not find it in the recovery context, it will
- * fail. If the flag is unset, the index is recovered from a
- * WAL, in which case a missing index is OK - it just means we
+ * fail. If the flag is unset, the LSM tree is recovered from a
+ * WAL, in which case a missing LSM tree is OK - it just means we
  * failed to log it before restart and have to retry during
  * WAL replay.
  *
- * @lsn is the LSN of the row that created the index.
- * If the index is recovered from a snapshot, it is set
+ * @lsn is the LSN of the WAL row that created the LSM tree.
+ * If the LSM tree is recovered from a snapshot, it is set
  * to the snapshot signature.
  */
 int
-vy_index_recover(struct vy_index *index, struct vy_recovery *recovery,
+vy_lsm_recover(struct vy_lsm *lsm, struct vy_recovery *recovery,
 		 struct vy_run_env *run_env, int64_t lsn,
 		 bool is_checkpoint_recovery, bool force_recovery);
 
 /**
- * Return generation of in-memory data stored in an index
+ * Return generation of in-memory data stored in an LSM tree
  * (min over vy_mem->generation).
  */
 int64_t
-vy_index_generation(struct vy_index *index);
+vy_lsm_generation(struct vy_lsm *lsm);
 
-/** Return max compact_priority among ranges of an index. */
+/** Return max compact_priority among ranges of an LSM tree. */
 int
-vy_index_compact_priority(struct vy_index *index);
+vy_lsm_compact_priority(struct vy_lsm *lsm);
 
-/** Add a run to the list of runs of an index. */
+/** Add a run to the list of runs of an LSM tree. */
 void
-vy_index_add_run(struct vy_index *index, struct vy_run *run);
+vy_lsm_add_run(struct vy_lsm *lsm, struct vy_run *run);
 
-/** Remove a run from the list of runs of an index. */
+/** Remove a run from the list of runs of an LSM tree. */
 void
-vy_index_remove_run(struct vy_index *index, struct vy_run *run);
+vy_lsm_remove_run(struct vy_lsm *lsm, struct vy_run *run);
 
 /**
  * Add a range to both the range tree and the range heap
- * of an index.
+ * of an LSM tree.
  */
 void
-vy_index_add_range(struct vy_index *index, struct vy_range *range);
+vy_lsm_add_range(struct vy_lsm *lsm, struct vy_range *range);
 
 /**
  * Remove a range from both the range tree and the range
- * heap of an index.
+ * heap of an LSM tree.
  */
 void
-vy_index_remove_range(struct vy_index *index, struct vy_range *range);
+vy_lsm_remove_range(struct vy_lsm *lsm, struct vy_range *range);
 
-/** Account a range to the run histogram of an index. */
+/** Account a range to the run histogram of an LSM tree. */
 void
-vy_index_acct_range(struct vy_index *index, struct vy_range *range);
+vy_lsm_acct_range(struct vy_lsm *lsm, struct vy_range *range);
 
-/** Unaccount a range from the run histogram of an index. */
+/** Unaccount a range from the run histogram of an LSM tree. */
 void
-vy_index_unacct_range(struct vy_index *index, struct vy_range *range);
+vy_lsm_unacct_range(struct vy_lsm *lsm, struct vy_range *range);
 
 /**
- * Allocate a new active in-memory index for an index while moving
- * the old one to the sealed list. Used by the dump task in order
- * not to bother about synchronization with concurrent insertions
- * while an index is being dumped.
+ * Allocate a new active in-memory index for an LSM tree while
+ * moving the old one to the sealed list. Used by the dump task
+ * so that it does not have to synchronize with concurrent
+ * insertions while an LSM tree is being dumped.
  */
 int
-vy_index_rotate_mem(struct vy_index *index);
+vy_lsm_rotate_mem(struct vy_lsm *lsm);
 
 /**
- * Remove an in-memory tree from the sealed list of a vinyl index,
+ * Remove an in-memory tree from the sealed list of an LSM tree,
  * unaccount and delete it.
  */
 void
-vy_index_delete_mem(struct vy_index *index, struct vy_mem *mem);
+vy_lsm_delete_mem(struct vy_lsm *lsm, struct vy_mem *mem);
 
 /**
  * Split a range if it has grown too big, return true if the range
@@ -436,7 +441,7 @@ vy_index_delete_mem(struct vy_index *index, struct vy_mem *mem);
  * operations, like writing a run file, and is done immediately.
  */
 bool
-vy_index_split_range(struct vy_index *index, struct vy_range *range);
+vy_lsm_split_range(struct vy_lsm *lsm, struct vy_range *range);
 
 /**
 * Coalesce a range with one or more of its neighbors if it is too small,
@@ -449,16 +454,16 @@ vy_index_split_range(struct vy_index *index, struct vy_range *range);
  * case of merging key ranges.
  */
 bool
-vy_index_coalesce_range(struct vy_index *index, struct vy_range *range);
+vy_lsm_coalesce_range(struct vy_lsm *lsm, struct vy_range *range);
 
 /**
- * Insert a statement into the index's in-memory tree. If the
- * region_stmt is NULL and the statement is successfully inserted
+ * Insert a statement into the in-memory index of an LSM tree. If
+ * the region_stmt is NULL and the statement is successfully inserted
  * then the new lsregion statement is returned via @a region_stmt.
- * Either vy_index_commit_stmt() or vy_index_rollback_stmt() must
+ * Either vy_lsm_commit_stmt() or vy_lsm_rollback_stmt() must
  * be called on success.
  *
- * @param index       Index the statement is for.
+ * @param lsm         LSM tree the statement is for.
  * @param mem         In-memory tree to insert the statement into.
  * @param stmt        Statement, allocated on malloc().
  * @param region_stmt NULL or the same statement, allocated on
@@ -468,33 +473,34 @@ vy_index_coalesce_range(struct vy_index *index, struct vy_range *range);
  * @retval -1 Memory error.
  */
 int
-vy_index_set(struct vy_index *index, struct vy_mem *mem,
-	     const struct tuple *stmt, const struct tuple **region_stmt);
+vy_lsm_set(struct vy_lsm *lsm, struct vy_mem *mem,
+	   const struct tuple *stmt, const struct tuple **region_stmt);
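
A hedged sketch of the intended call sequence for the three functions
above: insert into the in-memory tree, then confirm on WAL success or undo
on failure. The helpers below (mem_set, wal_write, and friends) are
hypothetical stand-ins, not the vinyl API:

    #include <stdbool.h>
    #include <stddef.h>

    struct mem;
    struct stmt;

    /* Stand-in for vy_lsm_set(): "moves" the statement to lsregion. */
    static int
    mem_set(struct mem *mem, const struct stmt *stmt,
    	const struct stmt **region_stmt)
    {
    	(void)mem;
    	*region_stmt = stmt;
    	return 0;
    }

    static void
    mem_commit(struct mem *mem, const struct stmt *stmt)
    {
    	(void)mem; (void)stmt;
    }

    static void
    mem_rollback(struct mem *mem, const struct stmt *stmt)
    {
    	(void)mem; (void)stmt;
    }

    static bool
    wal_write(const struct stmt *stmt)
    {
    	(void)stmt;
    	return true;
    }

    static int
    tx_apply(struct mem *mem, const struct stmt *stmt)
    {
    	const struct stmt *region_stmt = NULL;
    	if (mem_set(mem, stmt, &region_stmt) != 0)
    		return -1;	/* memory error */
    	if (!wal_write(region_stmt)) {
    		mem_rollback(mem, region_stmt);	/* undo on failure */
    		return -1;
    	}
    	mem_commit(mem, region_stmt);	/* confirm on success */
    	return 0;
    }
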
 
 /**
- * Confirm that the statement stays in the index's in-memory tree.
+ * Confirm that the statement stays in the in-memory index of
+ * an LSM tree.
  *
- * @param index Index the statement is for.
+ * @param lsm   LSM tree the statement is for.
  * @param mem   In-memory tree where the statement was saved.
  * @param stmt  Statement allocated from lsregion.
  */
 void
-vy_index_commit_stmt(struct vy_index *index, struct vy_mem *mem,
-		     const struct tuple *stmt);
+vy_lsm_commit_stmt(struct vy_lsm *lsm, struct vy_mem *mem,
+		   const struct tuple *stmt);
 
 /**
- * Erase a statement from the index's in-memory tree.
+ * Erase a statement from the in-memory index of an LSM tree.
  *
- * @param index Index to erase from.
+ * @param lsm   LSM tree to erase from.
  * @param mem   In-memory tree where the statement was saved.
  * @param stmt  Statement allocated from lsregion.
  */
 void
-vy_index_rollback_stmt(struct vy_index *index, struct vy_mem *mem,
-		       const struct tuple *stmt);
+vy_lsm_rollback_stmt(struct vy_lsm *lsm, struct vy_mem *mem,
+		     const struct tuple *stmt);
 
 #if defined(__cplusplus)
 } /* extern "C" */
 #endif /* defined(__cplusplus) */
 
-#endif /* INCLUDES_TARANTOOL_BOX_VY_INDEX_H */
+#endif /* INCLUDES_TARANTOOL_BOX_VY_LSM_H */
diff --git a/src/box/vy_mem.c b/src/box/vy_mem.c
index 629634bb..1105340c 100644
--- a/src/box/vy_mem.c
+++ b/src/box/vy_mem.c
@@ -212,7 +212,7 @@ vy_mem_insert_upsert(struct vy_mem *mem, const struct tuple *stmt)
 	 * UPSERT, n = threshold + 1,
 	 * UPSERT, n = threshold + 1, all following ones have
 	 *         ...                threshold + 1.
-	 * These values are used by vy_index_commit to squash
+	 * These values are used by vy_lsm_commit_upsert to squash
 	 * the UPSERT subsequence.
 	 */
 	vy_mem_tree_iterator_next(&mem->tree, &inserted);
diff --git a/src/box/vy_point_lookup.c b/src/box/vy_point_lookup.c
index d92cb94f..c5aa9719 100644
--- a/src/box/vy_point_lookup.c
+++ b/src/box/vy_point_lookup.c
@@ -39,7 +39,7 @@
 
 #include "fiber.h"
 
-#include "vy_index.h"
+#include "vy_lsm.h"
 #include "vy_stmt.h"
 #include "vy_tx.h"
 #include "vy_mem.h"
@@ -123,18 +123,18 @@ vy_stmt_history_is_terminal(struct rlist *history)
  * Add one or no statement to the history list.
  */
 static int
-vy_point_lookup_scan_txw(struct vy_index *index, struct vy_tx *tx,
+vy_point_lookup_scan_txw(struct vy_lsm *lsm, struct vy_tx *tx,
 			 struct tuple *key, struct rlist *history)
 {
 	if (tx == NULL)
 		return 0;
-	index->stat.txw.iterator.lookup++;
+	lsm->stat.txw.iterator.lookup++;
 	struct txv *txv =
-		write_set_search_key(&tx->write_set, index, key);
-	assert(txv == NULL || txv->index == index);
+		write_set_search_key(&tx->write_set, lsm, key);
+	assert(txv == NULL || txv->lsm == lsm);
 	if (txv == NULL)
 		return 0;
-	vy_stmt_counter_acct_tuple(&index->stat.txw.iterator.get,
+	vy_stmt_counter_acct_tuple(&lsm->stat.txw.iterator.get,
 				   txv->stmt);
 	struct vy_stmt_history_node *node = vy_stmt_history_node_new();
 	if (node == NULL)
@@ -146,21 +146,21 @@ vy_point_lookup_scan_txw(struct vy_index *index, struct vy_tx *tx,
 }
 
 /**
- * Scan index cache for given key.
+ * Scan the LSM tree cache for the given key.
  * Add one or no statement to the history list.
  */
 static int
-vy_point_lookup_scan_cache(struct vy_index *index,
+vy_point_lookup_scan_cache(struct vy_lsm *lsm,
 			   const struct vy_read_view **rv,
 			   struct tuple *key, struct rlist *history)
 {
-	index->cache.stat.lookup++;
-	struct tuple *stmt = vy_cache_get(&index->cache, key);
+	lsm->cache.stat.lookup++;
+	struct tuple *stmt = vy_cache_get(&lsm->cache, key);
 
 	if (stmt == NULL || vy_stmt_lsn(stmt) > (*rv)->vlsn)
 		return 0;
 
-	vy_stmt_counter_acct_tuple(&index->cache.stat.get, stmt);
+	vy_stmt_counter_acct_tuple(&lsm->cache.stat.get, stmt);
 	struct vy_stmt_history_node *node = vy_stmt_history_node_new();
 	if (node == NULL)
 		return -1;
@@ -176,7 +176,7 @@ vy_point_lookup_scan_cache(struct vy_index *index,
  * Add found statements to the history list up to terminal statement.
  */
 static int
-vy_point_lookup_scan_mem(struct vy_index *index, struct vy_mem *mem,
+vy_point_lookup_scan_mem(struct vy_lsm *lsm, struct vy_mem *mem,
 			 const struct vy_read_view **rv,
 			 struct tuple *key, struct rlist *history)
 {
@@ -186,7 +186,7 @@ vy_point_lookup_scan_mem(struct vy_index *index, struct vy_mem *mem,
 	bool exact;
 	struct vy_mem_tree_iterator mem_itr =
 		vy_mem_tree_lower_bound(&mem->tree, &tree_key, &exact);
-	index->stat.memory.iterator.lookup++;
+	lsm->stat.memory.iterator.lookup++;
 	const struct tuple *stmt = NULL;
 	if (!vy_mem_tree_iterator_is_invalid(&mem_itr)) {
 		stmt = *vy_mem_tree_iterator_get_elem(&mem->tree, &mem_itr);
@@ -202,7 +202,7 @@ vy_point_lookup_scan_mem(struct vy_index *index, struct vy_mem *mem,
 		if (node == NULL)
 			return -1;
 
-		vy_stmt_counter_acct_tuple(&index->stat.memory.iterator.get,
+		vy_stmt_counter_acct_tuple(&lsm->stat.memory.iterator.get,
 					   stmt);
 
 		node->src_type = ITER_SRC_MEM;
@@ -226,22 +226,21 @@ vy_point_lookup_scan_mem(struct vy_index *index, struct vy_mem *mem,
 }
 
 /**
- * Scan all mems that belongs to the index.
+ * Scan all mems that belong to the LSM tree.
  * Add found statements to the history list up to terminal statement.
  */
 static int
-vy_point_lookup_scan_mems(struct vy_index *index,
-			  const struct vy_read_view **rv,
+vy_point_lookup_scan_mems(struct vy_lsm *lsm, const struct vy_read_view **rv,
 			  struct tuple *key, struct rlist *history)
 {
-	assert(index->mem != NULL);
-	int rc = vy_point_lookup_scan_mem(index, index->mem, rv, key, history);
+	assert(lsm->mem != NULL);
+	int rc = vy_point_lookup_scan_mem(lsm, lsm->mem, rv, key, history);
 	struct vy_mem *mem;
-	rlist_foreach_entry(mem, &index->sealed, in_sealed) {
+	rlist_foreach_entry(mem, &lsm->sealed, in_sealed) {
 		if (rc != 0 || vy_stmt_history_is_terminal(history))
 			return rc;
 
-		rc = vy_point_lookup_scan_mem(index, mem, rv, key, history);
+		rc = vy_point_lookup_scan_mem(lsm, mem, rv, key, history);
 	}
 	return 0;
 }
@@ -253,7 +252,7 @@ vy_point_lookup_scan_mems(struct vy_index *index,
  * was found.
  */
 static int
-vy_point_lookup_scan_slice(struct vy_index *index, struct vy_slice *slice,
+vy_point_lookup_scan_slice(struct vy_lsm *lsm, struct vy_slice *slice,
 			   const struct vy_read_view **rv, struct tuple *key,
 			   struct rlist *history, bool *terminal_found)
 {
@@ -264,10 +263,10 @@ vy_point_lookup_scan_slice(struct vy_index *index, struct vy_slice *slice,
 	 * format in vy_mem.
 	 */
 	struct vy_run_iterator run_itr;
-	vy_run_iterator_open(&run_itr, &index->stat.disk.iterator, slice,
-			     ITER_EQ, key, rv, index->cmp_def, index->key_def,
-			     index->disk_format, index->upsert_format,
-			     index->index_id == 0);
+	vy_run_iterator_open(&run_itr, &lsm->stat.disk.iterator, slice,
+			     ITER_EQ, key, rv, lsm->cmp_def, lsm->key_def,
+			     lsm->disk_format, lsm->upsert_format,
+			     lsm->index_id == 0);
 	struct tuple *stmt;
 	rc = vy_run_iterator_next_key(&run_itr, &stmt);
 	while (rc == 0 && stmt != NULL) {
@@ -297,11 +296,10 @@ vy_point_lookup_scan_slice(struct vy_index *index, struct vy_slice *slice,
  * that complete history from runs will be extracted.
  */
 static int
-vy_point_lookup_scan_slices(struct vy_index *index,
-			    const struct vy_read_view **rv,
+vy_point_lookup_scan_slices(struct vy_lsm *lsm, const struct vy_read_view **rv,
 			    struct tuple *key, struct rlist *history)
 {
-	struct vy_range *range = vy_range_tree_find_by_key(index->tree,
+	struct vy_range *range = vy_range_tree_find_by_key(lsm->tree,
 							   ITER_EQ, key);
 	assert(range != NULL);
 	int slice_count = range->slice_count;
@@ -323,7 +321,7 @@ vy_point_lookup_scan_slices(struct vy_index *index,
 	bool terminal_found = false;
 	for (i = 0; i < slice_count; i++) {
 		if (rc == 0 && !terminal_found)
-			rc = vy_point_lookup_scan_slice(index, slices[i],
+			rc = vy_point_lookup_scan_slice(lsm, slices[i],
 					rv, key, history, &terminal_found);
 		vy_slice_unpin(slices[i]);
 	}
@@ -334,7 +332,7 @@ vy_point_lookup_scan_slices(struct vy_index *index,
  * Get a resultant statement from collected history. Add to cache if possible.
  */
 static int
-vy_point_lookup_apply_history(struct vy_index *index,
+vy_point_lookup_apply_history(struct vy_lsm *lsm,
 			      const struct vy_read_view **rv,
 			      struct tuple *key, struct rlist *history,
 			      struct tuple **ret)
@@ -365,9 +363,9 @@ vy_point_lookup_apply_history(struct vy_index *index,
 		       vy_stmt_lsn(node->stmt) <= (*rv)->vlsn);
 
 		struct tuple *stmt = vy_apply_upsert(node->stmt, curr_stmt,
-					index->cmp_def, index->mem_format,
-					index->upsert_format, true);
-		index->stat.upsert.applied++;
+					lsm->cmp_def, lsm->mem_format,
+					lsm->upsert_format, true);
+		lsm->stat.upsert.applied++;
 		if (stmt == NULL)
 			return -1;
 		if (curr_stmt != NULL)
@@ -376,62 +374,62 @@ vy_point_lookup_apply_history(struct vy_index *index,
 		node = rlist_prev_entry_safe(node, history, link);
 	}
 	if (curr_stmt != NULL) {
-		vy_stmt_counter_acct_tuple(&index->stat.get, curr_stmt);
+		vy_stmt_counter_acct_tuple(&lsm->stat.get, curr_stmt);
 		*ret = curr_stmt;
 	}
 	/**
 	 * Add a statement to the cache
 	 */
 	if ((*rv)->vlsn == INT64_MAX) /* Do not store non-latest data */
-		vy_cache_add(&index->cache, curr_stmt, NULL, key, ITER_EQ);
+		vy_cache_add(&lsm->cache, curr_stmt, NULL, key, ITER_EQ);
 	return 0;
 }
 
 int
-vy_point_lookup(struct vy_index *index, struct vy_tx *tx,
+vy_point_lookup(struct vy_lsm *lsm, struct vy_tx *tx,
 		const struct vy_read_view **rv,
 		struct tuple *key, struct tuple **ret)
 {
-	assert(tuple_field_count(key) >= index->cmp_def->part_count);
+	assert(tuple_field_count(key) >= lsm->cmp_def->part_count);
 
 	*ret = NULL;
 	size_t region_svp = region_used(&fiber()->gc);
 	double start_time = ev_monotonic_now(loop());
 	int rc = 0;
 
-	index->stat.lookup++;
+	lsm->stat.lookup++;
 	/* History list */
 	struct rlist history;
 restart:
 	rlist_create(&history);
 
-	rc = vy_point_lookup_scan_txw(index, tx, key, &history);
+	rc = vy_point_lookup_scan_txw(lsm, tx, key, &history);
 	if (rc != 0 || vy_stmt_history_is_terminal(&history))
 		goto done;
 
-	rc = vy_point_lookup_scan_cache(index, rv, key, &history);
+	rc = vy_point_lookup_scan_cache(lsm, rv, key, &history);
 	if (rc != 0 || vy_stmt_history_is_terminal(&history))
 		goto done;
 
-	rc = vy_point_lookup_scan_mems(index, rv, key, &history);
+	rc = vy_point_lookup_scan_mems(lsm, rv, key, &history);
 	if (rc != 0 || vy_stmt_history_is_terminal(&history))
 		goto done;
 
 	/* Save version before yield */
-	uint32_t mem_list_version = index->mem_list_version;
+	uint32_t mem_list_version = lsm->mem_list_version;
 
-	rc = vy_point_lookup_scan_slices(index, rv, key, &history);
+	rc = vy_point_lookup_scan_slices(lsm, rv, key, &history);
 	if (rc != 0)
 		goto done;
 
 	ERROR_INJECT(ERRINJ_VY_POINT_ITER_WAIT, {
-		while (mem_list_version == index->mem_list_version)
+		while (mem_list_version == lsm->mem_list_version)
 			fiber_sleep(0.01);
 		/* Turn off the injection to avoid an infinite loop */
 		errinj(ERRINJ_VY_POINT_ITER_WAIT, ERRINJ_BOOL)->bparam = false;
 	});
 
-	if (mem_list_version != index->mem_list_version) {
+	if (mem_list_version != lsm->mem_list_version) {
 		/*
 		 * Mem list was changed during yield. This could be rotation
 		 * or a dump. In case of dump the memory referenced by
@@ -445,7 +443,7 @@ restart:
 
 done:
 	if (rc == 0) {
-		rc = vy_point_lookup_apply_history(index, rv, key,
+		rc = vy_point_lookup_apply_history(lsm, rv, key,
 						   &history, ret);
 	}
 	vy_stmt_history_cleanup(&history, region_svp);
@@ -454,11 +452,11 @@ done:
 		return -1;
 
 	double latency = ev_monotonic_now(loop()) - start_time;
-	latency_collect(&index->stat.latency, latency);
+	latency_collect(&lsm->stat.latency, latency);
 
-	if (latency > index->env->too_long_threshold) {
+	if (latency > lsm->env->too_long_threshold) {
 		say_warn("%s: get(%s) => %s took too long: %.3f sec",
-			 vy_index_name(index), tuple_str(key),
+			 vy_lsm_name(lsm), tuple_str(key),
 			 vy_stmt_str(*ret), latency);
 	}
 	return 0;
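
For readers not familiar with the call site, here is a minimal sketch of
how the renamed entry point is consumed. Everything except
vy_point_lookup() and tuple_unref() is illustrative; the signature and
the reference-counting contract come from the hunks above.

    /* Hypothetical caller: get_one() and its error handling are made
     * up for illustration. The key must carry all parts of
     * lsm->cmp_def, and a found tuple is returned referenced. */
    static int
    get_one(struct vy_lsm *lsm, struct vy_tx *tx,
            const struct vy_read_view **rv, struct tuple *key)
    {
            struct tuple *result;
            if (vy_point_lookup(lsm, tx, rv, key, &result) != 0)
                    return -1;              /* lookup failed */
            if (result == NULL)
                    return 0;               /* no visible tuple */
            /* ... consume result ... */
            tuple_unref(result);            /* drop the elevated ref */
            return 0;
    }
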
diff --git a/src/box/vy_point_lookup.h b/src/box/vy_point_lookup.h
index a4dbe77e..d74be9a9 100644
--- a/src/box/vy_point_lookup.h
+++ b/src/box/vy_point_lookup.h
@@ -33,7 +33,7 @@
 
 /**
  * Point lookup is a special case of read iterator that is designed for
- * retrieving one value from index by a full key (all parts are present).
+ * retrieving one value from an LSM tree by a full key (all parts are present).
  *
  * Iterator collects necessary history of the given key from different sources
  * (txw, cache, mems, runs) that consists of some number of sequential upserts
@@ -51,7 +51,7 @@
 extern "C" {
 #endif /* defined(__cplusplus) */
 
-struct vy_index;
+struct vy_lsm;
 struct vy_tx;
 struct vy_read_view;
 struct tuple;
@@ -59,7 +59,7 @@ struct tuple;
 /**
  * Given a key that has all index parts (including primary index
  * parts in case of a secondary index), lookup the corresponding
- * tuple in the index. The tuple is returned in @ret with its
+ * tuple in the LSM tree. The tuple is returned in @ret with its
  * reference counter elevated.
  *
  * The caller must guarantee that if the tuple looked up by this
@@ -70,7 +70,7 @@ struct tuple;
  * the tuple is already tracked in a secondary index.
  */
 int
-vy_point_lookup(struct vy_index *index, struct vy_tx *tx,
+vy_point_lookup(struct vy_lsm *lsm, struct vy_tx *tx,
 		const struct vy_read_view **rv,
 		struct tuple *key, struct tuple **ret);
 
diff --git a/src/box/vy_range.h b/src/box/vy_range.h
index c6d3a0ad..3a451fb4 100644
--- a/src/box/vy_range.h
+++ b/src/box/vy_range.h
@@ -54,7 +54,7 @@ struct tuple;
 struct vy_slice;
 
 /**
- * Range of keys in an index stored on disk.
+ * Range of keys in an LSM tree stored on disk.
  */
 struct vy_range {
 	/** Unique ID of this range. */
@@ -108,20 +108,19 @@ struct vy_range {
 	int compact_priority;
 	/** Number of times the range was compacted. */
 	int n_compactions;
-	/** Link in vy_index->tree. */
+	/** Link in vy_lsm->tree. */
 	rb_node(struct vy_range) tree_node;
-	/** Link in vy_index->range_heap. */
+	/** Link in vy_lsm->range_heap. */
 	struct heap_node heap_node;
 	/**
-	 * Incremented whenever an in-memory index or on disk
-	 * run is added to or deleted from this range. Used to
-	 * invalidate iterators.
+	 * Incremented whenever a run is added to or deleted
+	 * from this range. Used to invalidate read iterators.
 	 */
 	uint32_t version;
 };
 
 /**
- * Heap of all ranges of the same index, prioritized by
+ * Heap of all ranges of the same LSM tree, prioritized by
  * vy_range->compact_priority.
  */
 #define HEAP_NAME vy_range_heap
@@ -145,7 +144,7 @@ vy_range_is_scheduled(struct vy_range *range)
 }
 
 /**
- * Search tree of all ranges of the same index, sorted by
+ * Search tree of all ranges of the same LSM tree, sorted by
  * vy_range->begin. Ranges in a tree are supposed to span
  * all possible keys without overlaps.
  */
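
Both links above are intrusive: vy_range embeds the tree and heap nodes,
and the containers recover the owning range with pointer arithmetic. A
standalone sketch of that pattern with simplified stand-in types (the
real node types and generation macros live elsewhere in the tree):

    #include <stddef.h>

    /* Simplified stand-ins; not the actual vinyl definitions. */
    struct heap_node { unsigned pos; };

    struct range {
            int compact_priority;
            struct heap_node heap_node;     /* embedded heap link */
    };

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    /* Given the embedded node, recover the owning range, just as
     * vy_dump_heap_less()/vy_compact_heap_less() do in the scheduler. */
    static int
    range_priority(struct heap_node *node)
    {
            struct range *range = container_of(node, struct range,
                                               heap_node);
            return range->compact_priority;
    }
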
diff --git a/src/box/vy_read_iterator.c b/src/box/vy_read_iterator.c
index 3b5e34fc..37493e19 100644
--- a/src/box/vy_read_iterator.c
+++ b/src/box/vy_read_iterator.c
@@ -35,7 +35,7 @@
 #include "vy_tx.h"
 #include "fiber.h"
 #include "vy_upsert.h"
-#include "vy_index.h"
+#include "vy_lsm.h"
 #include "vy_stat.h"
 
 /**
@@ -141,7 +141,7 @@ vy_read_iterator_range_is_done(struct vy_read_iterator *itr)
 {
 	struct tuple *stmt = itr->curr_stmt;
 	struct vy_range *range = itr->curr_range;
-	struct key_def *cmp_def = itr->index->cmp_def;
+	struct key_def *cmp_def = itr->lsm->cmp_def;
 	int dir = iterator_direction(itr->iterator_type);
 
 	if (dir > 0 && range->end != NULL &&
@@ -182,7 +182,7 @@ vy_read_iterator_cmp_stmt(struct vy_read_iterator *itr,
 	if (a == NULL && b == NULL)
 		return 0;
 	return iterator_direction(itr->iterator_type) *
-		vy_tuple_compare(a, b, itr->index->cmp_def);
+		vy_tuple_compare(a, b, itr->lsm->cmp_def);
 }
 
 /**
@@ -195,7 +195,7 @@ vy_read_iterator_is_exact_match(struct vy_read_iterator *itr,
 {
 	struct tuple *key = itr->key;
 	enum iterator_type type = itr->iterator_type;
-	struct key_def *cmp_def = itr->index->cmp_def;
+	struct key_def *cmp_def = itr->lsm->cmp_def;
 
 	/*
 	 * If the index is unique and the search key is full,
@@ -442,7 +442,7 @@ vy_read_iterator_next_key(struct vy_read_iterator *itr, struct tuple **ret)
 
 	if (itr->last_stmt != NULL && (itr->iterator_type == ITER_EQ ||
 				       itr->iterator_type == ITER_REQ) &&
-	    tuple_field_count(itr->key) >= itr->index->cmp_def->part_count) {
+	    tuple_field_count(itr->key) >= itr->lsm->cmp_def->part_count) {
 		/*
 		 * There may be one statement at max satisfying
 		 * EQ with a full key.
@@ -451,11 +451,11 @@ vy_read_iterator_next_key(struct vy_read_iterator *itr, struct tuple **ret)
 		return 0;
 	}
 	/*
-	 * Restore the iterator position if the index has changed
+	 * Restore the iterator position if the LSM tree has changed
 	 * since the last iteration.
 	 */
-	if (itr->mem_list_version != itr->index->mem_list_version ||
-	    itr->range_tree_version != itr->index->range_tree_version ||
+	if (itr->mem_list_version != itr->lsm->mem_list_version ||
+	    itr->range_tree_version != itr->lsm->range_tree_version ||
 	    itr->range_version != itr->curr_range->version) {
 		vy_read_iterator_restore(itr);
 	}
@@ -501,8 +501,8 @@ rescan_disk:
 	 * because all slices were pinned and hence could not be
 	 * removed.
 	 */
-	if (itr->mem_list_version != itr->index->mem_list_version ||
-	    itr->range_tree_version != itr->index->range_tree_version) {
+	if (itr->mem_list_version != itr->lsm->mem_list_version ||
+	    itr->range_tree_version != itr->lsm->range_tree_version) {
 		vy_read_iterator_restore(itr);
 		goto restart;
 	}
@@ -528,7 +528,7 @@ done:
 
 	if (itr->need_check_eq && itr->curr_stmt != NULL &&
 	    vy_stmt_compare(itr->curr_stmt, itr->key,
-			    itr->index->cmp_def) != 0)
+			    itr->lsm->cmp_def) != 0)
 		itr->curr_stmt = NULL;
 
 	if (vy_read_iterator_track_read(itr, itr->curr_stmt) != 0)
@@ -592,7 +592,7 @@ vy_read_iterator_next_lsn(struct vy_read_iterator *itr, struct tuple **ret)
 	/*
 	 * Look up the older statement in on-disk runs.
 	 *
-	 * Note, we don't need to check the index version after the yield
+	 * Note, we don't need to check the LSM tree version after the yield
 	 * caused by the disk read, because once we've come to this point,
 	 * we won't read any source except run slices, which are pinned
 	 * and hence cannot be removed during the yield.
@@ -645,11 +645,11 @@ vy_read_iterator_squash_upsert(struct vy_read_iterator *itr,
 			       struct tuple **ret)
 {
 	*ret = NULL;
-	struct vy_index *index = itr->index;
+	struct vy_lsm *lsm = itr->lsm;
 	struct tuple *t = itr->curr_stmt;
 
-	/* Upserts enabled only in the primary index. */
-	assert(vy_stmt_type(t) != IPROTO_UPSERT || index->index_id == 0);
+	/* Upserts are enabled only in the primary index LSM tree. */
+	assert(vy_stmt_type(t) != IPROTO_UPSERT || lsm->index_id == 0);
 	tuple_ref(t);
 	while (vy_stmt_type(t) == IPROTO_UPSERT) {
 		struct tuple *next;
@@ -659,9 +659,9 @@ vy_read_iterator_squash_upsert(struct vy_read_iterator *itr,
 			return rc;
 		}
 		struct tuple *applied = vy_apply_upsert(t, next,
-				index->cmp_def, index->mem_format,
-				index->upsert_format, true);
-		index->stat.upsert.applied++;
+				lsm->cmp_def, lsm->mem_format,
+				lsm->upsert_format, true);
+		lsm->stat.upsert.applied++;
 		tuple_unref(t);
 		if (applied == NULL)
 			return -1;
@@ -679,9 +679,9 @@ vy_read_iterator_add_tx(struct vy_read_iterator *itr)
 	assert(itr->tx != NULL);
 	enum iterator_type iterator_type = (itr->iterator_type != ITER_REQ ?
 					    itr->iterator_type : ITER_LE);
-	struct vy_txw_iterator_stat *stat = &itr->index->stat.txw.iterator;
+	struct vy_txw_iterator_stat *stat = &itr->lsm->stat.txw.iterator;
 	struct vy_read_src *sub_src = vy_read_iterator_add_src(itr);
-	vy_txw_iterator_open(&sub_src->txw_iterator, stat, itr->tx, itr->index,
+	vy_txw_iterator_open(&sub_src->txw_iterator, stat, itr->tx, itr->lsm,
 			     iterator_type, itr->key);
 }
 
@@ -692,7 +692,7 @@ vy_read_iterator_add_cache(struct vy_read_iterator *itr)
 					    itr->iterator_type : ITER_LE);
 	struct vy_read_src *sub_src = vy_read_iterator_add_src(itr);
 	vy_cache_iterator_open(&sub_src->cache_iterator,
-			       &itr->index->cache, iterator_type,
+			       &itr->lsm->cache, iterator_type,
 			       itr->key, itr->read_view);
 }
 
@@ -701,22 +701,20 @@ vy_read_iterator_add_mem(struct vy_read_iterator *itr)
 {
 	enum iterator_type iterator_type = (itr->iterator_type != ITER_REQ ?
 					    itr->iterator_type : ITER_LE);
-	struct vy_index *index = itr->index;
+	struct vy_lsm *lsm = itr->lsm;
 	struct vy_read_src *sub_src;
 
 	/* Add the active in-memory index. */
-	assert(index->mem != NULL);
+	assert(lsm->mem != NULL);
 	sub_src = vy_read_iterator_add_src(itr);
-	vy_mem_iterator_open(&sub_src->mem_iterator,
-			     &index->stat.memory.iterator,
-			     index->mem, iterator_type, itr->key,
-			     itr->read_view);
+	vy_mem_iterator_open(&sub_src->mem_iterator, &lsm->stat.memory.iterator,
+			     lsm->mem, iterator_type, itr->key, itr->read_view);
 	/* Add sealed in-memory indexes. */
 	struct vy_mem *mem;
-	rlist_foreach_entry(mem, &index->sealed, in_sealed) {
+	rlist_foreach_entry(mem, &lsm->sealed, in_sealed) {
 		sub_src = vy_read_iterator_add_src(itr);
 		vy_mem_iterator_open(&sub_src->mem_iterator,
-				     &index->stat.memory.iterator,
+				     &lsm->stat.memory.iterator,
 				     mem, iterator_type, itr->key,
 				     itr->read_view);
 	}
@@ -728,7 +726,7 @@ vy_read_iterator_add_disk(struct vy_read_iterator *itr)
 	assert(itr->curr_range != NULL);
 	enum iterator_type iterator_type = (itr->iterator_type != ITER_REQ ?
 					    itr->iterator_type : ITER_LE);
-	struct vy_index *index = itr->index;
+	struct vy_lsm *lsm = itr->lsm;
 	struct vy_slice *slice;
 	/*
 	 * The format of the statement must be exactly the space
@@ -742,21 +740,20 @@ vy_read_iterator_add_disk(struct vy_read_iterator *itr)
 		 * dumped in-memory trees. We must not add both
 		 * the slice and the trees in this case, because
 		 * the read iterator can't deal with duplicates.
-		 * Since index->dump_lsn is bumped after deletion
+		 * Since lsm->dump_lsn is bumped after deletion
 		 * of dumped in-memory trees, we can filter out
 		 * the run slice containing duplicates by LSN.
 		 */
-		if (slice->run->info.min_lsn > index->dump_lsn)
+		if (slice->run->info.min_lsn > lsm->dump_lsn)
 			continue;
-		assert(slice->run->info.max_lsn <= index->dump_lsn);
+		assert(slice->run->info.max_lsn <= lsm->dump_lsn);
 		struct vy_read_src *sub_src = vy_read_iterator_add_src(itr);
 		vy_run_iterator_open(&sub_src->run_iterator,
-				     &index->stat.disk.iterator, slice,
+				     &lsm->stat.disk.iterator, slice,
 				     iterator_type, itr->key,
-				     itr->read_view, index->cmp_def,
-				     index->key_def, index->disk_format,
-				     index->upsert_format,
-				     index->index_id == 0);
+				     itr->read_view, lsm->cmp_def,
+				     lsm->key_def, lsm->disk_format,
+				     lsm->upsert_format, lsm->index_id == 0);
 	}
 }
 
@@ -799,13 +796,13 @@ vy_read_iterator_cleanup(struct vy_read_iterator *itr)
 }
 
 void
-vy_read_iterator_open(struct vy_read_iterator *itr, struct vy_index *index,
+vy_read_iterator_open(struct vy_read_iterator *itr, struct vy_lsm *lsm,
 		      struct vy_tx *tx, enum iterator_type iterator_type,
 		      struct tuple *key, const struct vy_read_view **rv)
 {
 	memset(itr, 0, sizeof(*itr));
 
-	itr->index = index;
+	itr->lsm = lsm;
 	itr->tx = tx;
 	itr->iterator_type = iterator_type;
 	itr->key = key;
@@ -851,9 +848,9 @@ vy_read_iterator_restore(struct vy_read_iterator *itr)
 {
 	vy_read_iterator_cleanup(itr);
 
-	itr->mem_list_version = itr->index->mem_list_version;
-	itr->range_tree_version = itr->index->range_tree_version;
-	itr->curr_range = vy_range_tree_find_by_key(itr->index->tree,
+	itr->mem_list_version = itr->lsm->mem_list_version;
+	itr->range_tree_version = itr->lsm->range_tree_version;
+	itr->curr_range = vy_range_tree_find_by_key(itr->lsm->tree,
 			itr->iterator_type, itr->last_stmt ?: itr->key);
 	itr->range_version = itr->curr_range->version;
 
@@ -879,13 +876,13 @@ static void
 vy_read_iterator_next_range(struct vy_read_iterator *itr)
 {
 	struct vy_range *range = itr->curr_range;
-	struct key_def *cmp_def = itr->index->cmp_def;
+	struct key_def *cmp_def = itr->lsm->cmp_def;
 	int dir = iterator_direction(itr->iterator_type);
 
 	assert(range != NULL);
 	while (true) {
-		range = dir > 0 ? vy_range_tree_next(itr->index->tree, range) :
-				  vy_range_tree_prev(itr->index->tree, range);
+		range = dir > 0 ? vy_range_tree_next(itr->lsm->tree, range) :
+				  vy_range_tree_prev(itr->lsm->tree, range);
 		assert(range != NULL);
 
 		if (itr->last_stmt == NULL)
@@ -927,16 +924,16 @@ vy_read_iterator_track_read(struct vy_read_iterator *itr, struct tuple *stmt)
 	if (stmt == NULL) {
 		stmt = (itr->iterator_type == ITER_EQ ||
 			itr->iterator_type == ITER_REQ ?
-			itr->key : itr->index->env->empty_key);
+			itr->key : itr->lsm->env->empty_key);
 	}
 
 	int rc;
 	if (iterator_direction(itr->iterator_type) >= 0) {
-		rc = vy_tx_track(itr->tx, itr->index, itr->key,
+		rc = vy_tx_track(itr->tx, itr->lsm, itr->key,
 				 itr->iterator_type != ITER_GT,
 				 stmt, true);
 	} else {
-		rc = vy_tx_track(itr->tx, itr->index, stmt, true,
+		rc = vy_tx_track(itr->tx, itr->lsm, stmt, true,
 				 itr->key, itr->iterator_type != ITER_LT);
 	}
 	return rc;
@@ -951,7 +948,7 @@ vy_read_iterator_next(struct vy_read_iterator *itr, struct tuple **result)
 
 	if (!itr->search_started) {
 		itr->search_started = true;
-		itr->index->stat.lookup++;
+		itr->lsm->stat.lookup++;
 		vy_read_iterator_restore(itr);
 	}
 
@@ -961,7 +958,7 @@ vy_read_iterator_next(struct vy_read_iterator *itr, struct tuple **result)
 	bool skipped_txw_delete = false;
 
 	struct tuple *t = NULL;
-	struct vy_index *index = itr->index;
+	struct vy_lsm *lsm = itr->lsm;
 	int rc = 0;
 	while (true) {
 		rc = vy_read_iterator_next_key(itr, &t);
@@ -993,7 +990,7 @@ vy_read_iterator_next(struct vy_read_iterator *itr, struct tuple **result)
 	       vy_stmt_type(*result) == IPROTO_INSERT ||
 	       vy_stmt_type(*result) == IPROTO_REPLACE);
 	if (*result != NULL)
-		vy_stmt_counter_acct_tuple(&index->stat.get, *result);
+		vy_stmt_counter_acct_tuple(&lsm->stat.get, *result);
 
 #ifndef NDEBUG
 	/* Check constraints. */
@@ -1005,18 +1002,18 @@ vy_read_iterator_next(struct vy_read_iterator *itr, struct tuple **result)
 	 */
 	if (itr->last_stmt != NULL && tuple_field_count(itr->key) > 0) {
 		int cmp = dir * vy_stmt_compare(*result, itr->key,
-						itr->index->cmp_def);
+						itr->lsm->cmp_def);
 		assert(cmp >= 0);
 	}
 	/*
 	 * Ensure the read iterator does not return duplicates
-	 * and respects statements order (index->cmp_def includes
+	 * and respects statement order (lsm->cmp_def includes
 	 * primary parts, so prev_key != itr->last_stmt for any
-	 * index).
+	 * LSM tree).
 	 */
 	if (prev_key != NULL && itr->last_stmt != NULL) {
 		assert(dir * vy_tuple_compare(prev_key, itr->last_stmt,
-					      index->cmp_def) < 0);
+					      lsm->cmp_def) < 0);
 	}
 #endif
 
@@ -1034,7 +1031,7 @@ vy_read_iterator_next(struct vy_read_iterator *itr, struct tuple **result)
 			 */
 			cache_prev = NULL;
 		}
-		vy_cache_add(&itr->index->cache, *result, cache_prev,
+		vy_cache_add(&itr->lsm->cache, *result, cache_prev,
 			     itr->key, itr->iterator_type);
 	}
 clear:
@@ -1042,11 +1039,11 @@ clear:
 		tuple_unref(prev_key);
 
 	ev_tstamp latency = ev_monotonic_now(loop()) - start_time;
-	latency_collect(&index->stat.latency, latency);
+	latency_collect(&lsm->stat.latency, latency);
 
-	if (latency > index->env->too_long_threshold) {
+	if (latency > lsm->env->too_long_threshold) {
 		say_warn("%s: select(%s, %s) => %s took too long: %.3f sec",
-			 vy_index_name(index), tuple_str(itr->key),
+			 vy_lsm_name(lsm), tuple_str(itr->key),
 			 iterator_type_strs[itr->iterator_type],
 			 vy_stmt_str(itr->last_stmt), latency);
 	}
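
The version checks above follow the usual optimistic scheme around yield
points. A self-contained sketch of that scheme with illustrative names;
only the mem_list/range_tree version pair mirrors the patch:

    #include <stdint.h>

    struct lsm_versions {
            uint32_t mem_list_version;      /* bumped on mem rotation/dump */
            uint32_t range_tree_version;    /* bumped on range changes */
    };

    void do_yielding_disk_read(void);       /* may yield the fiber */
    void restore_position(void);            /* re-seek from the last key */

    /* Snapshot the versions, perform the yielding read, then rescan
     * from a stable position if anything structural changed meanwhile. */
    static void
    read_with_revalidation(struct lsm_versions *v)
    {
            for (;;) {
                    uint32_t mem_v = v->mem_list_version;
                    uint32_t tree_v = v->range_tree_version;

                    do_yielding_disk_read();

                    if (mem_v == v->mem_list_version &&
                        tree_v == v->range_tree_version)
                            break;          /* sources still valid */

                    restore_position();     /* cf. vy_read_iterator_restore() */
            }
    }
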
diff --git a/src/box/vy_read_iterator.h b/src/box/vy_read_iterator.h
index 5726b042..4f9d3d4b 100644
--- a/src/box/vy_read_iterator.h
+++ b/src/box/vy_read_iterator.h
@@ -44,11 +44,11 @@ extern "C" {
 /**
  * Vinyl read iterator.
  *
- * Used for executing a SELECT request over a Vinyl index.
+ * Used for executing a SELECT request over an LSM tree.
  */
 struct vy_read_iterator {
-	/** Index to iterate over. */
-	struct vy_index *index;
+	/** LSM tree to iterate over. */
+	struct vy_lsm *lsm;
 	/** Active transaction or NULL. */
 	struct vy_tx *tx;
 	/** Iterator type. */
@@ -67,12 +67,12 @@ struct vy_read_iterator {
 	/** Last statement returned by vy_read_iterator_next(). */
 	struct tuple *last_stmt;
 	/**
-	 * Copy of index->range_tree_version.
+	 * Copy of lsm->range_tree_version.
 	 * Used for detecting range tree changes.
 	 */
 	uint32_t range_tree_version;
 	/**
-	 * Copy of index->mem_list_version.
+	 * Copy of lsm->mem_list_version.
 	 * Used for detecting memory level changes.
 	 */
 	uint32_t mem_list_version;
@@ -94,7 +94,7 @@ struct vy_read_iterator {
 	uint32_t src_count;
 	/** Maximal capacity of the src array. */
 	uint32_t src_capacity;
-	/** Index of the current merge source. */
+	/** Offset of the current merge source. */
 	uint32_t curr_src;
 	/** Statement returned by the current merge source. */
 	struct tuple *curr_stmt;
@@ -122,7 +122,7 @@ struct vy_read_iterator {
 /**
  * Open the read iterator.
  * @param itr           Read iterator.
- * @param index         Vinyl index to iterate.
+ * @param lsm           LSM tree to iterate.
  * @param tx            Current transaction, if exists.
  * @param iterator_type Type of the iterator that determines order
  *                      of the iteration.
@@ -130,7 +130,7 @@ struct vy_read_iterator {
  * @param rv            Read view.
  */
 void
-vy_read_iterator_open(struct vy_read_iterator *itr, struct vy_index *index,
+vy_read_iterator_open(struct vy_read_iterator *itr, struct vy_lsm *lsm,
 		      struct vy_tx *tx, enum iterator_type iterator_type,
 		      struct tuple *key, const struct vy_read_view **rv);
 
diff --git a/src/box/vy_read_set.c b/src/box/vy_read_set.c
index bb54a4a6..0f3fab61 100644
--- a/src/box/vy_read_set.c
+++ b/src/box/vy_read_set.c
@@ -37,15 +37,15 @@
 
 #include "trivia/util.h"
 #include "tuple.h"
-#include "vy_index.h"
+#include "vy_lsm.h"
 #include "vy_stmt.h"
 
 int
 vy_read_interval_cmpl(const struct vy_read_interval *a,
 		      const struct vy_read_interval *b)
 {
-	assert(a->index == b->index);
-	struct key_def *cmp_def = a->index->cmp_def;
+	assert(a->lsm == b->lsm);
+	struct key_def *cmp_def = a->lsm->cmp_def;
 	int cmp = vy_stmt_compare(a->left, b->left, cmp_def);
 	if (cmp != 0)
 		return cmp;
@@ -67,8 +67,8 @@ int
 vy_read_interval_cmpr(const struct vy_read_interval *a,
 		      const struct vy_read_interval *b)
 {
-	assert(a->index == b->index);
-	struct key_def *cmp_def = a->index->cmp_def;
+	assert(a->lsm == b->lsm);
+	struct key_def *cmp_def = a->lsm->cmp_def;
 	int cmp = vy_stmt_compare(a->right, b->right, cmp_def);
 	if (cmp != 0)
 		return cmp;
@@ -90,9 +90,9 @@ bool
 vy_read_interval_should_merge(const struct vy_read_interval *l,
 			      const struct vy_read_interval *r)
 {
-	assert(l->index == r->index);
+	assert(l->lsm == r->lsm);
 	assert(vy_read_interval_cmpl(l, r) <= 0);
-	struct key_def *cmp_def = l->index->cmp_def;
+	struct key_def *cmp_def = l->lsm->cmp_def;
 	int cmp = vy_stmt_compare(l->right, r->left, cmp_def);
 	if (cmp > 0)
 		return true;
@@ -116,13 +116,13 @@ struct vy_tx *
 vy_tx_conflict_iterator_next(struct vy_tx_conflict_iterator *it)
 {
 	struct vy_read_interval *curr, *left, *right;
-	while ((curr = vy_index_read_set_walk_next(&it->tree_walk, it->tree_dir,
-						   &left, &right)) != NULL) {
-		struct key_def *cmp_def = curr->index->cmp_def;
+	while ((curr = vy_lsm_read_set_walk_next(&it->tree_walk, it->tree_dir,
+						 &left, &right)) != NULL) {
+		struct key_def *cmp_def = curr->lsm->cmp_def;
 		const struct vy_read_interval *last = curr->subtree_last;
 
-		assert(left == NULL || left->index == curr->index);
-		assert(right == NULL || right->index == curr->index);
+		assert(left == NULL || left->lsm == curr->lsm);
+		assert(right == NULL || right->lsm == curr->lsm);
 
 		int cmp_right = vy_stmt_compare(it->stmt, last->right, cmp_def);
 		if (cmp_right == 0 && !last->right_belongs)
diff --git a/src/box/vy_read_set.h b/src/box/vy_read_set.h
index 272da63a..1b139ccf 100644
--- a/src/box/vy_read_set.h
+++ b/src/box/vy_read_set.h
@@ -48,7 +48,7 @@ extern "C" {
 
 struct tuple;
 struct vy_tx;
-struct vy_index;
+struct vy_lsm;
 
 /**
  * A tuple interval read by a transaction.
@@ -56,8 +56,8 @@ struct vy_index;
 struct vy_read_interval {
 	/** Transaction. */
 	struct vy_tx *tx;
-	/** Index that the transaction read from. */
-	struct vy_index *index;
+	/** LSM tree that the transaction read from. */
+	struct vy_lsm *lsm;
 	/** Left boundary of the interval. */
 	struct tuple *left;
 	/** Right boundary of the interval. */
@@ -73,8 +73,8 @@ struct vy_read_interval {
 	const struct vy_read_interval *subtree_last;
 	/** Link in vy_tx->read_set. */
 	rb_node(struct vy_read_interval) in_tx;
-	/** Link in vy_index->read_set. */
-	rb_node(struct vy_read_interval) in_index;
+	/** Link in vy_lsm->read_set. */
+	rb_node(struct vy_read_interval) in_lsm;
 	/**
 	 * Auxiliary list node. Used by vy_tx_track() to
 	 * link intervals to be merged.
@@ -125,7 +125,7 @@ vy_read_interval_should_merge(const struct vy_read_interval *l,
 
 /**
  * Tree that contains tuple intervals read by a transaction.
- * Linked by vy_read_interval->in_tx. Sorted by vy_index, then
+ * Linked by vy_read_interval->in_tx. Sorted by vy_lsm, then
  * by vy_read_interval->left. Intervals stored in this tree
  * must not intersect.
  */
@@ -136,7 +136,7 @@ vy_tx_read_set_cmp(const struct vy_read_interval *a,
 		   const struct vy_read_interval *b)
 {
 	assert(a->tx == b->tx);
-	int rc = a->index < b->index ? -1 : a->index > b->index;
+	int rc = a->lsm < b->lsm ? -1 : a->lsm > b->lsm;
 	if (rc == 0)
 		rc = vy_read_interval_cmpl(a, b);
 	return rc;
@@ -146,18 +146,18 @@ rb_gen(MAYBE_UNUSED static inline, vy_tx_read_set_, vy_tx_read_set_t,
        struct vy_read_interval, in_tx, vy_tx_read_set_cmp);
 
 /**
- * Interval tree used for tracking reads done from an index by
- * all active transactions. Linked by vy_read_interval->in_index.
+ * Interval tree used for tracking reads done from an LSM tree by
+ * all active transactions. Linked by vy_read_interval->in_lsm.
  * Sorted by vy_read_interval->left, then by vy_tx. Intervals that
  * belong to different transactions may intersect.
  */
-typedef rb_tree(struct vy_read_interval) vy_index_read_set_t;
+typedef rb_tree(struct vy_read_interval) vy_lsm_read_set_t;
 
 static inline int
-vy_index_read_set_cmp(const struct vy_read_interval *a,
-		      const struct vy_read_interval *b)
+vy_lsm_read_set_cmp(const struct vy_read_interval *a,
+		    const struct vy_read_interval *b)
 {
-	assert(a->index == b->index);
+	assert(a->lsm == b->lsm);
 	int rc = vy_read_interval_cmpl(a, b);
 	if (rc == 0)
 		rc = a->tx < b->tx ? -1 : a->tx > b->tx;
@@ -165,9 +165,9 @@ vy_index_read_set_cmp(const struct vy_read_interval *a,
 }
 
 static inline void
-vy_index_read_set_aug(struct vy_read_interval *node,
-		      const struct vy_read_interval *left,
-		      const struct vy_read_interval *right)
+vy_lsm_read_set_aug(struct vy_read_interval *node,
+		    const struct vy_read_interval *left,
+		    const struct vy_read_interval *right)
 {
 	node->subtree_last = node;
 	if (left != NULL &&
@@ -178,9 +178,9 @@ vy_index_read_set_aug(struct vy_read_interval *node,
 		node->subtree_last = right->subtree_last;
 }
 
-rb_gen_aug(MAYBE_UNUSED static inline, vy_index_read_set_, vy_index_read_set_t,
-	   struct vy_read_interval, in_index, vy_index_read_set_cmp,
-	   vy_index_read_set_aug);
+rb_gen_aug(MAYBE_UNUSED static inline, vy_lsm_read_set_, vy_lsm_read_set_t,
+	   struct vy_read_interval, in_lsm, vy_lsm_read_set_cmp,
+	   vy_lsm_read_set_aug);
 
 /**
  * Iterator over transactions that conflict with a statement.
@@ -189,10 +189,10 @@ struct vy_tx_conflict_iterator {
 	/** The statement. */
 	const struct tuple *stmt;
 	/**
-	 * Iterator over the index interval tree checked
+	 * Iterator over the interval tree checked
 	 * for intersections with the statement.
 	 */
-	struct vy_index_read_set_walk tree_walk;
+	struct vy_lsm_read_set_walk tree_walk;
 	/**
 	 * Direction of tree traversal to be used on the
 	 * next iteration.
@@ -202,10 +202,10 @@ struct vy_tx_conflict_iterator {
 
 static inline void
 vy_tx_conflict_iterator_init(struct vy_tx_conflict_iterator *it,
-			     vy_index_read_set_t *read_set,
+			     vy_lsm_read_set_t *read_set,
 			     const struct tuple *stmt)
 {
-	vy_index_read_set_walk_init(&it->tree_walk, read_set);
+	vy_lsm_read_set_walk_init(&it->tree_walk, read_set);
 	it->tree_dir = 0;
 	it->stmt = stmt;
 }
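
Thanks to vy_lsm_read_set_aug(), every subtree caches its rightmost
endpoint in subtree_last, so the conflict walk can prune subtrees that
end before the statement. A hypothetical use of the iterator over an
LSM tree's read set; only the iterator API itself comes from this patch:

    /* Visit every transaction whose read interval covers stmt, e.g.
     * to check a write against concurrent readers. The loop body is
     * illustrative. */
    static void
    for_each_conflicting_tx(vy_lsm_read_set_t *read_set,
                            const struct tuple *stmt)
    {
            struct vy_tx_conflict_iterator it;
            vy_tx_conflict_iterator_init(&it, read_set, stmt);
            struct vy_tx *tx;
            while ((tx = vy_tx_conflict_iterator_next(&it)) != NULL) {
                    /* ... tx read an interval containing stmt ... */
            }
    }
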
diff --git a/src/box/vy_run.c b/src/box/vy_run.c
index f8d96f33..7f792c13 100644
--- a/src/box/vy_run.c
+++ b/src/box/vy_run.c
@@ -236,7 +236,7 @@ vy_run_new(struct vy_run_env *env, int64_t id)
 	run->dump_lsn = -1;
 	run->fd = -1;
 	run->refs = 1;
-	rlist_create(&run->in_index);
+	rlist_create(&run->in_lsm);
 	rlist_create(&run->in_unused);
 	TRASH(&run->info.bloom);
 	return run;
@@ -719,8 +719,7 @@ vy_page_xrow(struct vy_page *page, uint32_t stmt_no,
  * Read raw stmt data from the page
  * @param page          Page.
  * @param stmt_no       Statement position in the page.
- * @param cmp_def       Key definition of an index, including
- *                      primary key parts.
+ * @param cmp_def       Key definition, including primary key parts.
  * @param format        Format for REPLACE/DELETE tuples.
  * @param upsert_format Format for UPSERT tuples.
  * @param is_primary    True if the index is primary.
diff --git a/src/box/vy_run.h b/src/box/vy_run.h
index 6973ee2d..ecf55350 100644
--- a/src/box/vy_run.h
+++ b/src/box/vy_run.h
@@ -144,8 +144,8 @@ struct vy_run {
 	 * after compaction.
 	 */
 	struct rlist in_unused;
-	/** Link in vy_index::runs list. */
-	struct rlist in_index;
+	/** Link in vy_lsm::runs list. */
+	struct rlist in_lsm;
 };
 
 /**
@@ -214,9 +214,9 @@ struct vy_run_iterator {
 	struct vy_run_iterator_stat *stat;
 
 	/* Members needed for memory allocation and disk access */
-	/** Index key definition used for storing statements on disk. */
+	/** Key definition used for comparing statements on disk. */
 	const struct key_def *cmp_def;
-	/** Index key definition defined by the user. */
+	/** Key definition provided by the user. */
 	const struct key_def *key_def;
 	/**
 	 * Format to allocate REPLACE and DELETE tuples read from
@@ -382,8 +382,8 @@ enum vy_file_type {
 extern const char *vy_file_suffix[];
 
 static inline int
-vy_index_snprint_path(char *buf, int size, const char *dir,
-		      uint32_t space_id, uint32_t iid)
+vy_lsm_snprint_path(char *buf, int size, const char *dir,
+		    uint32_t space_id, uint32_t iid)
 {
 	return snprintf(buf, size, "%s/%u/%u",
 			dir, (unsigned)space_id, (unsigned)iid);
@@ -403,7 +403,7 @@ vy_run_snprint_path(char *buf, int size, const char *dir,
 		    int64_t run_id, enum vy_file_type type)
 {
 	int total = 0;
-	SNPRINT(total, vy_index_snprint_path, buf, size,
+	SNPRINT(total, vy_lsm_snprint_path, buf, size,
 		dir, (unsigned)space_id, (unsigned)iid);
 	SNPRINT(total, snprintf, buf, size, "/");
 	SNPRINT(total, vy_run_snprint_filename, buf, size, run_id, type);
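
The helper above pins the on-disk layout to <dir>/<space_id>/<index_id>;
run files then land inside that directory via vy_run_snprint_filename(),
which is outside this hunk. A standalone illustration of the directory
part, with a made-up data directory and ids:

    #include <stdio.h>

    int
    main(void)
    {
            char buf[256];
            /* Same format string as vy_lsm_snprint_path(). */
            snprintf(buf, sizeof(buf), "%s/%u/%u",
                     "/var/lib/tarantool/vinyl", 512u, 0u);
            puts(buf);      /* prints /var/lib/tarantool/vinyl/512/0 */
            return 0;
    }
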
diff --git a/src/box/vy_scheduler.c b/src/box/vy_scheduler.c
index 2ae0d8ca..27961155 100644
--- a/src/box/vy_scheduler.c
+++ b/src/box/vy_scheduler.c
@@ -48,7 +48,7 @@
 #include "fiber_cond.h"
 #include "salad/stailq.h"
 #include "say.h"
-#include "vy_index.h"
+#include "vy_lsm.h"
 #include "vy_log.h"
 #include "vy_mem.h"
 #include "vy_range.h"
@@ -93,7 +93,7 @@ struct vy_task_ops {
 	/**
 	 * This function is called by the scheduler if either ->execute
 	 * or ->complete failed. It may be used to undo changes done to
-	 * the index when preparing the task.
+	 * the LSM tree when preparing the task.
 	 *
 	 * If @in_shutdown is set, the callback is invoked from the
 	 * engine destructor.
@@ -108,10 +108,10 @@ struct vy_task {
 	int status;
 	/** If ->execute fails, the error is stored here. */
 	struct diag diag;
-	/** Index this task is for. */
-	struct vy_index *index;
+	/** LSM tree this task is for. */
+	struct vy_lsm *lsm;
 	/**
-	 * Copies of index->key/cmp_def to protect from
+	 * Copies of lsm->key/cmp_def to protect from
 	 * multithread read/write on alter.
 	 */
 	struct key_def *cmp_def;
@@ -154,11 +154,11 @@ struct vy_task {
  * Allocate a new task to be executed by a worker thread.
  * When preparing an asynchronous task, this function must
  * be called before yielding the current fiber in order to
- * pin the index the task is for so that a concurrent fiber
+ * pin the LSM tree the task is for so that a concurrent fiber
  * does not free it from under us.
  */
 static struct vy_task *
-vy_task_new(struct mempool *pool, struct vy_index *index,
+vy_task_new(struct mempool *pool, struct vy_lsm *lsm,
 	    const struct vy_task_ops *ops)
 {
 	struct vy_task *task = mempool_alloc(pool);
@@ -169,19 +169,19 @@ vy_task_new(struct mempool *pool, struct vy_index *index,
 	}
 	memset(task, 0, sizeof(*task));
 	task->ops = ops;
-	task->index = index;
-	task->cmp_def = key_def_dup(index->cmp_def);
+	task->lsm = lsm;
+	task->cmp_def = key_def_dup(lsm->cmp_def);
 	if (task->cmp_def == NULL) {
 		mempool_free(pool, task);
 		return NULL;
 	}
-	task->key_def = key_def_dup(index->key_def);
+	task->key_def = key_def_dup(lsm->key_def);
 	if (task->key_def == NULL) {
 		key_def_delete(task->cmp_def);
 		mempool_free(pool, task);
 		return NULL;
 	}
-	vy_index_ref(index);
+	vy_lsm_ref(lsm);
 	diag_create(&task->diag);
 	return task;
 }
@@ -192,7 +192,7 @@ vy_task_delete(struct mempool *pool, struct vy_task *task)
 {
 	key_def_delete(task->cmp_def);
 	key_def_delete(task->key_def);
-	vy_index_unref(task->index);
+	vy_lsm_unref(task->lsm);
 	diag_destroy(&task->diag);
 	TRASH(task);
 	mempool_free(pool, task);
@@ -201,21 +201,22 @@ vy_task_delete(struct mempool *pool, struct vy_task *task)
 static bool
 vy_dump_heap_less(struct heap_node *a, struct heap_node *b)
 {
-	struct vy_index *i1 = container_of(a, struct vy_index, in_dump);
-	struct vy_index *i2 = container_of(b, struct vy_index, in_dump);
+	struct vy_lsm *i1 = container_of(a, struct vy_lsm, in_dump);
+	struct vy_lsm *i2 = container_of(b, struct vy_lsm, in_dump);
 
 	/*
-	 * Indexes that are currently being dumped or can't be scheduled
-	 * for dump right now are moved off the top of the heap.
+	 * LSM trees that are currently being dumped or can't be
+	 * scheduled for dump right now are moved off the top of
+	 * the heap.
 	 */
 	if (i1->is_dumping != i2->is_dumping)
 		return i1->is_dumping < i2->is_dumping;
 	if (i1->pin_count != i2->pin_count)
 		return i1->pin_count < i2->pin_count;
 
-	/* Older indexes are dumped first. */
-	int64_t i1_generation = vy_index_generation(i1);
-	int64_t i2_generation = vy_index_generation(i2);
+	/* Older LSM trees are dumped first. */
+	int64_t i1_generation = vy_lsm_generation(i1);
+	int64_t i2_generation = vy_lsm_generation(i2);
 	if (i1_generation != i2_generation)
 		return i1_generation < i2_generation;
 	/*
@@ -240,13 +241,13 @@ vy_dump_heap_less(struct heap_node *a, struct heap_node *b)
 static bool
 vy_compact_heap_less(struct heap_node *a, struct heap_node *b)
 {
-	struct vy_index *i1 = container_of(a, struct vy_index, in_compact);
-	struct vy_index *i2 = container_of(b, struct vy_index, in_compact);
+	struct vy_lsm *i1 = container_of(a, struct vy_lsm, in_compact);
+	struct vy_lsm *i2 = container_of(b, struct vy_lsm, in_compact);
 	/*
-	 * Prefer indexes whose read amplification will be reduced
+	 * Prefer LSM trees whose read amplification will be reduced
 	 * most as a result of compaction.
 	 */
-	return vy_index_compact_priority(i1) > vy_index_compact_priority(i2);
+	return vy_lsm_compact_priority(i1) > vy_lsm_compact_priority(i2);
 }
 
 #define HEAP_NAME vy_compact_heap
@@ -388,58 +389,55 @@ vy_scheduler_destroy(struct vy_scheduler *scheduler)
 }
 
 void
-vy_scheduler_add_index(struct vy_scheduler *scheduler,
-		       struct vy_index *index)
+vy_scheduler_add_lsm(struct vy_scheduler *scheduler, struct vy_lsm *lsm)
 {
-	assert(index->in_dump.pos == UINT32_MAX);
-	assert(index->in_compact.pos == UINT32_MAX);
-	vy_dump_heap_insert(&scheduler->dump_heap, &index->in_dump);
-	vy_compact_heap_insert(&scheduler->compact_heap, &index->in_compact);
+	assert(lsm->in_dump.pos == UINT32_MAX);
+	assert(lsm->in_compact.pos == UINT32_MAX);
+	vy_dump_heap_insert(&scheduler->dump_heap, &lsm->in_dump);
+	vy_compact_heap_insert(&scheduler->compact_heap, &lsm->in_compact);
 }
 
 void
-vy_scheduler_remove_index(struct vy_scheduler *scheduler,
-			  struct vy_index *index)
+vy_scheduler_remove_lsm(struct vy_scheduler *scheduler, struct vy_lsm *lsm)
 {
-	assert(index->in_dump.pos != UINT32_MAX);
-	assert(index->in_compact.pos != UINT32_MAX);
-	vy_dump_heap_delete(&scheduler->dump_heap, &index->in_dump);
-	vy_compact_heap_delete(&scheduler->compact_heap, &index->in_compact);
-	index->in_dump.pos = UINT32_MAX;
-	index->in_compact.pos = UINT32_MAX;
+	assert(lsm->in_dump.pos != UINT32_MAX);
+	assert(lsm->in_compact.pos != UINT32_MAX);
+	vy_dump_heap_delete(&scheduler->dump_heap, &lsm->in_dump);
+	vy_compact_heap_delete(&scheduler->compact_heap, &lsm->in_compact);
+	lsm->in_dump.pos = UINT32_MAX;
+	lsm->in_compact.pos = UINT32_MAX;
 }
 
 static void
-vy_scheduler_update_index(struct vy_scheduler *scheduler,
-			  struct vy_index *index)
+vy_scheduler_update_lsm(struct vy_scheduler *scheduler, struct vy_lsm *lsm)
 {
-	if (index->is_dropped) {
-		/* Dropped indexes are exempted from scheduling. */
-		assert(index->in_dump.pos == UINT32_MAX);
-		assert(index->in_compact.pos == UINT32_MAX);
+	if (lsm->is_dropped) {
+		/* Dropped LSM trees are exempted from scheduling. */
+		assert(lsm->in_dump.pos == UINT32_MAX);
+		assert(lsm->in_compact.pos == UINT32_MAX);
 		return;
 	}
-	assert(index->in_dump.pos != UINT32_MAX);
-	assert(index->in_compact.pos != UINT32_MAX);
-	vy_dump_heap_update(&scheduler->dump_heap, &index->in_dump);
-	vy_compact_heap_update(&scheduler->compact_heap, &index->in_compact);
+	assert(lsm->in_dump.pos != UINT32_MAX);
+	assert(lsm->in_compact.pos != UINT32_MAX);
+	vy_dump_heap_update(&scheduler->dump_heap, &lsm->in_dump);
+	vy_compact_heap_update(&scheduler->compact_heap, &lsm->in_compact);
 }
 
 static void
-vy_scheduler_pin_index(struct vy_scheduler *scheduler, struct vy_index *index)
+vy_scheduler_pin_lsm(struct vy_scheduler *scheduler, struct vy_lsm *lsm)
 {
-	assert(!index->is_dumping);
-	if (index->pin_count++ == 0)
-		vy_scheduler_update_index(scheduler, index);
+	assert(!lsm->is_dumping);
+	if (lsm->pin_count++ == 0)
+		vy_scheduler_update_lsm(scheduler, lsm);
 }
 
 static void
-vy_scheduler_unpin_index(struct vy_scheduler *scheduler, struct vy_index *index)
+vy_scheduler_unpin_lsm(struct vy_scheduler *scheduler, struct vy_lsm *lsm)
 {
-	assert(!index->is_dumping);
-	assert(index->pin_count > 0);
-	if (--index->pin_count == 0)
-		vy_scheduler_update_index(scheduler, index);
+	assert(!lsm->is_dumping);
+	assert(lsm->pin_count > 0);
+	if (--lsm->pin_count == 0)
+		vy_scheduler_update_lsm(scheduler, lsm);
 }
 
 void
@@ -486,22 +484,21 @@ vy_scheduler_complete_dump(struct vy_scheduler *scheduler)
 	int64_t min_generation = scheduler->generation;
 	struct heap_node *pn = vy_dump_heap_top(&scheduler->dump_heap);
 	if (pn != NULL) {
-		struct vy_index *index;
-		index = container_of(pn, struct vy_index, in_dump);
-		min_generation = vy_index_generation(index);
+		struct vy_lsm *lsm = container_of(pn, struct vy_lsm, in_dump);
+		min_generation = vy_lsm_generation(lsm);
 	}
 	if (min_generation == scheduler->dump_generation) {
 		/*
-		 * There are still indexes that must be dumped
+		 * There are still LSM trees that must be dumped
 		 * during the current dump round.
 		 */
 		return;
 	}
 
 	/*
-	 * The oldest index data is newer than @dump_generation,
-	 * so the current dump round has been finished. Notify
-	 * about dump completion.
+	 * The oldest LSM tree data is newer than @dump_generation,
+	 * so the current dump round has been finished. Notify about
+	 * dump completion.
 	 */
 	double now = ev_monotonic_now(loop());
 	double dump_duration = now - scheduler->dump_start;
@@ -590,19 +587,19 @@ vy_scheduler_end_checkpoint(struct vy_scheduler *scheduler)
 }
 
 /**
- * Allocate a new run for an index and write the information
+ * Allocate a new run for an LSM tree and write the information
  * about it to the metadata log so that we could still find
  * and delete it in case a write error occured. This function
  * is called from dump/compaction task constructor.
  */
 static struct vy_run *
-vy_run_prepare(struct vy_run_env *run_env, struct vy_index *index)
+vy_run_prepare(struct vy_run_env *run_env, struct vy_lsm *lsm)
 {
 	struct vy_run *run = vy_run_new(run_env, vy_log_next_id());
 	if (run == NULL)
 		return NULL;
 	vy_log_tx_begin();
-	vy_log_prepare_run(index->id, run->id);
+	vy_log_prepare_run(lsm->id, run->id);
 	if (vy_log_tx_commit() < 0) {
 		vy_run_unref(run);
 		return NULL;
@@ -643,7 +640,7 @@ vy_run_discard(struct vy_run *run)
 static int
 vy_task_write_run(struct vy_scheduler *scheduler, struct vy_task *task)
 {
-	struct vy_index *index = task->index;
+	struct vy_lsm *lsm = task->lsm;
 	struct vy_stmt_stream *wi = task->wi;
 
 	ERROR_INJECT(ERRINJ_VY_RUN_WRITE,
@@ -655,8 +652,8 @@ vy_task_write_run(struct vy_scheduler *scheduler, struct vy_task *task)
 		usleep(inj->dparam * 1000000);
 
 	struct vy_run_writer writer;
-	if (vy_run_writer_create(&writer, task->new_run, index->env->path,
-				 index->space_id, index->index_id,
+	if (vy_run_writer_create(&writer, task->new_run, lsm->env->path,
+				 lsm->space_id, lsm->index_id,
 				 task->cmp_def, task->key_def,
 				 task->page_size, task->bloom_fpr,
 				 task->max_output_count) != 0)
@@ -705,34 +702,34 @@ vy_task_dump_execute(struct vy_scheduler *scheduler, struct vy_task *task)
 static int
 vy_task_dump_complete(struct vy_scheduler *scheduler, struct vy_task *task)
 {
-	struct vy_index *index = task->index;
+	struct vy_lsm *lsm = task->lsm;
 	struct vy_run *new_run = task->new_run;
 	int64_t dump_lsn = new_run->dump_lsn;
-	struct tuple_format *key_format = index->env->key_format;
+	struct tuple_format *key_format = lsm->env->key_format;
 	struct vy_mem *mem, *next_mem;
 	struct vy_slice **new_slices, *slice;
 	struct vy_range *range, *begin_range, *end_range;
 	struct tuple *min_key, *max_key;
 	int i, loops = 0;
 
-	assert(index->is_dumping);
+	assert(lsm->is_dumping);
 
 	if (vy_run_is_empty(new_run)) {
 		/*
 		 * In case the run is empty, we can discard the run
 		 * and delete dumped in-memory trees right away w/o
 		 * inserting slices into ranges. However, we need
-		 * to log index dump anyway.
+		 * to log LSM tree dump anyway.
 		 */
 		vy_log_tx_begin();
-		vy_log_dump_index(index->id, dump_lsn);
+		vy_log_dump_lsm(lsm->id, dump_lsn);
 		if (vy_log_tx_commit() < 0)
 			goto fail;
 		vy_run_discard(new_run);
 		goto delete_mems;
 	}
 
-	assert(new_run->info.min_lsn > index->dump_lsn);
+	assert(new_run->info.min_lsn > lsm->dump_lsn);
 	assert(new_run->info.max_lsn <= dump_lsn);
 
 	/*
@@ -750,28 +747,28 @@ vy_task_dump_complete(struct vy_scheduler *scheduler, struct vy_task *task)
 		tuple_unref(min_key);
 		goto fail;
 	}
-	begin_range = vy_range_tree_psearch(index->tree, min_key);
-	end_range = vy_range_tree_nsearch(index->tree, max_key);
+	begin_range = vy_range_tree_psearch(lsm->tree, min_key);
+	end_range = vy_range_tree_nsearch(lsm->tree, max_key);
 	tuple_unref(min_key);
 	tuple_unref(max_key);
 
 	/*
 	 * For each intersected range allocate a slice of the new run.
 	 */
-	new_slices = calloc(index->range_count, sizeof(*new_slices));
+	new_slices = calloc(lsm->range_count, sizeof(*new_slices));
 	if (new_slices == NULL) {
-		diag_set(OutOfMemory, index->range_count * sizeof(*new_slices),
+		diag_set(OutOfMemory, lsm->range_count * sizeof(*new_slices),
 			 "malloc", "struct vy_slice *");
 		goto fail;
 	}
 	for (range = begin_range, i = 0; range != end_range;
-	     range = vy_range_tree_next(index->tree, range), i++) {
+	     range = vy_range_tree_next(lsm->tree, range), i++) {
 		slice = vy_slice_new(vy_log_next_id(), new_run,
-				     range->begin, range->end, index->cmp_def);
+				     range->begin, range->end, lsm->cmp_def);
 		if (slice == NULL)
 			goto fail_free_slices;
 
-		assert(i < index->range_count);
+		assert(i < lsm->range_count);
 		new_slices[i] = slice;
 		/*
 		 * It's OK to yield here for the range tree can only
@@ -785,10 +782,10 @@ vy_task_dump_complete(struct vy_scheduler *scheduler, struct vy_task *task)
 	 * Log change in metadata.
 	 */
 	vy_log_tx_begin();
-	vy_log_create_run(index->id, new_run->id, dump_lsn);
+	vy_log_create_run(lsm->id, new_run->id, dump_lsn);
 	for (range = begin_range, i = 0; range != end_range;
-	     range = vy_range_tree_next(index->tree, range), i++) {
-		assert(i < index->range_count);
+	     range = vy_range_tree_next(lsm->tree, range), i++) {
+		assert(i < lsm->range_count);
 		slice = new_slices[i];
 		vy_log_insert_slice(range->id, new_run->id, slice->id,
 				    tuple_data_or_null(slice->begin),
@@ -797,15 +794,15 @@ vy_task_dump_complete(struct vy_scheduler *scheduler, struct vy_task *task)
 		if (++loops % VY_YIELD_LOOPS == 0)
 			fiber_sleep(0); /* see comment above */
 	}
-	vy_log_dump_index(index->id, dump_lsn);
+	vy_log_dump_lsm(lsm->id, dump_lsn);
 	if (vy_log_tx_commit() < 0)
 		goto fail_free_slices;
 
 	/*
 	 * Account the new run.
 	 */
-	vy_index_add_run(index, new_run);
-	vy_stmt_counter_add_disk(&index->stat.disk.dump.out, &new_run->count);
+	vy_lsm_add_run(lsm, new_run);
+	vy_stmt_counter_add_disk(&lsm->stat.disk.dump.out, &new_run->count);
 
 	/* Drop the reference held by the task. */
 	vy_run_unref(new_run);
@@ -814,23 +811,23 @@ vy_task_dump_complete(struct vy_scheduler *scheduler, struct vy_task *task)
 	 * Add new slices to ranges.
 	 */
 	for (range = begin_range, i = 0; range != end_range;
-	     range = vy_range_tree_next(index->tree, range), i++) {
-		assert(i < index->range_count);
+	     range = vy_range_tree_next(lsm->tree, range), i++) {
+		assert(i < lsm->range_count);
 		slice = new_slices[i];
-		vy_index_unacct_range(index, range);
+		vy_lsm_unacct_range(lsm, range);
 		vy_range_add_slice(range, slice);
-		vy_index_acct_range(index, range);
-		vy_range_update_compact_priority(range, &index->opts);
+		vy_lsm_acct_range(lsm, range);
+		vy_range_update_compact_priority(range, &lsm->opts);
 		if (!vy_range_is_scheduled(range))
-			vy_range_heap_update(&index->range_heap,
+			vy_range_heap_update(&lsm->range_heap,
 					     &range->heap_node);
 		range->version++;
 		/*
 		 * If we yield here, a concurrent fiber will see
 		 * a range with a run slice containing statements
-		 * present in the in-memory trees of the index.
+		 * present in the in-memory indexes of the LSM tree.
 		 * This is OK, because read iterator won't use the
-		 * new run slice until index->dump_lsn is bumped,
+		 * new run slice until lsm->dump_lsn is bumped,
 		 * which is only done after in-memory trees are
 		 * removed (see vy_read_iterator_add_disk()).
 		 */
@@ -843,34 +840,34 @@ delete_mems:
 	/*
 	 * Delete dumped in-memory trees.
 	 */
-	rlist_foreach_entry_safe(mem, &index->sealed, in_sealed, next_mem) {
+	rlist_foreach_entry_safe(mem, &lsm->sealed, in_sealed, next_mem) {
 		if (mem->generation > scheduler->dump_generation)
 			continue;
-		vy_stmt_counter_add(&index->stat.disk.dump.in, &mem->count);
-		vy_index_delete_mem(index, mem);
+		vy_stmt_counter_add(&lsm->stat.disk.dump.in, &mem->count);
+		vy_lsm_delete_mem(lsm, mem);
 	}
-	index->dump_lsn = dump_lsn;
-	index->stat.disk.dump.count++;
+	lsm->dump_lsn = dump_lsn;
+	lsm->stat.disk.dump.count++;
 
 	/* The iterator has been cleaned up in a worker thread. */
 	task->wi->iface->close(task->wi);
 
-	index->is_dumping = false;
-	vy_scheduler_update_index(scheduler, index);
+	lsm->is_dumping = false;
+	vy_scheduler_update_lsm(scheduler, lsm);
 
-	if (index->index_id != 0)
-		vy_scheduler_unpin_index(scheduler, index->pk);
+	if (lsm->index_id != 0)
+		vy_scheduler_unpin_lsm(scheduler, lsm->pk);
 
 	assert(scheduler->dump_task_count > 0);
 	scheduler->dump_task_count--;
 
-	say_info("%s: dump completed", vy_index_name(index));
+	say_info("%s: dump completed", vy_lsm_name(lsm));
 
 	vy_scheduler_complete_dump(scheduler);
 	return 0;
 
 fail_free_slices:
-	for (i = 0; i < index->range_count; i++) {
+	for (i = 0; i < lsm->range_count; i++) {
 		slice = new_slices[i];
 		if (slice != NULL)
 			vy_slice_delete(slice);
@@ -886,21 +883,21 @@ static void
 vy_task_dump_abort(struct vy_scheduler *scheduler, struct vy_task *task,
 		   bool in_shutdown)
 {
-	struct vy_index *index = task->index;
+	struct vy_lsm *lsm = task->lsm;
 
-	assert(index->is_dumping);
+	assert(lsm->is_dumping);
 
 	/* The iterator has been cleaned up in a worker thread. */
 	task->wi->iface->close(task->wi);
 
 	/*
 	 * It's no use alerting the user if the server is
-	 * shutting down or the index was dropped.
+	 * shutting down or the LSM tree was dropped.
 	 */
-	if (!in_shutdown && !index->is_dropped) {
+	if (!in_shutdown && !lsm->is_dropped) {
 		struct error *e = diag_last_error(&task->diag);
 		error_log(e);
-		say_error("%s: dump failed", vy_index_name(index));
+		say_error("%s: dump failed", vy_lsm_name(lsm));
 	}
 
 	/* The metadata log is unavailable on shutdown. */
@@ -909,36 +906,36 @@ vy_task_dump_abort(struct vy_scheduler *scheduler, struct vy_task *task,
 	else
 		vy_run_unref(task->new_run);
 
-	index->is_dumping = false;
-	vy_scheduler_update_index(scheduler, index);
+	lsm->is_dumping = false;
+	vy_scheduler_update_lsm(scheduler, lsm);
 
-	if (index->index_id != 0)
-		vy_scheduler_unpin_index(scheduler, index->pk);
+	if (lsm->index_id != 0)
+		vy_scheduler_unpin_lsm(scheduler, lsm->pk);
 
 	assert(scheduler->dump_task_count > 0);
 	scheduler->dump_task_count--;
 
 	/*
-	 * If the index was dropped during dump, we abort the
-	 * dump task, but we should still poke the scheduler
+	 * If the LSM tree was dropped during dump, we abort
+	 * the dump task, but we should still poke the scheduler
 	 * to check if the current dump round is complete.
-	 * If we don't and this index happens to be the last
+	 * If we don't and this LSM tree happens to be the last
 	 * one of the current generation, the scheduler will
 	 * never be notified about dump completion and hence
 	 * memory will never be released.
 	 */
-	if (index->is_dropped)
+	if (lsm->is_dropped)
 		vy_scheduler_complete_dump(scheduler);
 }
 
 /**
- * Create a task to dump an index.
+ * Create a task to dump an LSM tree.
  *
  * On success the task is supposed to dump all in-memory
  * trees created at @scheduler->dump_generation.
  */
 static int
-vy_task_dump_new(struct vy_scheduler *scheduler, struct vy_index *index,
+vy_task_dump_new(struct vy_scheduler *scheduler, struct vy_lsm *lsm,
 		 struct vy_task **p_task)
 {
 	static struct vy_task_ops dump_ops = {
@@ -947,21 +944,21 @@ vy_task_dump_new(struct vy_scheduler *scheduler, struct vy_index *index,
 		.abort = vy_task_dump_abort,
 	};
 
-	assert(!index->is_dropped);
-	assert(!index->is_dumping);
-	assert(index->pin_count == 0);
-	assert(vy_index_generation(index) == scheduler->dump_generation);
+	assert(!lsm->is_dropped);
+	assert(!lsm->is_dumping);
+	assert(lsm->pin_count == 0);
+	assert(vy_lsm_generation(lsm) == scheduler->dump_generation);
 	assert(scheduler->dump_generation < scheduler->generation);
 
 	struct errinj *inj = errinj(ERRINJ_VY_INDEX_DUMP, ERRINJ_INT);
-	if (inj != NULL && inj->iparam == (int)index->index_id) {
+	if (inj != NULL && inj->iparam == (int)lsm->index_id) {
 		diag_set(ClientError, ER_INJECTION, "vinyl index dump");
 		goto err;
 	}
 
 	/* Rotate the active tree if it needs to be dumped. */
-	if (index->mem->generation == scheduler->dump_generation &&
-	    vy_index_rotate_mem(index) != 0)
+	if (lsm->mem->generation == scheduler->dump_generation &&
+	    vy_lsm_rotate_mem(lsm) != 0)
 		goto err;
 
 	/*
@@ -971,7 +968,7 @@ vy_task_dump_new(struct vy_scheduler *scheduler, struct vy_index *index,
 	int64_t dump_lsn = -1;
 	size_t max_output_count = 0;
 	struct vy_mem *mem, *next_mem;
-	rlist_foreach_entry_safe(mem, &index->sealed, in_sealed, next_mem) {
+	rlist_foreach_entry_safe(mem, &lsm->sealed, in_sealed, next_mem) {
 		if (mem->generation > scheduler->dump_generation)
 			continue;
 		vy_mem_wait_pinned(mem);
@@ -980,7 +977,7 @@ vy_task_dump_new(struct vy_scheduler *scheduler, struct vy_index *index,
 			 * The tree is empty so we can delete it
 			 * right away, without involving a worker.
 			 */
-			vy_index_delete_mem(index, mem);
+			vy_lsm_delete_mem(lsm, mem);
 			continue;
 		}
 		dump_lsn = MAX(dump_lsn, mem->max_lsn);
@@ -988,18 +985,18 @@ vy_task_dump_new(struct vy_scheduler *scheduler, struct vy_index *index,
 	}
 
 	if (max_output_count == 0) {
-		/* Nothing to do, pick another index. */
-		vy_scheduler_update_index(scheduler, index);
+		/* Nothing to do, pick another LSM tree. */
+		vy_scheduler_update_lsm(scheduler, lsm);
 		vy_scheduler_complete_dump(scheduler);
 		return 0;
 	}
 
 	struct vy_task *task = vy_task_new(&scheduler->task_pool,
-					   index, &dump_ops);
+					   lsm, &dump_ops);
 	if (task == NULL)
 		goto err;
 
-	struct vy_run *new_run = vy_run_prepare(scheduler->run_env, index);
+	struct vy_run *new_run = vy_run_prepare(scheduler->run_env, lsm);
 	if (new_run == NULL)
 		goto err_run;
 
@@ -1007,13 +1004,13 @@ vy_task_dump_new(struct vy_scheduler *scheduler, struct vy_index *index,
 	new_run->dump_lsn = dump_lsn;
 
 	struct vy_stmt_stream *wi;
-	bool is_last_level = (index->run_count == 0);
-	wi = vy_write_iterator_new(task->cmp_def, index->disk_format,
-				   index->upsert_format, index->index_id == 0,
+	bool is_last_level = (lsm->run_count == 0);
+	wi = vy_write_iterator_new(task->cmp_def, lsm->disk_format,
+				   lsm->upsert_format, lsm->index_id == 0,
 				   is_last_level, scheduler->read_views);
 	if (wi == NULL)
 		goto err_wi;
-	rlist_foreach_entry(mem, &index->sealed, in_sealed) {
+	rlist_foreach_entry(mem, &lsm->sealed, in_sealed) {
 		if (mem->generation > scheduler->dump_generation)
 			continue;
 		if (vy_write_iterator_new_mem(wi, mem) != 0)
@@ -1023,27 +1020,27 @@ vy_task_dump_new(struct vy_scheduler *scheduler, struct vy_index *index,
 	task->new_run = new_run;
 	task->wi = wi;
 	task->max_output_count = max_output_count;
-	task->bloom_fpr = index->opts.bloom_fpr;
-	task->page_size = index->opts.page_size;
+	task->bloom_fpr = lsm->opts.bloom_fpr;
+	task->page_size = lsm->opts.page_size;
 
-	index->is_dumping = true;
-	vy_scheduler_update_index(scheduler, index);
+	lsm->is_dumping = true;
+	vy_scheduler_update_lsm(scheduler, lsm);
 
-	if (index->index_id != 0) {
+	if (lsm->index_id != 0) {
 		/*
-		 * The primary index must be dumped after all
-		 * secondary indexes of the same space - see
-		 * vy_dump_heap_less(). To make sure it isn't
-		 * picked by the scheduler while all secondary
-		 * indexes are being dumped, temporarily remove
+		 * The primary index LSM tree must be dumped after
+		 * all secondary index LSM trees of the same space,
+		 * see vy_dump_heap_less(). To make sure it isn't
+		 * picked by the scheduler while all secondary index
+		 * LSM trees are being dumped, temporarily remove
 		 * it from the dump heap.
 		 */
-		vy_scheduler_pin_index(scheduler, index->pk);
+		vy_scheduler_pin_lsm(scheduler, lsm->pk);
 	}
 
 	scheduler->dump_task_count++;
 
-	say_info("%s: dump started", vy_index_name(index));
+	say_info("%s: dump started", vy_lsm_name(lsm));
 	*p_task = task;
 	return 0;
 
@@ -1055,7 +1052,7 @@ err_run:
 	vy_task_delete(&scheduler->task_pool, task);
 err:
 	diag_log();
-	say_error("%s: could not start dump", vy_index_name(index));
+	say_error("%s: could not start dump", vy_lsm_name(lsm));
 	return -1;
 }
 
@@ -1068,7 +1065,7 @@ vy_task_compact_execute(struct vy_scheduler *scheduler, struct vy_task *task)
 static int
 vy_task_compact_complete(struct vy_scheduler *scheduler, struct vy_task *task)
 {
-	struct vy_index *index = task->index;
+	struct vy_lsm *lsm = task->lsm;
 	struct vy_range *range = task->range;
 	struct vy_run *new_run = task->new_run;
 	struct vy_slice *first_slice = task->first_slice;
@@ -1084,8 +1081,8 @@ vy_task_compact_complete(struct vy_scheduler *scheduler, struct vy_task *task)
 	 * compacted runs.
 	 */
 	if (!vy_run_is_empty(new_run)) {
-		new_slice = vy_slice_new(vy_log_next_id(), new_run, NULL, NULL,
-					 index->cmp_def);
+		new_slice = vy_slice_new(vy_log_next_id(), new_run,
+					 NULL, NULL, lsm->cmp_def);
 		if (new_slice == NULL)
 			return -1;
 	}
@@ -1122,8 +1119,7 @@ vy_task_compact_complete(struct vy_scheduler *scheduler, struct vy_task *task)
 	rlist_foreach_entry(run, &unused_runs, in_unused)
 		vy_log_drop_run(run->id, gc_lsn);
 	if (new_slice != NULL) {
-		vy_log_create_run(index->id, new_run->id,
-				  new_run->dump_lsn);
+		vy_log_create_run(lsm->id, new_run->id, new_run->dump_lsn);
 		vy_log_insert_slice(range->id, new_run->id, new_slice->id,
 				    tuple_data_or_null(new_slice->begin),
 				    tuple_data_or_null(new_slice->end));
@@ -1142,10 +1138,8 @@ vy_task_compact_complete(struct vy_scheduler *scheduler, struct vy_task *task)
 		 */
 		vy_log_tx_begin();
 		rlist_foreach_entry(run, &unused_runs, in_unused) {
-			if (vy_run_remove_files(index->env->path,
-						index->space_id,
-						index->index_id,
-						run->id) == 0) {
+			if (vy_run_remove_files(lsm->env->path, lsm->space_id,
+						lsm->index_id, run->id) == 0) {
 				vy_log_forget_run(run->id);
 			}
 		}
@@ -1157,8 +1151,8 @@ vy_task_compact_complete(struct vy_scheduler *scheduler, struct vy_task *task)
 	 * otherwise discard it.
 	 */
 	if (new_slice != NULL) {
-		vy_index_add_run(index, new_run);
-		vy_stmt_counter_add_disk(&index->stat.disk.compact.out,
+		vy_lsm_add_run(lsm, new_run);
+		vy_stmt_counter_add_disk(&lsm->stat.disk.compact.out,
 					 &new_run->count);
 		/* Drop the reference held by the task. */
 		vy_run_unref(new_run);
@@ -1174,29 +1168,29 @@ vy_task_compact_complete(struct vy_scheduler *scheduler, struct vy_task *task)
 	 * the compacted slices were.
 	 */
 	RLIST_HEAD(compacted_slices);
-	vy_index_unacct_range(index, range);
+	vy_lsm_unacct_range(lsm, range);
 	if (new_slice != NULL)
 		vy_range_add_slice_before(range, new_slice, first_slice);
 	for (slice = first_slice; ; slice = next_slice) {
 		next_slice = rlist_next_entry(slice, in_range);
 		vy_range_remove_slice(range, slice);
 		rlist_add_entry(&compacted_slices, slice, in_range);
-		vy_stmt_counter_add_disk(&index->stat.disk.compact.in,
+		vy_stmt_counter_add_disk(&lsm->stat.disk.compact.in,
 					 &slice->count);
 		if (slice == last_slice)
 			break;
 	}
 	range->n_compactions++;
 	range->version++;
-	vy_index_acct_range(index, range);
-	vy_range_update_compact_priority(range, &index->opts);
-	index->stat.disk.compact.count++;
+	vy_lsm_acct_range(lsm, range);
+	vy_range_update_compact_priority(range, &lsm->opts);
+	lsm->stat.disk.compact.count++;
 
 	/*
 	 * Unaccount unused runs and delete compacted slices.
 	 */
 	rlist_foreach_entry(run, &unused_runs, in_unused)
-		vy_index_remove_run(index, run);
+		vy_lsm_remove_run(lsm, run);
 	rlist_foreach_entry_safe(slice, &compacted_slices,
 				 in_range, next_slice) {
 		vy_slice_wait_pinned(slice);
@@ -1207,11 +1201,11 @@ vy_task_compact_complete(struct vy_scheduler *scheduler, struct vy_task *task)
 	task->wi->iface->close(task->wi);
 
 	assert(range->heap_node.pos == UINT32_MAX);
-	vy_range_heap_insert(&index->range_heap, &range->heap_node);
-	vy_scheduler_update_index(scheduler, index);
+	vy_range_heap_insert(&lsm->range_heap, &range->heap_node);
+	vy_scheduler_update_lsm(scheduler, lsm);
 
 	say_info("%s: completed compacting range %s",
-		 vy_index_name(index), vy_range_str(range));
+		 vy_lsm_name(lsm), vy_range_str(range));
 	return 0;
 }
 
@@ -1219,7 +1213,7 @@ static void
 vy_task_compact_abort(struct vy_scheduler *scheduler, struct vy_task *task,
 		      bool in_shutdown)
 {
-	struct vy_index *index = task->index;
+	struct vy_lsm *lsm = task->lsm;
 	struct vy_range *range = task->range;
 
 	/* The iterator has been cleaned up in worker. */
@@ -1227,13 +1221,13 @@ vy_task_compact_abort(struct vy_scheduler *scheduler, struct vy_task *task,
 
 	/*
 	 * It's no use alerting the user if the server is
-	 * shutting down or the index was dropped.
+	 * shutting down or the LSM tree was dropped.
 	 */
-	if (!in_shutdown && !index->is_dropped) {
+	if (!in_shutdown && !lsm->is_dropped) {
 		struct error *e = diag_last_error(&task->diag);
 		error_log(e);
 		say_error("%s: failed to compact range %s",
-			  vy_index_name(index), vy_range_str(range));
+			  vy_lsm_name(lsm), vy_range_str(range));
 	}
 
 	/* The metadata log is unavailable on shutdown. */
@@ -1243,12 +1237,12 @@ vy_task_compact_abort(struct vy_scheduler *scheduler, struct vy_task *task,
 		vy_run_unref(task->new_run);
 
 	assert(range->heap_node.pos == UINT32_MAX);
-	vy_range_heap_insert(&index->range_heap, &range->heap_node);
-	vy_scheduler_update_index(scheduler, index);
+	vy_range_heap_insert(&lsm->range_heap, &range->heap_node);
+	vy_scheduler_update_lsm(scheduler, lsm);
 }
 
 static int
-vy_task_compact_new(struct vy_scheduler *scheduler, struct vy_index *index,
+vy_task_compact_new(struct vy_scheduler *scheduler, struct vy_lsm *lsm,
 		    struct vy_task **p_task)
 {
 	static struct vy_task_ops compact_ops = {
@@ -1260,32 +1254,32 @@ vy_task_compact_new(struct vy_scheduler *scheduler, struct vy_index *index,
 	struct heap_node *range_node;
 	struct vy_range *range;
 
-	assert(!index->is_dropped);
+	assert(!lsm->is_dropped);
 
-	range_node = vy_range_heap_top(&index->range_heap);
+	range_node = vy_range_heap_top(&lsm->range_heap);
 	assert(range_node != NULL);
 	range = container_of(range_node, struct vy_range, heap_node);
 	assert(range->compact_priority > 1);
 
-	if (vy_index_split_range(index, range) ||
-	    vy_index_coalesce_range(index, range)) {
-		vy_scheduler_update_index(scheduler, index);
+	if (vy_lsm_split_range(lsm, range) ||
+	    vy_lsm_coalesce_range(lsm, range)) {
+		vy_scheduler_update_lsm(scheduler, lsm);
 		return 0;
 	}
 
 	struct vy_task *task = vy_task_new(&scheduler->task_pool,
-					   index, &compact_ops);
+					   lsm, &compact_ops);
 	if (task == NULL)
 		goto err_task;
 
-	struct vy_run *new_run = vy_run_prepare(scheduler->run_env, index);
+	struct vy_run *new_run = vy_run_prepare(scheduler->run_env, lsm);
 	if (new_run == NULL)
 		goto err_run;
 
 	struct vy_stmt_stream *wi;
 	bool is_last_level = (range->compact_priority == range->slice_count);
-	wi = vy_write_iterator_new(task->cmp_def, index->disk_format,
-				   index->upsert_format, index->index_id == 0,
+	wi = vy_write_iterator_new(task->cmp_def, lsm->disk_format,
+				   lsm->upsert_format, lsm->index_id == 0,
 				   is_last_level, scheduler->read_views);
 	if (wi == NULL)
 		goto err_wi;
@@ -1314,19 +1308,19 @@ vy_task_compact_new(struct vy_scheduler *scheduler, struct vy_index *index,
 	task->range = range;
 	task->new_run = new_run;
 	task->wi = wi;
-	task->bloom_fpr = index->opts.bloom_fpr;
-	task->page_size = index->opts.page_size;
+	task->bloom_fpr = lsm->opts.bloom_fpr;
+	task->page_size = lsm->opts.page_size;
 
 	/*
 	 * Remove the range we are going to compact from the heap
 	 * so that it doesn't get selected again.
 	 */
-	vy_range_heap_delete(&index->range_heap, range_node);
+	vy_range_heap_delete(&lsm->range_heap, range_node);
 	range_node->pos = UINT32_MAX;
-	vy_scheduler_update_index(scheduler, index);
+	vy_scheduler_update_lsm(scheduler, lsm);
 
 	say_info("%s: started compacting range %s, runs %d/%d",
-		 vy_index_name(index), vy_range_str(range),
+		 vy_lsm_name(lsm), vy_range_str(range),
                  range->compact_priority, range->slice_count);
 	*p_task = task;
 	return 0;
@@ -1340,17 +1334,17 @@ err_run:
 err_task:
 	diag_log();
 	say_error("%s: could not start compacting range %s: %s",
-		  vy_index_name(index), vy_range_str(range));
+		  vy_lsm_name(lsm), vy_range_str(range));
 	return -1;
 }
 
 /**
- * Create a task for dumping an index. The new task is returned
- * in @ptask. If there's no index that needs to be dumped @ptask
+ * Create a task for dumping an LSM tree. The new task is returned
+ * in @ptask. If there's no LSM tree that needs to be dumped, @ptask
  * is set to NULL.
  *
- * We only dump an index if it needs to be snapshotted or the quota
- * on memory usage is exceeded. In either case, the oldest index
+ * We only dump an LSM tree if it needs to be snapshotted or the quota
+ * on memory usage is exceeded. In either case, the oldest LSM tree
- * is selected, because dumping it will free the maximal amount of
- * memory due to log structured design of the memory allocator.
+ * is selected, because dumping it will free the maximal amount of
+ * memory due to the log-structured design of the memory allocator.
  *
@@ -1370,38 +1364,38 @@ retry:
 		return 0;
 	}
 	/*
-	 * Look up the oldest index eligible for dump.
+	 * Look up the oldest LSM tree eligible for dump.
 	 */
 	struct heap_node *pn = vy_dump_heap_top(&scheduler->dump_heap);
 	if (pn == NULL) {
 		/*
-		 * There is no vinyl index and so no task to schedule.
+		 * There is no LSM tree and so no task to schedule.
 		 * Complete the current dump round.
 		 */
 		vy_scheduler_complete_dump(scheduler);
 		return 0;
 	}
-	struct vy_index *index = container_of(pn, struct vy_index, in_dump);
-	if (!index->is_dumping && index->pin_count == 0 &&
-	    vy_index_generation(index) == scheduler->dump_generation) {
+	struct vy_lsm *lsm = container_of(pn, struct vy_lsm, in_dump);
+	if (!lsm->is_dumping && lsm->pin_count == 0 &&
+	    vy_lsm_generation(lsm) == scheduler->dump_generation) {
 		/*
-		 * Dump is in progress and there is an index that
+		 * Dump is in progress and there is an LSM tree that
 		 * contains data that must be dumped at the current
 		 * round. Try to create a task for it.
 		 */
-		if (vy_task_dump_new(scheduler, index, ptask) != 0)
+		if (vy_task_dump_new(scheduler, lsm, ptask) != 0)
 			return -1;
 		if (*ptask != NULL)
 			return 0; /* new task */
 		/*
 		 * All in-memory trees eligible for dump were empty
 		 * and so were deleted without involving a worker
-		 * thread. Check another index.
+		 * thread. Check another LSM tree.
 		 */
 		goto retry;
 	}
 	/*
-	 * Dump is in progress, but all eligible indexes are
+	 * Dump is in progress, but all eligible LSM trees are
 	 * already being dumped. Wait until the current round
 	 * is complete.
 	 */
@@ -1430,13 +1424,13 @@ retry:
 	struct heap_node *pn = vy_compact_heap_top(&scheduler->compact_heap);
 	if (pn == NULL)
 		return 0; /* nothing to do */
-	struct vy_index *index = container_of(pn, struct vy_index, in_compact);
-	if (vy_index_compact_priority(index) <= 1)
+	struct vy_lsm *lsm = container_of(pn, struct vy_lsm, in_compact);
+	if (vy_lsm_compact_priority(lsm) <= 1)
 		return 0; /* nothing to do */
-	if (vy_task_compact_new(scheduler, index, ptask) != 0)
+	if (vy_task_compact_new(scheduler, lsm, ptask) != 0)
 		return -1;
 	if (*ptask == NULL)
-		goto retry; /* index dropped or range split/coalesced */
+		goto retry; /* LSM tree dropped or range split/coalesced */
 	return 0; /* new task */
 }
 
@@ -1480,7 +1474,7 @@ static int
 vy_scheduler_complete_task(struct vy_scheduler *scheduler,
 			   struct vy_task *task)
 {
-	if (task->index->is_dropped) {
+	if (task->lsm->is_dropped) {
 		if (task->ops->abort)
 			task->ops->abort(scheduler, task, false);
 		return 0;
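
Note for reviewers: the pinning calls renamed above pair up across the
dump task's lifetime. The following is a simplified sketch, not a
quote from the patch; vy_scheduler_unpin_lsm() is assumed to be the
renamed counterpart of vy_scheduler_unpin_index() in the completion
and abort paths, which are not quoted here.

	/* When a dump of a secondary index LSM tree starts: */
	if (lsm->index_id != 0) {
		/* Keep the primary LSM tree out of the dump heap. */
		vy_scheduler_pin_lsm(scheduler, lsm->pk);
	}

	/* ...and when the dump completes or is aborted: */
	if (lsm->index_id != 0) {
		/* Let the primary LSM tree be picked for dump again. */
		vy_scheduler_unpin_lsm(scheduler, lsm->pk);
	}

This pairing is what upholds the vy_dump_heap_less() invariant that a
space's primary LSM tree is dumped only after all its secondary ones.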
diff --git a/src/box/vy_scheduler.h b/src/box/vy_scheduler.h
index 6724c01b..4db86152 100644
--- a/src/box/vy_scheduler.h
+++ b/src/box/vy_scheduler.h
@@ -50,7 +50,7 @@ extern "C" {
 
 struct cord;
 struct fiber;
-struct vy_index;
+struct vy_lsm;
 struct vy_run_env;
 struct vy_scheduler;
 
@@ -97,13 +97,13 @@ struct vy_scheduler {
 	 */
 	pthread_mutex_t mutex;
 	/**
-	 * Heap of indexes, ordered by dump priority,
-	 * linked by vy_index::in_dump.
+	 * Heap of LSM trees, ordered by dump priority,
+	 * linked by vy_lsm::in_dump.
 	 */
 	heap_t dump_heap;
 	/**
-	 * Heap of indexes, ordered by compaction priority,
-	 * linked by vy_index::in_compact.
+	 * Heap of LSM trees, ordered by compaction priority,
+	 * linked by vy_lsm::in_compact.
 	 */
 	heap_t compact_heap;
 	/** Last error seen by the scheduler. */
@@ -184,16 +184,16 @@ void
 vy_scheduler_destroy(struct vy_scheduler *scheduler);
 
 /**
- * Add an index to scheduler dump/compaction queues.
+ * Add an LSM tree to scheduler dump/compaction queues.
  */
 void
-vy_scheduler_add_index(struct vy_scheduler *, struct vy_index *);
+vy_scheduler_add_lsm(struct vy_scheduler *, struct vy_lsm *);
 
 /**
- * Remove an index from scheduler dump/compaction queues.
+ * Remove an LSM tree from scheduler dump/compaction queues.
  */
 void
-vy_scheduler_remove_index(struct vy_scheduler *, struct vy_index *);
+vy_scheduler_remove_lsm(struct vy_scheduler *, struct vy_lsm *);
 
 /**
  * Trigger dump of all currently existing in-memory trees.
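
For context, the renamed registration functions are used symmetrically
over an LSM tree's lifetime. A minimal usage sketch, assuming an
already created scheduler and vy_lsm object and omitting error
handling:

	/* Make the LSM tree visible to dump/compaction scheduling. */
	vy_scheduler_add_lsm(scheduler, lsm);

	/* ... the tree accumulates data, gets dumped and compacted ... */

	/* Stop scheduling background work before dropping the tree. */
	vy_scheduler_remove_lsm(scheduler, lsm);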
diff --git a/src/box/vy_stat.h b/src/box/vy_stat.h
index 35442629..ca52c4d3 100644
--- a/src/box/vy_stat.h
+++ b/src/box/vy_stat.h
@@ -110,13 +110,13 @@ struct vy_compact_stat {
 	struct vy_stmt_counter out;
 };
 
-/** Vinyl index statistics. */
-struct vy_index_stat {
-	/** Number of lookups in the index. */
+/** LSM tree statistics. */
+struct vy_lsm_stat {
+	/** Number of lookups in the LSM tree. */
 	int64_t lookup;
-	/** Number of statements read from this index. */
+	/** Number of statements read from this LSM tree. */
 	struct vy_stmt_counter get;
-	/** Number of statements written to this index. */
+	/** Number of statements written to this LSM tree. */
 	struct vy_stmt_counter put;
 	/** Read latency. */
 	struct latency latency;
@@ -187,13 +187,13 @@ struct vy_tx_stat {
 };
 
 static inline int
-vy_index_stat_create(struct vy_index_stat *stat)
+vy_lsm_stat_create(struct vy_lsm_stat *stat)
 {
 	return latency_create(&stat->latency);
 }
 
 static inline void
-vy_index_stat_destroy(struct vy_index_stat *stat)
+vy_lsm_stat_destroy(struct vy_lsm_stat *stat)
 {
 	latency_destroy(&stat->latency);
 }
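
The create/destroy pair above only manages the latency histogram, so
embedding the stats in a vy_lsm object stays cheap. A minimal sketch,
presumably mirroring what vy_lsm_new()/vy_lsm_delete() do after this
rename:

	struct vy_lsm_stat stat;
	if (vy_lsm_stat_create(&stat) != 0)
		return -1;	/* latency histogram allocation failed */
	/* ... account lookup/get/put while the LSM tree is alive ... */
	vy_lsm_stat_destroy(&stat);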
diff --git a/src/box/vy_tx.c b/src/box/vy_tx.c
index 1b583240..ed423ba3 100644
--- a/src/box/vy_tx.c
+++ b/src/box/vy_tx.c
@@ -50,7 +50,7 @@
 #include "trivia/util.h"
 #include "tuple.h"
 #include "vy_cache.h"
-#include "vy_index.h"
+#include "vy_lsm.h"
 #include "vy_mem.h"
 #include "vy_stat.h"
 #include "vy_stmt.h"
@@ -61,18 +61,18 @@
 int
 write_set_cmp(struct txv *a, struct txv *b)
 {
-	int rc = a->index < b->index ? -1 : a->index > b->index;
+	int rc = a->lsm < b->lsm ? -1 : a->lsm > b->lsm;
 	if (rc == 0)
-		return vy_tuple_compare(a->stmt, b->stmt, a->index->cmp_def);
+		return vy_tuple_compare(a->stmt, b->stmt, a->lsm->cmp_def);
 	return rc;
 }
 
 int
 write_set_key_cmp(struct write_set_key *a, struct txv *b)
 {
-	int rc = a->index < b->index ? -1 : a->index > b->index;
+	int rc = a->lsm < b->lsm ? -1 : a->lsm > b->lsm;
 	if (rc == 0)
-		return vy_stmt_compare(a->stmt, b->stmt, a->index->cmp_def);
+		return vy_stmt_compare(a->stmt, b->stmt, a->lsm->cmp_def);
 	return rc;
 }
 
@@ -204,7 +204,7 @@ tx_manager_vlsn(struct tx_manager *xm)
 }
 
 static struct txv *
-txv_new(struct vy_tx *tx, struct vy_index *index, struct tuple *stmt)
+txv_new(struct vy_tx *tx, struct vy_lsm *lsm, struct tuple *stmt)
 {
 	struct tx_manager *xm = tx->xm;
 	struct txv *v = mempool_alloc(&xm->txv_mempool);
@@ -212,8 +212,8 @@ txv_new(struct vy_tx *tx, struct vy_index *index, struct tuple *stmt)
 		diag_set(OutOfMemory, sizeof(*v), "mempool", "struct txv");
 		return NULL;
 	}
-	v->index = index;
-	vy_index_ref(v->index);
+	v->lsm = lsm;
+	vy_lsm_ref(v->lsm);
 	v->mem = NULL;
 	v->stmt = stmt;
 	tuple_ref(stmt);
@@ -232,12 +232,12 @@ txv_delete(struct txv *v)
 	struct tx_manager *xm = v->tx->xm;
 	xm->write_set_size -= tuple_size(v->stmt);
 	tuple_unref(v->stmt);
-	vy_index_unref(v->index);
+	vy_lsm_unref(v->lsm);
 	mempool_free(&xm->txv_mempool, v);
 }
 
 static struct vy_read_interval *
-vy_read_interval_new(struct vy_tx *tx, struct vy_index *index,
+vy_read_interval_new(struct vy_tx *tx, struct vy_lsm *lsm,
 		     struct tuple *left, bool left_belongs,
 		     struct tuple *right, bool right_belongs)
 {
@@ -250,8 +250,8 @@ vy_read_interval_new(struct vy_tx *tx, struct vy_index *index,
 		return NULL;
 	}
 	interval->tx = tx;
-	vy_index_ref(index);
-	interval->index = index;
+	vy_lsm_ref(lsm);
+	interval->lsm = lsm;
 	tuple_ref(left);
 	interval->left = left;
 	interval->left_belongs = left_belongs;
@@ -272,7 +272,7 @@ vy_read_interval_delete(struct vy_read_interval *interval)
 	xm->read_set_size -= tuple_size(interval->left);
 	if (interval->left != interval->right)
 		xm->read_set_size -= tuple_size(interval->right);
-	vy_index_unref(interval->index);
+	vy_lsm_unref(interval->lsm);
 	tuple_unref(interval->left);
 	tuple_unref(interval->right);
 	mempool_free(&xm->read_interval_mempool, interval);
@@ -284,7 +284,7 @@ vy_tx_read_set_free_cb(vy_tx_read_set_t *read_set,
 {
 	(void)arg;
 	(void)read_set;
-	vy_index_read_set_remove(&interval->index->read_set, interval);
+	vy_lsm_read_set_remove(&interval->lsm->read_set, interval);
 	vy_read_interval_delete(interval);
 	return NULL;
 }
@@ -314,7 +314,7 @@ vy_tx_destroy(struct vy_tx *tx)
 
 	struct txv *v, *tmp;
 	stailq_foreach_entry_safe(v, tmp, &tx->log, next_in_log) {
-		vy_stmt_counter_unacct_tuple(&v->index->stat.txw.count,
+		vy_stmt_counter_unacct_tuple(&v->lsm->stat.txw.count,
 					     v->stmt);
 		txv_delete(v);
 	}
@@ -344,7 +344,7 @@ static int
 vy_tx_send_to_read_view(struct vy_tx *tx, struct txv *v)
 {
 	struct vy_tx_conflict_iterator it;
-	vy_tx_conflict_iterator_init(&it, &v->index->read_set, v->stmt);
+	vy_tx_conflict_iterator_init(&it, &v->lsm->read_set, v->stmt);
 	struct vy_tx *abort;
 	while ((abort = vy_tx_conflict_iterator_next(&it)) != NULL) {
 		/* Don't abort self. */
@@ -372,7 +372,7 @@ static void
 vy_tx_abort_readers(struct vy_tx *tx, struct txv *v)
 {
 	struct vy_tx_conflict_iterator it;
-	vy_tx_conflict_iterator_init(&it, &v->index->read_set, v->stmt);
+	vy_tx_conflict_iterator_init(&it, &v->lsm->read_set, v->stmt);
 	struct vy_tx *abort;
 	while ((abort = vy_tx_conflict_iterator_next(&it)) != NULL) {
 		/* Don't abort self. */
@@ -404,7 +404,7 @@ vy_tx_begin(struct tx_manager *xm)
 static int
 vy_tx_write_prepare(struct txv *v)
 {
-	struct vy_index *index = v->index;
+	struct vy_lsm *lsm = v->lsm;
 
 	/*
 	 * Allocate a new in-memory tree if either of the following
@@ -418,21 +418,21 @@ vy_tx_write_prepare(struct txv *v)
 	 *   We have to seal the tree, because we don't support mixing
 	 *   statements of different formats in the same tree.
 	 */
-	if (unlikely(index->mem->schema_version != schema_version ||
-		     index->mem->generation != *index->env->p_generation)) {
-		if (vy_index_rotate_mem(index) != 0)
+	if (unlikely(lsm->mem->schema_version != schema_version ||
+		     lsm->mem->generation != *lsm->env->p_generation)) {
+		if (vy_lsm_rotate_mem(lsm) != 0)
 			return -1;
 	}
-	vy_mem_pin(index->mem);
-	v->mem = index->mem;
+	vy_mem_pin(lsm->mem);
+	v->mem = lsm->mem;
 	return 0;
 }
 
 /**
- * Write a single statement into an index. If the statement has
+ * Write a single statement into an LSM tree. If the statement has
  * an lsregion copy then use it, else create it.
  *
- * @param index       Index to write to.
+ * @param lsm         LSM tree to write to.
  * @param mem         In-memory tree to write to.
  * @param stmt        Statement allocated with malloc().
  * @param region_stmt NULL or the same statement as stmt,
@@ -442,7 +442,7 @@ vy_tx_write_prepare(struct txv *v)
  * @retval -1 Memory error.
  */
 static int
-vy_tx_write(struct vy_index *index, struct vy_mem *mem,
+vy_tx_write(struct vy_lsm *lsm, struct vy_mem *mem,
 	    struct tuple *stmt, const struct tuple **region_stmt)
 {
 	assert(vy_stmt_is_refable(stmt));
@@ -458,7 +458,7 @@ vy_tx_write(struct vy_index *index, struct vy_mem *mem,
 	if (vy_stmt_type(stmt) == IPROTO_UPSERT) {
 		struct tuple *deleted = NULL;
 		/* Invalidate cache element. */
-		vy_cache_on_write(&index->cache, stmt, &deleted);
+		vy_cache_on_write(&lsm->cache, stmt, &deleted);
 		if (deleted != NULL) {
 			struct tuple *applied =
 				vy_apply_upsert(stmt, deleted, mem->cmp_def,
@@ -467,8 +467,8 @@ vy_tx_write(struct vy_index *index, struct vy_mem *mem,
 			tuple_unref(deleted);
 			if (applied != NULL) {
 				assert(vy_stmt_type(applied) == IPROTO_REPLACE);
-				int rc = vy_index_set(index, mem, applied,
-						      region_stmt);
+				int rc = vy_lsm_set(lsm, mem, applied,
+						    region_stmt);
 				tuple_unref(applied);
 				return rc;
 			}
@@ -479,9 +479,9 @@ vy_tx_write(struct vy_index *index, struct vy_mem *mem,
 		}
 	} else {
 		/* Invalidate cache element. */
-		vy_cache_on_write(&index->cache, stmt, NULL);
+		vy_cache_on_write(&lsm->cache, stmt, NULL);
 	}
-	return vy_index_set(index, mem, stmt, region_stmt);
+	return vy_lsm_set(lsm, mem, stmt, region_stmt);
 }
 
 int
@@ -517,21 +517,21 @@ vy_tx_prepare(struct vy_tx *tx)
 	}
 
 	/*
-	 * Flush transactional changes to the index.
+	 * Flush transactional changes to the LSM tree.
 	 * Sic: the loop below must not yield after recovery.
 	 */
 	/* repsert - REPLACE/UPSERT */
 	const struct tuple *delete = NULL, *repsert = NULL;
 	MAYBE_UNUSED uint32_t current_space_id = 0;
 	stailq_foreach_entry(v, &tx->log, next_in_log) {
-		struct vy_index *index = v->index;
-		if (index->index_id == 0) {
+		struct vy_lsm *lsm = v->lsm;
+		if (lsm->index_id == 0) {
 			/* The beginning of the new txn_stmt is met. */
-			current_space_id = index->space_id;
+			current_space_id = lsm->space_id;
 			repsert = NULL;
 			delete = NULL;
 		}
-		assert(index->space_id == current_space_id);
+		assert(lsm->space_id == current_space_id);
 
-		/* Do not save statements that was overwritten by the same tx */
+		/* Do not save statements that were overwritten by the same tx */
 		if (v->is_overwritten)
@@ -570,7 +570,7 @@ vy_tx_prepare(struct vy_tx *tx)
 		vy_stmt_set_lsn(v->stmt, MAX_LSN + tx->psn);
 		const struct tuple **region_stmt =
 			(type == IPROTO_DELETE) ? &delete : &repsert;
-		if (vy_tx_write(index, v->mem, v->stmt, region_stmt) != 0)
+		if (vy_tx_write(lsm, v->mem, v->stmt, region_stmt) != 0)
 			return -1;
 		v->region_stmt = *region_stmt;
 	}
@@ -600,7 +600,7 @@ vy_tx_commit(struct vy_tx *tx, int64_t lsn)
 	stailq_foreach_entry(v, &tx->log, next_in_log) {
 		if (v->region_stmt != NULL) {
 			vy_stmt_set_lsn((struct tuple *)v->region_stmt, lsn);
-			vy_index_commit_stmt(v->index, v->mem, v->region_stmt);
+			vy_lsm_commit_stmt(v->lsm, v->mem, v->region_stmt);
 		}
 		if (v->mem != NULL)
 			vy_mem_unpin(v->mem);
@@ -645,8 +645,8 @@ vy_tx_rollback_after_prepare(struct vy_tx *tx)
 	struct txv *v;
 	stailq_foreach_entry(v, &tx->log, next_in_log) {
 		if (v->region_stmt != NULL)
-			vy_index_rollback_stmt(v->index, v->mem,
-					       v->region_stmt);
+			vy_lsm_rollback_stmt(v->lsm, v->mem,
+					     v->region_stmt);
 		if (v->mem != NULL)
 			vy_mem_unpin(v->mem);
 	}
@@ -699,7 +699,7 @@ vy_tx_rollback_to_savepoint(struct vy_tx *tx, void *svp)
 }
 
 int
-vy_tx_track(struct vy_tx *tx, struct vy_index *index,
+vy_tx_track(struct vy_tx *tx, struct vy_lsm *lsm,
 	    struct tuple *left, bool left_belongs,
 	    struct tuple *right, bool right_belongs)
 {
@@ -709,7 +709,7 @@ vy_tx_track(struct vy_tx *tx, struct vy_index *index,
 	}
 
 	struct vy_read_interval *new_interval;
-	new_interval = vy_read_interval_new(tx, index, left, left_belongs,
+	new_interval = vy_read_interval_new(tx, lsm, left, left_belongs,
 					    right, right_belongs);
 	if (new_interval == NULL)
 		return -1;
@@ -725,7 +725,7 @@ vy_tx_track(struct vy_tx *tx, struct vy_index *index,
 
 	struct vy_read_interval *interval;
 	interval = vy_tx_read_set_inext(&it);
-	if (interval != NULL && interval->index == index) {
+	if (interval != NULL && interval->lsm == lsm) {
 		if (vy_read_interval_cmpr(interval, new_interval) >= 0) {
 			/*
 			 * There is an interval in the tree spanning
@@ -742,13 +742,13 @@ vy_tx_track(struct vy_tx *tx, struct vy_index *index,
 		vy_tx_read_set_isearch_gt(&tx->read_set, new_interval, &it);
 
 	while ((interval = vy_tx_read_set_inext(&it)) != NULL &&
-	       interval->index == index &&
+	       interval->lsm == lsm &&
 	       vy_read_interval_should_merge(new_interval, interval))
 		stailq_add_tail_entry(&merge, interval, in_merge);
 
 	/*
 	 * Merge intersecting intervals with the new interval and
-	 * remove them from the transaction and index read sets.
+	 * remove them from the transaction and LSM tree read sets.
 	 */
 	if (!stailq_empty(&merge)) {
 		interval = stailq_first_entry(&merge, struct vy_read_interval,
@@ -771,38 +771,37 @@ vy_tx_track(struct vy_tx *tx, struct vy_index *index,
 		stailq_foreach_entry_safe(interval, next_interval, &merge,
 					  in_merge) {
 			vy_tx_read_set_remove(&tx->read_set, interval);
-			vy_index_read_set_remove(&index->read_set, interval);
+			vy_lsm_read_set_remove(&lsm->read_set, interval);
 			vy_read_interval_delete(interval);
 		}
 	}
 
 	vy_tx_read_set_insert(&tx->read_set, new_interval);
-	vy_index_read_set_insert(&index->read_set, new_interval);
+	vy_lsm_read_set_insert(&lsm->read_set, new_interval);
 	return 0;
 }
 
 int
-vy_tx_track_point(struct vy_tx *tx, struct vy_index *index,
-		  struct tuple *stmt)
+vy_tx_track_point(struct vy_tx *tx, struct vy_lsm *lsm, struct tuple *stmt)
 {
-	assert(tuple_field_count(stmt) >= index->cmp_def->part_count);
+	assert(tuple_field_count(stmt) >= lsm->cmp_def->part_count);
 
 	if (vy_tx_is_in_read_view(tx)) {
 		/* No point in tracking reads. */
 		return 0;
 	}
 
-	struct txv *v = write_set_search_key(&tx->write_set, index, stmt);
+	struct txv *v = write_set_search_key(&tx->write_set, lsm, stmt);
 	if (v != NULL && vy_stmt_type(v->stmt) != IPROTO_UPSERT) {
 		/* Reading from own write set is serializable. */
 		return 0;
 	}
 
-	return vy_tx_track(tx, index, stmt, true, stmt, true);
+	return vy_tx_track(tx, lsm, stmt, true, stmt, true);
 }
 
 int
-vy_tx_set(struct vy_tx *tx, struct vy_index *index, struct tuple *stmt)
+vy_tx_set(struct vy_tx *tx, struct vy_lsm *lsm, struct tuple *stmt)
 {
 	assert(vy_stmt_type(stmt) != 0);
 	/**
@@ -812,11 +811,10 @@ vy_tx_set(struct vy_tx *tx, struct vy_index *index, struct tuple *stmt)
 	vy_stmt_set_lsn(stmt, INT64_MAX);
 	struct tuple *applied = NULL;
 
-	/* Update concurrent index */
-	struct txv *old = write_set_search_key(&tx->write_set, index, stmt);
+	struct txv *old = write_set_search_key(&tx->write_set, lsm, stmt);
 	/* Found a match of the previous action of this transaction */
 	if (old != NULL && vy_stmt_type(stmt) == IPROTO_UPSERT) {
-		assert(index->index_id == 0);
+		assert(lsm->index_id == 0);
 		uint8_t old_type = vy_stmt_type(old->stmt);
 		assert(old_type == IPROTO_UPSERT ||
 		       old_type == IPROTO_INSERT ||
@@ -824,19 +822,19 @@ vy_tx_set(struct vy_tx *tx, struct vy_index *index, struct tuple *stmt)
 		       old_type == IPROTO_DELETE);
 		(void) old_type;
 
-		applied = vy_apply_upsert(stmt, old->stmt, index->cmp_def,
-					  index->mem_format,
-					  index->upsert_format, true);
-		index->stat.upsert.applied++;
+		applied = vy_apply_upsert(stmt, old->stmt, lsm->cmp_def,
+					  lsm->mem_format,
+					  lsm->upsert_format, true);
+		lsm->stat.upsert.applied++;
 		if (applied == NULL)
 			return -1;
 		stmt = applied;
 		assert(vy_stmt_type(stmt) != 0);
-		index->stat.upsert.squashed++;
+		lsm->stat.upsert.squashed++;
 	}
 
-	/* Allocate a MVCC container. */
+	/* Allocate an MVCC container. */
-	struct txv *v = txv_new(tx, index, stmt);
+	struct txv *v = txv_new(tx, lsm, stmt);
 	if (applied != NULL)
 		tuple_unref(applied);
 	if (v == NULL)
@@ -869,7 +867,7 @@ vy_tx_set(struct vy_tx *tx, struct vy_index *index, struct tuple *stmt)
 	write_set_insert(&tx->write_set, v);
 	tx->write_set_version++;
 	tx->write_size += tuple_size(stmt);
-	vy_stmt_counter_acct_tuple(&index->stat.txw.count, stmt);
+	vy_stmt_counter_acct_tuple(&lsm->stat.txw.count, stmt);
 	stailq_add_tail_entry(&tx->log, v, next_in_log);
 	return 0;
 }
@@ -877,13 +875,13 @@ vy_tx_set(struct vy_tx *tx, struct vy_index *index, struct tuple *stmt)
 void
 vy_txw_iterator_open(struct vy_txw_iterator *itr,
 		     struct vy_txw_iterator_stat *stat,
-		     struct vy_tx *tx, struct vy_index *index,
+		     struct vy_tx *tx, struct vy_lsm *lsm,
 		     enum iterator_type iterator_type,
 		     const struct tuple *key)
 {
 	itr->stat = stat;
 	itr->tx = tx;
-	itr->index = index;
+	itr->lsm = lsm;
 	itr->iterator_type = iterator_type;
 	itr->key = key;
 	itr->version = UINT32_MAX;
@@ -904,8 +902,8 @@ vy_txw_iterator_seek(struct vy_txw_iterator *itr,
 	itr->stat->lookup++;
 	itr->version = itr->tx->write_set_version;
 	itr->curr_txv = NULL;
-	struct vy_index *index = itr->index;
-	struct write_set_key k = { index, key };
+	struct vy_lsm *lsm = itr->lsm;
+	struct write_set_key k = { lsm, key };
 	struct txv *txv;
 	if (tuple_field_count(key) > 0) {
 		if (iterator_type == ITER_EQ)
@@ -914,9 +912,9 @@ vy_txw_iterator_seek(struct vy_txw_iterator *itr,
 			txv = write_set_nsearch(&itr->tx->write_set, &k);
 		else
 			txv = write_set_psearch(&itr->tx->write_set, &k);
-		if (txv == NULL || txv->index != index)
+		if (txv == NULL || txv->lsm != lsm)
 			return;
-		if (vy_stmt_compare(key, txv->stmt, index->cmp_def) == 0) {
+		if (vy_stmt_compare(key, txv->stmt, lsm->cmp_def) == 0) {
 			while (true) {
 				struct txv *next;
 				if (iterator_type == ITER_LE ||
@@ -924,10 +922,10 @@ vy_txw_iterator_seek(struct vy_txw_iterator *itr,
 					next = write_set_next(&itr->tx->write_set, txv);
 				else
 					next = write_set_prev(&itr->tx->write_set, txv);
-				if (next == NULL || next->index != index)
+				if (next == NULL || next->lsm != lsm)
 					break;
 				if (vy_stmt_compare(key, next->stmt,
-						    index->cmp_def) != 0)
+						    lsm->cmp_def) != 0)
 					break;
 				txv = next;
 			}
@@ -942,7 +940,7 @@ vy_txw_iterator_seek(struct vy_txw_iterator *itr,
 		assert(iterator_type == ITER_GE);
 		txv = write_set_psearch(&itr->tx->write_set, &k);
 	}
-	if (txv == NULL || txv->index != index)
+	if (txv == NULL || txv->lsm != lsm)
 		return;
 	itr->curr_txv = txv;
 }
@@ -963,11 +961,11 @@ vy_txw_iterator_next(struct vy_txw_iterator *itr, struct tuple **ret)
 		itr->curr_txv = write_set_prev(&itr->tx->write_set, itr->curr_txv);
 	else
 		itr->curr_txv = write_set_next(&itr->tx->write_set, itr->curr_txv);
-	if (itr->curr_txv != NULL && itr->curr_txv->index != itr->index)
+	if (itr->curr_txv != NULL && itr->curr_txv->lsm != itr->lsm)
 		itr->curr_txv = NULL;
 	if (itr->curr_txv != NULL && itr->iterator_type == ITER_EQ &&
 	    vy_stmt_compare(itr->key, itr->curr_txv->stmt,
-			    itr->index->cmp_def) != 0)
+			    itr->lsm->cmp_def) != 0)
 		itr->curr_txv = NULL;
 out:
 	if (itr->curr_txv != NULL) {
@@ -992,7 +990,7 @@ vy_txw_iterator_skip(struct vy_txw_iterator *itr,
 	    (itr->curr_txv == NULL || last_stmt == NULL ||
 	     iterator_direction(itr->iterator_type) *
 	     vy_tuple_compare(itr->curr_txv->stmt, last_stmt,
-			      itr->index->cmp_def) > 0)) {
+			      itr->lsm->cmp_def) > 0)) {
 		if (itr->curr_txv != NULL)
 			*ret = itr->curr_txv->stmt;
 		return;
@@ -1011,7 +1009,7 @@ vy_txw_iterator_skip(struct vy_txw_iterator *itr,
 
 	if (itr->iterator_type == ITER_EQ && last_stmt != NULL &&
 	    itr->curr_txv != NULL && vy_stmt_compare(itr->key,
-			itr->curr_txv->stmt, itr->index->cmp_def) != 0)
+			itr->curr_txv->stmt, itr->lsm->cmp_def) != 0)
 		itr->curr_txv = NULL;
 
 	if (itr->curr_txv != NULL) {
@@ -1040,7 +1038,7 @@ vy_txw_iterator_restore(struct vy_txw_iterator *itr,
 
 	if (itr->iterator_type == ITER_EQ && itr->curr_txv != NULL &&
 	    vy_stmt_compare(itr->key, itr->curr_txv->stmt,
-			    itr->index->cmp_def) != 0)
+			    itr->lsm->cmp_def) != 0)
 		itr->curr_txv = NULL;
 
 	if (prev_txv == itr->curr_txv)
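
Since write_set_cmp() orders txv objects by raw vy_lsm pointer first
and only then by key, all statements a transaction wrote to one LSM
tree form a contiguous run in the write set. A sketch of the GE-style
scan the iterator code above relies on (hypothetical loop, condensed
from vy_txw_iterator_seek() and vy_txw_iterator_next()):

	struct write_set_key k = { lsm, key };
	struct txv *txv = write_set_nsearch(&tx->write_set, &k);
	while (txv != NULL && txv->lsm == lsm) {
		/* ... visit txv->stmt, a write of tx to this tree ... */
		txv = write_set_next(&tx->write_set, txv);
	}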
diff --git a/src/box/vy_tx.h b/src/box/vy_tx.h
index 33238ed7..a59145be 100644
--- a/src/box/vy_tx.h
+++ b/src/box/vy_tx.h
@@ -42,7 +42,7 @@
 #include "iterator_type.h"
 #include "salad/stailq.h"
 #include "trivia/util.h"
-#include "vy_index.h"
+#include "vy_lsm.h"
 #include "vy_stat.h"
 #include "vy_read_set.h"
 #include "vy_read_view.h"
@@ -76,8 +76,8 @@ enum tx_state {
 struct txv {
 	/** Transaction. */
 	struct vy_tx *tx;
-	/** Index this operation is for. */
-	struct vy_index *index;
+	/** LSM tree this operation is for. */
+	struct vy_lsm *lsm;
 	/** In-memory tree to insert the statement into. */
 	struct vy_mem *mem;
 	/** Statement of this operation. */
@@ -106,10 +106,10 @@ struct txv {
 
 /**
  * Index of all modifications made by a transaction.
- * Ordered by index, then by key in the index.
+ * Ordered by LSM tree, then by key.
  */
 struct write_set_key {
-	struct vy_index *index;
+	struct vy_lsm *lsm;
 	const struct tuple *stmt;
 };
 
@@ -123,10 +123,10 @@ rb_gen_ext_key(MAYBE_UNUSED static inline, write_set_, write_set_t, struct txv,
 		in_set, write_set_cmp, struct write_set_key *, write_set_key_cmp);
 
 static inline struct txv *
-write_set_search_key(write_set_t *tree, struct vy_index *index,
+write_set_search_key(write_set_t *tree, struct vy_lsm *lsm,
 		     const struct tuple *stmt)
 {
-	struct write_set_key key = { .index = index, .stmt = stmt };
+	struct write_set_key key = { .lsm = lsm, .stmt = stmt };
 	return write_set_search(tree, &key);
 }
 
@@ -141,7 +141,7 @@ struct vy_tx {
 	struct stailq log;
 	/**
 	 * Writes of the transaction segregated by the changed
-	 * vy_index object.
+	 * vy_lsm object.
 	 */
 	write_set_t write_set;
 	/**
@@ -326,7 +326,7 @@ vy_tx_rollback_to_savepoint(struct vy_tx *tx, void *svp);
  * to will be aborted.
  *
  * @param tx            Transaction that invoked the read.
- * @param index         Index that was read from.
+ * @param lsm           LSM tree that was read from.
  * @param left          Left boundary of the read interval.
  * @param left_belongs  Set if the left boundary belongs to
  *                      the interval.
@@ -338,7 +338,7 @@ vy_tx_rollback_to_savepoint(struct vy_tx *tx, void *svp);
  * @retval -1 Memory error.
  */
 int
-vy_tx_track(struct vy_tx *tx, struct vy_index *index,
+vy_tx_track(struct vy_tx *tx, struct vy_lsm *lsm,
 	    struct tuple *left, bool left_belongs,
 	    struct tuple *right, bool right_belongs);
 
@@ -346,7 +346,7 @@ vy_tx_track(struct vy_tx *tx, struct vy_index *index,
  * Remember a point read in the conflict manager index.
  *
  * @param tx    Transaction that invoked the read.
- * @param index Index that was read from.
+ * @param lsm   LSM tree that was read from.
  * @param stmt  Key that was read.
  *
  * @retval  0 Success.
@@ -359,12 +359,11 @@ vy_tx_track(struct vy_tx *tx, struct vy_index *index,
  * transaction read it from its own write set.
  */
 int
-vy_tx_track_point(struct vy_tx *tx, struct vy_index *index,
-		  struct tuple *stmt);
+vy_tx_track_point(struct vy_tx *tx, struct vy_lsm *lsm, struct tuple *stmt);
 
 /** Add a statement to a transaction. */
 int
-vy_tx_set(struct vy_tx *tx, struct vy_index *index, struct tuple *stmt);
+vy_tx_set(struct vy_tx *tx, struct vy_lsm *lsm, struct tuple *stmt);
 
 /**
  * Iterator over the write set of a transaction.
@@ -374,8 +373,8 @@ struct vy_txw_iterator {
 	struct vy_txw_iterator_stat *stat;
 	/** Transaction whose write set is iterated. */
 	struct vy_tx *tx;
-	/** Index of interest. */
-	struct vy_index *index;
+	/** LSM tree of interest. */
+	struct vy_lsm *lsm;
 	/**
 	 * Iterator type.
 	 *
@@ -399,7 +398,7 @@ struct vy_txw_iterator {
 void
 vy_txw_iterator_open(struct vy_txw_iterator *itr,
 		     struct vy_txw_iterator_stat *stat,
-		     struct vy_tx *tx, struct vy_index *index,
+		     struct vy_tx *tx, struct vy_lsm *lsm,
 		     enum iterator_type iterator_type,
 		     const struct tuple *key);
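
A minimal sketch of driving the iterator declared above, assuming tx,
lsm, key and a vy_txw_iterator_stat come from the surrounding code;
only the open/next calls quoted in vy_tx.c are used, and statement
ownership and cleanup are omitted:

	struct vy_txw_iterator itr;
	struct tuple *stmt;
	vy_txw_iterator_open(&itr, &stat, tx, lsm, ITER_GE, key);
	vy_txw_iterator_next(&itr, &stmt);
	while (stmt != NULL) {
		/* stmt is the next statement tx wrote to this LSM tree. */
		vy_txw_iterator_next(&itr, &stmt);
	}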
 
diff --git a/test/unit/CMakeLists.txt b/test/unit/CMakeLists.txt
index 943788b9..7e1c95ed 100644
--- a/test/unit/CMakeLists.txt
+++ b/test/unit/CMakeLists.txt
@@ -160,7 +160,7 @@ add_executable(vy_point_lookup.test
     ${PROJECT_SOURCE_DIR}/src/box/vy_tx.c
     ${PROJECT_SOURCE_DIR}/src/box/vy_read_set.c
     ${PROJECT_SOURCE_DIR}/src/box/vy_upsert.c
-    ${PROJECT_SOURCE_DIR}/src/box/vy_index.c
+    ${PROJECT_SOURCE_DIR}/src/box/vy_lsm.c
     ${PROJECT_SOURCE_DIR}/src/box/vy_cache.c
     ${PROJECT_SOURCE_DIR}/src/box/index_def.c
     ${PROJECT_SOURCE_DIR}/src/box/schema_def.c
diff --git a/test/unit/vy_log_stub.c b/test/unit/vy_log_stub.c
index 7cfaff84..d4668408 100644
--- a/test/unit/vy_log_stub.c
+++ b/test/unit/vy_log_stub.c
@@ -51,9 +51,9 @@ vy_log_tx_commit(void)
 void
 vy_log_write(const struct vy_log_record *record) {}
 
-struct vy_index_recovery_info *
-vy_recovery_index_by_id(struct vy_recovery *recovery,
-			uint32_t space_id, uint32_t index_id)
+struct vy_lsm_recovery_info *
+vy_recovery_lsm_by_index_id(struct vy_recovery *recovery,
+			    uint32_t space_id, uint32_t index_id)
 {
 	unreachable();
 }
diff --git a/test/unit/vy_point_lookup.c b/test/unit/vy_point_lookup.c
index 30a61040..ec9b214e 100644
--- a/test/unit/vy_point_lookup.c
+++ b/test/unit/vy_point_lookup.c
@@ -1,6 +1,6 @@
 #include "trivia/util.h"
 #include "unit.h"
-#include "vy_index.h"
+#include "vy_lsm.h"
 #include "vy_cache.h"
 #include "vy_run.h"
 #include "fiber.h"
@@ -15,12 +15,12 @@ uint32_t schema_version;
 
 static int
 write_run(struct vy_run *run, const char *dir_name,
-	  struct vy_index *index, struct vy_stmt_stream *wi)
+	  struct vy_lsm *lsm, struct vy_stmt_stream *wi)
 {
 	struct vy_run_writer writer;
 	if (vy_run_writer_create(&writer, run, dir_name,
-				 index->space_id, index->index_id,
-				 index->cmp_def, index->key_def,
+				 lsm->space_id, lsm->index_id,
+				 lsm->cmp_def, lsm->key_def,
 				 4096, 0.1, 100500) != 0)
 		goto fail;
 
@@ -62,10 +62,9 @@ test_basic()
 	struct slab_cache *slab_cache = cord_slab_cache();
 
 	int rc;
-	struct vy_index_env index_env;
-	rc = vy_index_env_create(&index_env, ".", &generation,
-				 NULL, NULL);
-	is(rc, 0, "vy_index_env_create");
+	struct vy_lsm_env lsm_env;
+	rc = vy_lsm_env_create(&lsm_env, ".", &generation, NULL, NULL);
+	is(rc, 0, "vy_lsm_env_create");
 
 	struct vy_run_env run_env;
 	vy_run_env_create(&run_env);
@@ -92,14 +91,14 @@ test_basic()
 		index_def_new(512, 0, "primary", sizeof("primary") - 1, TREE,
 			      &index_opts, key_def, NULL);
 
-	struct vy_index *pk = vy_index_new(&index_env, &cache_env, &mem_env,
-					   index_def, format, NULL);
-	isnt(pk, NULL, "index is not NULL")
+	struct vy_lsm *pk = vy_lsm_new(&lsm_env, &cache_env, &mem_env,
+				       index_def, format, NULL);
+	isnt(pk, NULL, "lsm is not NULL")
 
 	struct vy_range *range = vy_range_new(1, NULL, NULL, pk->cmp_def);
 
 	isnt(pk, NULL, "range is not NULL")
-	vy_index_add_range(pk, range);
+	vy_lsm_add_range(pk, range);
 
 	struct rlist read_views = RLIST_HEAD_INITIALIZER(read_views);
 
@@ -115,7 +114,7 @@ test_basic()
 	rc = mkdir(path, 0777);
 	is(rc, 0, "temp dir create (3)");
 
-	/* Filling the index with test data */
+	/* Filling the LSM tree with test data */
 	/* Prepare variants */
 	const size_t num_of_keys = 100;
 	bool in_mem1[num_of_keys]; /* UPSERT value += 1, lsn 4 */
@@ -161,8 +160,8 @@ test_basic()
 		vy_mem_insert_template(pk->mem, &tmpl_val);
 	}
 
-	rc = vy_index_rotate_mem(pk);
-	is(rc, 0, "vy_index_rotate_mem");
+	rc = vy_lsm_rotate_mem(pk);
+	is(rc, 0, "vy_lsm_rotate_mem");
 
 	/* create first mem */
 	for (size_t i = 0; i < num_of_keys; i++) {
@@ -205,7 +204,7 @@ test_basic()
 	write_stream->iface->close(write_stream);
 	vy_mem_delete(run_mem);
 
-	vy_index_add_run(pk, run);
+	vy_lsm_add_run(pk, run);
 	struct vy_slice *slice = vy_slice_new(1, run, NULL, NULL, pk->cmp_def);
 	vy_range_add_slice(range, slice);
 	vy_run_unref(run);
@@ -240,7 +239,7 @@ test_basic()
 	write_stream->iface->close(write_stream);
 	vy_mem_delete(run_mem);
 
-	vy_index_add_run(pk, run);
+	vy_lsm_add_run(pk, run);
 	slice = vy_slice_new(1, run, NULL, NULL, pk->cmp_def);
 	vy_range_add_slice(range, slice);
 	vy_run_unref(run);
@@ -308,14 +307,14 @@ test_basic()
 	is(results_ok, true, "select results");
 	is(has_errors, false, "no errors happened");
 
-	vy_index_unref(pk);
+	vy_lsm_unref(pk);
 	index_def_delete(index_def);
 	tuple_format_unref(format);
 	vy_cache_destroy(&cache);
 	key_def_delete(key_def);
 	vy_cache_env_destroy(&cache_env);
 	vy_run_env_destroy(&run_env);
-	vy_index_env_destroy(&index_env);
+	vy_lsm_env_destroy(&lsm_env);
 
 	strcpy(path, "rm -rf ");
 	strcat(path, dir_name);
diff --git a/test/unit/vy_point_lookup.result b/test/unit/vy_point_lookup.result
index 6b8dcdef..4e68dda6 100644
--- a/test/unit/vy_point_lookup.result
+++ b/test/unit/vy_point_lookup.result
@@ -1,15 +1,15 @@
 1..1
 	*** test_basic ***
     1..15
-    ok 1 - vy_index_env_create
+    ok 1 - vy_lsm_env_create
     ok 2 - key_def is not NULL
     ok 3 - tuple_format_new is not NULL
-    ok 4 - index is not NULL
+    ok 4 - lsm is not NULL
     ok 5 - range is not NULL
     ok 6 - temp dir name is not NULL
     ok 7 - temp dir create (2)
     ok 8 - temp dir create (3)
-    ok 9 - vy_index_rotate_mem
+    ok 9 - vy_lsm_rotate_mem
     ok 10 - vy_run_new
     ok 11 - vy_run_write
     ok 12 - vy_run_new
-- 
2.11.0