Tarantool development patches archive
* [tarantool-patches] [PATCH] triggers: remove exceptions
@ 2019-08-08 18:30 Ilya Kosarev
  2019-08-15  6:09 ` [tarantool-patches] " Георгий Кириченко
  0 siblings, 1 reply; 2+ messages in thread
From: Ilya Kosarev @ 2019-08-08 18:30 UTC (permalink / raw)
  To: tarantool-patches; +Cc: georgy, i.kosarev, Ilya Kosarev

Triggers no longer throw exceptions. They now use return
codes to report errors.

Closes #4247
---
Branch: https://github.com/tarantool/tarantool/tree/i.kosarev/gh-4247-remove-exceptions-from-triggers
Issue: https://github.com/tarantool/tarantool/issues/4247
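
For reviewers skimming the diff: the calling convention change boils down
to the sketch below. example_trigger and something_went_wrong are made-up
names used only for illustration, and the new trigger_f typedef is an
assumption based on the src/lib/core/trigger.h change, which is not shown
in full in this excerpt.

/* Before: a trigger reports failure by unwinding with a C++ exception. */
static void
example_trigger(struct trigger *trigger, void *event)
{
	if (something_went_wrong(event))
		tnt_raise(ClientError, ER_UNSUPPORTED, "example", "bad event");
}

/* After: assuming trigger_f becomes
 *   typedef int (*trigger_f)(struct trigger *trigger, void *event);
 * a trigger records the error in the diag area and returns -1. */
static int
example_trigger(struct trigger *trigger, void *event)
{
	if (something_went_wrong(event)) {
		diag_set(ClientError, ER_UNSUPPORTED, "example", "bad event");
		return -1;
	}
	return 0;
}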

 src/box/alter.cc            | 2398 +++++++++++++++++++++++------------
 src/box/applier.cc          |   18 +-
 src/box/ck_constraint.c     |    9 +-
 src/box/ck_constraint.h     |    2 +-
 src/box/identifier.h        |   10 -
 src/box/iproto.cc           |    3 +-
 src/box/lua/call.c          |    5 +-
 src/box/lua/sequence.c      |    3 +-
 src/box/lua/space.cc        |    3 +-
 src/box/memtx_space.c       |   17 +-
 src/box/relay.cc            |    5 +-
 src/box/replication.cc      |  101 +-
 src/box/replication.h       |    2 +-
 src/box/schema.cc           |   28 +-
 src/box/schema.h            |   13 +-
 src/box/sequence.h          |    9 -
 src/box/session.cc          |    3 +-
 src/box/tuple.h             |   10 -
 src/box/txn.c               |   10 +-
 src/box/user.cc             |    9 +-
 src/box/user.h              |   14 +-
 src/box/vinyl.c             |   44 +-
 src/lib/core/fiber.c        |    3 +-
 src/lib/core/trigger.cc     |   22 +-
 src/lib/core/trigger.h      |    2 +-
 src/lua/trigger.c           |    7 +-
 src/main.cc                 |    3 +-
 test/unit/cbus.c            |    3 +-
 test/unit/swim.c            |    6 +-
 test/unit/swim_test_utils.c |    3 +-
 30 files changed, 1745 insertions(+), 1020 deletions(-)
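
The src/lib/core/trigger.h and trigger.cc hunks are not included in this
excerpt, so for context here is a plausible shape of trigger_run() after
the conversion. It is a sketch based on the current exception-based loop,
not a copy of the patch. A non-zero return means the error has already
been recorded with diag_set() and can be inspected by the caller (e.g.
via diag_last_error(diag_get())), so no try/catch is needed any longer.

int
trigger_run(struct rlist *list, void *event)
{
	struct trigger *trigger, *tmp;
	rlist_foreach_entry_safe(trigger, list, link, tmp) {
		if (trigger->run(trigger, event) != 0)
			return -1; /* error already set in the diag */
	}
	return 0;
}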

diff --git a/src/box/alter.cc b/src/box/alter.cc
index 4f2e34bf0..923cc19cd 100644
--- a/src/box/alter.cc
+++ b/src/box/alter.cc
@@ -60,7 +60,7 @@
 
 /* {{{ Auxiliary functions and methods. */
 
-static void
+static int
 access_check_ddl(const char *name, uint32_t object_id, uint32_t owner_uid,
 		 enum schema_object_type type, enum priv_type priv_type)
 {
@@ -71,7 +71,7 @@ access_check_ddl(const char *name, uint32_t object_id, uint32_t owner_uid,
 				~has_access);
 	bool is_owner = owner_uid == cr->uid || cr->uid == ADMIN;
 	if (access == 0)
-		return; /* Access granted. */
+		return 0; /* Access granted. */
 	/* Check for specific entity access. */
 	struct access *object = entity_access_get(type);
 	if (object) {
@@ -87,7 +87,7 @@ access_check_ddl(const char *name, uint32_t object_id, uint32_t owner_uid,
 	 * CREATE privilege is required.
 	 */
 	if (access == 0 || (is_owner && !(access & (PRIV_U | PRIV_C))))
-		return; /* Access granted. */
+		return 0; /* Access granted. */
 	/*
 	 * USAGE can be granted only globally.
 	 */
@@ -97,10 +97,12 @@ access_check_ddl(const char *name, uint32_t object_id, uint32_t owner_uid,
 		if (object != NULL)
 			access &= ~object[cr->auth_token].effective;
 		if (access == 0)
-			return; /* Access granted. */
+			return 0; /* Access granted. */
 	}
 	/* Create a meaningful error message. */
-	struct user *user = user_find_xc(cr->uid);
+	struct user *user = user_find(cr->uid);
+	if (user == NULL)
+		return -1;
 	const char *object_name;
 	const char *pname;
 	if (access & PRIV_U) {
@@ -111,15 +113,15 @@ access_check_ddl(const char *name, uint32_t object_id, uint32_t owner_uid,
 		object_name = schema_object_name(type);
 		pname = priv_name(access);
 	}
-	tnt_raise(AccessDeniedError, pname, object_name, name,
-		  user->def->name);
+	diag_set(AccessDeniedError, pname, object_name, name, user->def->name);
+	return -1;
 }
 
 /**
  * Throw an exception if the given index definition
  * is incompatible with a sequence.
  */
-static void
+static int
 index_def_check_sequence(struct index_def *index_def, uint32_t sequence_fieldno,
 			 const char *sequence_path, uint32_t sequence_path_len,
 			 const char *space_name)
@@ -140,16 +142,19 @@ index_def_check_sequence(struct index_def *index_def, uint32_t sequence_fieldno,
 		}
 	}
 	if (sequence_part == NULL) {
-		tnt_raise(ClientError, ER_MODIFY_INDEX, index_def->name,
+		diag_set(ClientError, ER_MODIFY_INDEX, index_def->name,
 			  space_name, "sequence field must be a part of "
 			  "the index");
+		return -1;
 	}
 	enum field_type type = sequence_part->type;
 	if (type != FIELD_TYPE_UNSIGNED && type != FIELD_TYPE_INTEGER) {
-		tnt_raise(ClientError, ER_MODIFY_INDEX, index_def->name,
+		diag_set(ClientError, ER_MODIFY_INDEX, index_def->name,
 			  space_name, "sequence cannot be used with "
 			  "a non-integer key");
+		return -1;
 	}
+	return 0;
 }
 
 /**
@@ -157,7 +162,7 @@ index_def_check_sequence(struct index_def *index_def, uint32_t sequence_fieldno,
  * Checks tuple (of _index space) and throws a nice error if it is invalid
  * Checks only types of fields and their count!
  */
-static void
+static int
 index_def_check_tuple(struct tuple *tuple)
 {
 	const mp_type common_template[] =
@@ -173,7 +178,7 @@ index_def_check_tuple(struct tuple *tuple)
 			goto err;
 		mp_next(&data);
 	}
-	return;
+	return 0;
 
 err:
 	char got[DIAG_ERRMSG_MAX];
@@ -184,51 +189,58 @@ err:
 		mp_next(&data);
 		p += snprintf(p, e - p, i ? ", %s" : "%s", mp_type_strs[type]);
 	}
-	tnt_raise(ClientError, ER_WRONG_INDEX_RECORD, got,
+	diag_set(ClientError, ER_WRONG_INDEX_RECORD, got,
 		  "space id (unsigned), index id (unsigned), name (string), "\
 		  "type (string), options (map), parts (array)");
+	return -1;
 }
 
 /**
  * Fill index_opts structure from opts field in tuple of space _index
  * Throw an error is unrecognized option.
  */
-static void
+static int
 index_opts_decode(struct index_opts *opts, const char *map,
 		  struct region *region)
 {
 	index_opts_create(opts);
 	if (opts_decode(opts, index_opts_reg, &map, ER_WRONG_INDEX_OPTIONS,
 			BOX_INDEX_FIELD_OPTS, region) != 0)
-		diag_raise();
+		return -1;
 	if (opts->distance == rtree_index_distance_type_MAX) {
-		tnt_raise(ClientError, ER_WRONG_INDEX_OPTIONS,
+		diag_set(ClientError, ER_WRONG_INDEX_OPTIONS,
 			  BOX_INDEX_FIELD_OPTS, "distance must be either "\
 			  "'euclid' or 'manhattan'");
+		return -1;
 	}
 	if (opts->page_size <= 0 || (opts->range_size > 0 &&
 				     opts->page_size > opts->range_size)) {
-		tnt_raise(ClientError, ER_WRONG_INDEX_OPTIONS,
+		diag_set(ClientError, ER_WRONG_INDEX_OPTIONS,
 			  BOX_INDEX_FIELD_OPTS,
 			  "page_size must be greater than 0 and "
 			  "less than or equal to range_size");
+		return -1;
 	}
 	if (opts->run_count_per_level <= 0) {
-		tnt_raise(ClientError, ER_WRONG_INDEX_OPTIONS,
+		diag_set(ClientError, ER_WRONG_INDEX_OPTIONS,
 			  BOX_INDEX_FIELD_OPTS,
 			  "run_count_per_level must be greater than 0");
+		return -1;
 	}
 	if (opts->run_size_ratio <= 1) {
-		tnt_raise(ClientError, ER_WRONG_INDEX_OPTIONS,
+		diag_set(ClientError, ER_WRONG_INDEX_OPTIONS,
 			  BOX_INDEX_FIELD_OPTS,
 			  "run_size_ratio must be greater than 1");
+		return -1;
 	}
 	if (opts->bloom_fpr <= 0 || opts->bloom_fpr > 1) {
-		tnt_raise(ClientError, ER_WRONG_INDEX_OPTIONS,
+		diag_set(ClientError, ER_WRONG_INDEX_OPTIONS,
 			  BOX_INDEX_FIELD_OPTS,
 			  "bloom_fpr must be greater than 0 and "
 			  "less than or equal to 1");
+		return -1;
 	}
+	return 0;
 }
 
 /**
@@ -236,16 +248,18 @@ index_opts_decode(struct index_opts *opts, const char *map,
  * only a deterministic persistent Lua function may be used in
  * functional index for now.
  */
-static void
+static int
 func_index_check_func(struct func *func) {
 	assert(func != NULL);
 	if (func->def->language != FUNC_LANGUAGE_LUA ||
 	    func->def->body == NULL || !func->def->is_deterministic ||
 	    !func->def->is_sandboxed) {
-		tnt_raise(ClientError, ER_WRONG_INDEX_OPTIONS, 0,
+		diag_set(ClientError, ER_WRONG_INDEX_OPTIONS, 0,
 			  "referenced function doesn't satisfy "
 			  "functional index function constraints");
+		return -1;
 	}
+	return 0;
 }
 
 /**
@@ -263,35 +277,48 @@ func_index_check_func(struct func *func) {
 static struct index_def *
 index_def_new_from_tuple(struct tuple *tuple, struct space *space)
 {
-	index_def_check_tuple(tuple);
+	if (index_def_check_tuple(tuple) != 0)
+		return NULL;
 
 	struct index_opts opts;
-	uint32_t id = tuple_field_u32_xc(tuple, BOX_INDEX_FIELD_SPACE_ID);
-	uint32_t index_id = tuple_field_u32_xc(tuple, BOX_INDEX_FIELD_ID);
-	enum index_type type =
-		STR2ENUM(index_type, tuple_field_cstr_xc(tuple,
-							 BOX_INDEX_FIELD_TYPE));
+	uint32_t id;
+	if (tuple_field_u32(tuple, BOX_INDEX_FIELD_SPACE_ID, &id) != 0)
+		return NULL;
+	uint32_t index_id;
+	if (tuple_field_u32(tuple, BOX_INDEX_FIELD_ID, &index_id) != 0)
+		return NULL;
+	const char *out = tuple_field_cstr(tuple, BOX_INDEX_FIELD_TYPE);
+	if (out == NULL)
+		return NULL;
+	enum index_type type = STR2ENUM(index_type, out);
 	uint32_t name_len;
-	const char *name = tuple_field_str_xc(tuple, BOX_INDEX_FIELD_NAME,
+	const char *name = tuple_field_str(tuple, BOX_INDEX_FIELD_NAME,
 					      &name_len);
-	const char *opts_field =
-		tuple_field_with_type_xc(tuple, BOX_INDEX_FIELD_OPTS,
-					 MP_MAP);
-	index_opts_decode(&opts, opts_field, &fiber()->gc);
+	if (name == NULL)
+		return NULL;
+	const char *opts_field = tuple_field_with_type(tuple,
+		BOX_INDEX_FIELD_OPTS, MP_MAP);
+	if (opts_field == NULL)
+		return NULL;
+	if (index_opts_decode(&opts, opts_field, &fiber()->gc) != 0)
+		return NULL;
 	const char *parts = tuple_field(tuple, BOX_INDEX_FIELD_PARTS);
 	uint32_t part_count = mp_decode_array(&parts);
 	if (name_len > BOX_NAME_MAX) {
-		tnt_raise(ClientError, ER_MODIFY_INDEX,
+		diag_set(ClientError, ER_MODIFY_INDEX,
 			  tt_cstr(name, BOX_INVALID_NAME_MAX),
 			  space_name(space), "index name is too long");
+		return NULL;
 	}
-	identifier_check_xc(name, name_len);
+	if (identifier_check(name, name_len) != 0)
+		return NULL;
 	struct key_def *key_def = NULL;
 	struct key_part_def *part_def = (struct key_part_def *)
 			malloc(sizeof(*part_def) * part_count);
 	if (part_def == NULL) {
-		tnt_raise(OutOfMemory, sizeof(*part_def) * part_count,
+		diag_set(OutOfMemory, sizeof(*part_def) * part_count,
 			  "malloc", "key_part_def");
+		return NULL;
 	}
 	auto key_def_guard = make_scoped_guard([&] {
 		free(part_def);
@@ -301,19 +328,21 @@ index_def_new_from_tuple(struct tuple *tuple, struct space *space)
 	if (key_def_decode_parts(part_def, part_count, &parts,
 				 space->def->fields,
 				 space->def->field_count, &fiber()->gc) != 0)
-		diag_raise();
+		return NULL;
 	bool for_func_index = opts.func_id > 0;
 	key_def = key_def_new(part_def, part_count, for_func_index);
 	if (key_def == NULL)
-		diag_raise();
+		return NULL;
 	struct index_def *index_def =
 		index_def_new(id, index_id, name, name_len, type,
 			      &opts, key_def, space_index_key_def(space, 0));
 	if (index_def == NULL)
-		diag_raise();
+		return NULL;
 	auto index_def_guard = make_scoped_guard([=] { index_def_delete(index_def); });
-	index_def_check_xc(index_def, space_name(space));
-	space_check_index_def_xc(space, index_def);
+	if (!index_def_is_valid(index_def, space_name(space)))
+		return NULL;
+	if (space_check_index_def(space, index_def) != 0)
+		return NULL;
 	/*
 	 * In case of functional index definition, resolve a
 	 * function pointer to perform a complete index build
@@ -331,15 +360,17 @@ index_def_new_from_tuple(struct tuple *tuple, struct space *space)
 	 */
 	struct func *func = NULL;
 	if (for_func_index && (func = func_by_id(opts.func_id)) != NULL) {
-		func_index_check_func(func);
+		if (func_index_check_func(func) != 0)
+			return NULL;
 		index_def_set_func(index_def, func);
 	}
 	if (index_def->iid == 0 && space->sequence != NULL)
-		index_def_check_sequence(index_def, space->sequence_fieldno,
+		if (index_def_check_sequence(index_def, space->sequence_fieldno,
 					 space->sequence_path,
 					 space->sequence_path != NULL ?
 					 strlen(space->sequence_path) : 0,
-					 space_name(space));
+					 space_name(space)) != 0)
+			return NULL;
 	index_def_guard.is_active = false;
 	return index_def;
 }
@@ -348,23 +379,25 @@ index_def_new_from_tuple(struct tuple *tuple, struct space *space)
  * Fill space opts from the msgpack stream (MP_MAP field in the
  * tuple).
  */
-static void
+static int
 space_opts_decode(struct space_opts *opts, const char *map,
 		  struct region *region)
 {
 	space_opts_create(opts);
 	if (opts_decode(opts, space_opts_reg, &map, ER_WRONG_SPACE_OPTIONS,
 			BOX_SPACE_FIELD_OPTS, region) != 0)
-		diag_raise();
+		return -1;
 	if (opts->sql != NULL) {
 		char *sql = strdup(opts->sql);
 		if (sql == NULL) {
 			opts->sql = NULL;
-			tnt_raise(OutOfMemory, strlen(opts->sql) + 1, "strdup",
+			diag_set(OutOfMemory, strlen(opts->sql) + 1, "strdup",
 				  "sql");
+			return -1;
 		}
 		opts->sql = sql;
 	}
+	return 0;
 }
 
 /**
@@ -380,15 +413,16 @@ space_opts_decode(struct space_opts *opts, const char *map,
  * @param fieldno Field number to decode. Used in error messages.
  * @param region Region to allocate field name.
  */
-static void
+static int
 field_def_decode(struct field_def *field, const char **data,
 		 const char *space_name, uint32_t name_len,
 		 uint32_t errcode, uint32_t fieldno, struct region *region)
 {
 	if (mp_typeof(**data) != MP_MAP) {
-		tnt_raise(ClientError, errcode, tt_cstr(space_name, name_len),
+		diag_set(ClientError, errcode, tt_cstr(space_name, name_len),
 			  tt_sprintf("field %d is not map",
 				     fieldno + TUPLE_INDEX_BASE));
+		return -1;
 	}
 	int count = mp_decode_map(data);
 	*field = field_def_default;
@@ -396,11 +430,12 @@ field_def_decode(struct field_def *field, const char **data,
 	uint32_t action_literal_len = strlen("nullable_action");
 	for (int i = 0; i < count; ++i) {
 		if (mp_typeof(**data) != MP_STR) {
-			tnt_raise(ClientError, errcode,
+			diag_set(ClientError, errcode,
 				  tt_cstr(space_name, name_len),
 				  tt_sprintf("field %d format is not map"\
 					     " with string keys",
 					     fieldno + TUPLE_INDEX_BASE));
+			return -1;
 		}
 		uint32_t key_len;
 		const char *key = mp_decode_str(data, &key_len);
@@ -408,7 +443,7 @@ field_def_decode(struct field_def *field, const char **data,
 				   ER_WRONG_SPACE_FORMAT,
 				   fieldno + TUPLE_INDEX_BASE, region,
 				   true) != 0)
-			diag_raise();
+			return -1;
 		if (is_action_missing &&
 		    key_len == action_literal_len &&
 		    memcmp(key, "nullable_action", action_literal_len) == 0)
@@ -420,44 +455,51 @@ field_def_decode(struct field_def *field, const char **data,
 			: ON_CONFLICT_ACTION_DEFAULT;
 	}
 	if (field->name == NULL) {
-		tnt_raise(ClientError, errcode, tt_cstr(space_name, name_len),
+		diag_set(ClientError, errcode, tt_cstr(space_name, name_len),
 			  tt_sprintf("field %d name is not specified",
 				     fieldno + TUPLE_INDEX_BASE));
+		return -1;
 	}
 	size_t field_name_len = strlen(field->name);
 	if (field_name_len > BOX_NAME_MAX) {
-		tnt_raise(ClientError, errcode, tt_cstr(space_name, name_len),
+		diag_set(ClientError, errcode, tt_cstr(space_name, name_len),
 			  tt_sprintf("field %d name is too long",
 				     fieldno + TUPLE_INDEX_BASE));
+		return -1;
 	}
-	identifier_check_xc(field->name, field_name_len);
+	if (identifier_check(field->name, field_name_len) != 0)
+		return -1;
 	if (field->type == field_type_MAX) {
-		tnt_raise(ClientError, errcode, tt_cstr(space_name, name_len),
+		diag_set(ClientError, errcode, tt_cstr(space_name, name_len),
 			  tt_sprintf("field %d has unknown field type",
 				     fieldno + TUPLE_INDEX_BASE));
+		return -1;
 	}
 	if (field->nullable_action == on_conflict_action_MAX) {
-		tnt_raise(ClientError, errcode, tt_cstr(space_name, name_len),
+		diag_set(ClientError, errcode, tt_cstr(space_name, name_len),
 			  tt_sprintf("field %d has unknown field on conflict "
 				     "nullable action",
 				     fieldno + TUPLE_INDEX_BASE));
+		return -1;
 	}
 	if (!((field->is_nullable && field->nullable_action ==
 	       ON_CONFLICT_ACTION_NONE)
 	      || (!field->is_nullable
 		  && field->nullable_action != ON_CONFLICT_ACTION_NONE))) {
-		tnt_raise(ClientError, errcode, tt_cstr(space_name, name_len),
+		diag_set(ClientError, errcode, tt_cstr(space_name, name_len),
 			  tt_sprintf("field %d has conflicting nullability and "
 				     "nullable action properties", fieldno +
 				     TUPLE_INDEX_BASE));
+		return -1;
 	}
 	if (field->coll_id != COLL_NONE &&
 	    field->type != FIELD_TYPE_STRING &&
 	    field->type != FIELD_TYPE_SCALAR &&
 	    field->type != FIELD_TYPE_ANY) {
-		tnt_raise(ClientError, errcode, tt_cstr(space_name, name_len),
+		diag_set(ClientError, errcode, tt_cstr(space_name, name_len),
 			  tt_sprintf("collation is reasonable only for "
 				     "string, scalar and any fields"));
+		return -1;
 	}
 
 	const char *dv = field->default_value;
@@ -465,8 +507,9 @@ field_def_decode(struct field_def *field, const char **data,
 		field->default_value_expr = sql_expr_compile(sql_get(), dv,
 							     strlen(dv));
 		if (field->default_value_expr == NULL)
-			diag_raise();
+			return -1;
 	}
+	return 0;
 }
 
 /**
@@ -479,20 +522,26 @@ field_def_decode(struct field_def *field, const char **data,
  *
  * @retval Array of fields.
  */
-static struct field_def *
+static int
 space_format_decode(const char *data, uint32_t *out_count,
 		    const char *space_name, uint32_t name_len,
-		    uint32_t errcode, struct region *region)
+		    uint32_t errcode, struct region *region, struct field_def **fields)
 {
 	/* Type is checked by _space format. */
 	assert(mp_typeof(*data) == MP_ARRAY);
 	uint32_t count = mp_decode_array(&data);
 	*out_count = count;
-	if (count == 0)
-		return NULL;
+	if (count == 0) {
+		*fields = NULL;
+		return 0;
+	}
 	size_t size = count * sizeof(struct field_def);
 	struct field_def *region_defs =
-		(struct field_def *) region_alloc_xc(region, size);
+		(struct field_def *) region_alloc(region, size);
+	if (region_defs == NULL) {
+		diag_set(OutOfMemory, size, "region", "new slab");
+		return -1;
+	}
 	/*
 	 * Nullify to prevent a case when decoding will fail in
 	 * the middle and space_def_destroy_fields() below will
@@ -503,11 +552,13 @@ space_format_decode(const char *data, uint32_t *out_count,
 		space_def_destroy_fields(region_defs, count, false);
 	});
 	for (uint32_t i = 0; i < count; ++i) {
-		field_def_decode(&region_defs[i], &data, space_name, name_len,
-				 errcode, i, region);
+		if (field_def_decode(&region_defs[i], &data, space_name, name_len,
+				 errcode, i, region) != 0)
+			return -1;
 	}
 	fields_guard.is_active = false;
-	return region_defs;
+	*fields = region_defs;
+	return 0;
 }
 
 /**
@@ -518,79 +569,108 @@ space_def_new_from_tuple(struct tuple *tuple, uint32_t errcode,
 			 struct region *region)
 {
 	uint32_t name_len;
-	const char *name =
-		tuple_field_str_xc(tuple, BOX_SPACE_FIELD_NAME, &name_len);
-	if (name_len > BOX_NAME_MAX)
-		tnt_raise(ClientError, errcode,
-			  tt_cstr(name, BOX_INVALID_NAME_MAX),
-			  "space name is too long");
-	identifier_check_xc(name, name_len);
-	uint32_t id = tuple_field_u32_xc(tuple, BOX_SPACE_FIELD_ID);
+	const char *name = tuple_field_str(tuple, BOX_SPACE_FIELD_NAME,
+		&name_len);
+	if (name == NULL)
+		return NULL;
+	if (name_len > BOX_NAME_MAX) {
+		diag_set(ClientError, errcode,
+			 tt_cstr(name, BOX_INVALID_NAME_MAX),
+			 "space name is too long");
+		return NULL;
+	}
+	if (identifier_check(name, name_len) != 0)
+		return NULL;
+	uint32_t id;
+	if (tuple_field_u32(tuple, BOX_SPACE_FIELD_ID, &id) != 0)
+		return NULL;
 	if (id > BOX_SPACE_MAX) {
-		tnt_raise(ClientError, errcode, tt_cstr(name, name_len),
+		diag_set(ClientError, errcode, tt_cstr(name, name_len),
 			  "space id is too big");
+		return NULL;
 	}
 	if (id == 0) {
-		tnt_raise(ClientError, errcode, tt_cstr(name, name_len),
+		diag_set(ClientError, errcode, tt_cstr(name, name_len),
 			  "space id 0 is reserved");
+		return NULL;
 	}
-	uint32_t uid = tuple_field_u32_xc(tuple, BOX_SPACE_FIELD_UID);
-	uint32_t exact_field_count =
-		tuple_field_u32_xc(tuple, BOX_SPACE_FIELD_FIELD_COUNT);
+	uint32_t uid;
+	if (tuple_field_u32(tuple, BOX_SPACE_FIELD_UID, &uid) != 0)
+		return NULL;
+	uint32_t exact_field_count;
+	if (tuple_field_u32(tuple, BOX_SPACE_FIELD_FIELD_COUNT,
+		&exact_field_count) != 0)
+		return NULL;
 	uint32_t engine_name_len;
-	const char *engine_name =
-		tuple_field_str_xc(tuple, BOX_SPACE_FIELD_ENGINE,
-				   &engine_name_len);
+	const char *engine_name = tuple_field_str(tuple,
+		BOX_SPACE_FIELD_ENGINE, &engine_name_len);
+	if (engine_name == NULL)
+		return NULL;
 	/*
 	 * Engines are compiled-in so their names are known in
 	 * advance to be shorter than names of other identifiers.
 	 */
 	if (engine_name_len > ENGINE_NAME_MAX) {
-		tnt_raise(ClientError, errcode, tt_cstr(name, name_len),
+		diag_set(ClientError, errcode, tt_cstr(name, name_len),
 			  "space engine name is too long");
+		return NULL;
 	}
-	identifier_check_xc(engine_name, engine_name_len);
-	struct field_def *fields;
-	uint32_t field_count;
+	if (identifier_check(engine_name, engine_name_len) != 0)
+		return NULL;
 	/* Check space opts. */
-	const char *space_opts =
-		tuple_field_with_type_xc(tuple, BOX_SPACE_FIELD_OPTS,
-					 MP_MAP);
+	const char *space_opts = tuple_field_with_type(tuple,
+		BOX_SPACE_FIELD_OPTS, MP_MAP);
+	if (space_opts == NULL)
+		return NULL;
 	/* Check space format */
-	const char *format =
-		tuple_field_with_type_xc(tuple, BOX_SPACE_FIELD_FORMAT,
-					 MP_ARRAY);
-	fields = space_format_decode(format, &field_count, name,
-				     name_len, errcode, region);
+	const char *format = tuple_field_with_type(tuple,
+		BOX_SPACE_FIELD_FORMAT, MP_ARRAY);
+	if (format == NULL)
+		return NULL;
+	struct field_def *fields = NULL;
+	uint32_t field_count;
+	if (space_format_decode(format, &field_count, name,
+				     name_len, errcode, region, &fields) != 0)
+		return NULL;
 	auto fields_guard = make_scoped_guard([=] {
 		space_def_destroy_fields(fields, field_count, false);
 	});
 	if (exact_field_count != 0 &&
 	    exact_field_count < field_count) {
-		tnt_raise(ClientError, errcode, tt_cstr(name, name_len),
+		diag_set(ClientError, errcode, tt_cstr(name, name_len),
 			  "exact_field_count must be either 0 or >= "\
 			  "formatted field count");
+		return NULL;
 	}
 	struct space_opts opts;
-	space_opts_decode(&opts, space_opts, region);
+	if (space_opts_decode(&opts, space_opts, region) != 0)
+		return NULL;
 	/*
 	 * Currently, only predefined replication groups
 	 * are supported.
 	 */
 	if (opts.group_id != GROUP_DEFAULT &&
 	    opts.group_id != GROUP_LOCAL) {
-		tnt_raise(ClientError, ER_NO_SUCH_GROUP,
+		diag_set(ClientError, ER_NO_SUCH_GROUP,
 			  int2str(opts.group_id));
+		return NULL;
+	}
+	if (opts.is_view && opts.sql == NULL) {
+		diag_set(ClientError, ER_VIEW_MISSING_SQL);
+		return NULL;
 	}
-	if (opts.is_view && opts.sql == NULL)
-		tnt_raise(ClientError, ER_VIEW_MISSING_SQL);
 	struct space_def *def =
-		space_def_new_xc(id, uid, exact_field_count, name, name_len,
+		space_def_new(id, uid, exact_field_count, name, name_len,
 				 engine_name, engine_name_len, &opts, fields,
 				 field_count);
+	if (def == NULL)
+		return NULL;
 	auto def_guard = make_scoped_guard([=] { space_def_delete(def); });
-	struct engine *engine = engine_find_xc(def->engine_name);
-	engine_check_space_def_xc(engine, def);
+	struct engine *engine = engine_find(def->engine_name);
+	if (engine == NULL)
+		return NULL;
+	if (engine_check_space_def(engine, def) != 0)
+		return NULL;
 	def_guard.is_active = false;
 	return def;
 }
@@ -625,25 +705,41 @@ space_swap_fk_constraints(struct space *new_space, struct space *old_space)
  * True if the space has records identified by key 'uid'.
  * Uses 'iid' index.
  */
-bool
-space_has_data(uint32_t id, uint32_t iid, uint32_t uid)
+int
+space_has_data(uint32_t id, uint32_t iid, uint32_t uid, bool *out)
 {
 	struct space *space = space_by_id(id);
-	if (space == NULL)
-		return false;
+	if (space == NULL) {
+		*out = false;
+		return 0;
+	}
 
-	if (space_index(space, iid) == NULL)
-		return false;
+	if (space_index(space, iid) == NULL) {
+		*out = false;
+		return 0;
+	}
+
+	if (!space_is_memtx(space)) {
+		diag_set(ClientError, ER_UNSUPPORTED,
+			 space->engine->name, "system data");
+		return -1;
+	}
+	struct index *index = index_find(space, iid);
+	if (index == NULL)
+		return -1;
 
-	struct index *index = index_find_system_xc(space, iid);
 	char key[6];
 	assert(mp_sizeof_uint(BOX_SYSTEM_ID_MIN) <= sizeof(key));
 	mp_encode_uint(key, uid);
-	struct iterator *it = index_create_iterator_xc(index, ITER_EQ, key, 1);
+	struct iterator *it = index_create_iterator(index, ITER_EQ, key, 1);
+	if (it == NULL)
+		return -1;
 	IteratorGuard iter_guard(it);
-	if (iterator_next_xc(it) != NULL)
-		return true;
-	return false;
+	struct tuple *tuple;
+	if (iterator_next(it, &tuple) != 0)
+		return -1;
+	*out = (tuple != NULL);
+	return 0;
 }
 
 /* }}} */
@@ -707,8 +803,15 @@ public:
 static struct trigger *
 txn_alter_trigger_new(trigger_f run, void *data)
 {
+	size_t size = sizeof(struct trigger);
 	struct trigger *trigger = (struct trigger *)
-		region_calloc_object_xc(&in_txn()->region, struct trigger);
+		region_aligned_alloc(&in_txn()->region, size,
+			alignof(struct trigger));
+	if (trigger == NULL) {
+		diag_set(OutOfMemory, size, "region", "new slab");
+		return NULL;
+	}
+	trigger = (struct trigger *)memset(trigger, 0, size);
 	trigger->run = run;
 	trigger->data = data;
 	trigger->destroy = NULL;
@@ -751,11 +854,20 @@ static struct alter_space *
 alter_space_new(struct space *old_space)
 {
 	struct txn *txn = in_txn();
-	struct alter_space *alter = region_calloc_object_xc(&txn->region,
-							    struct alter_space);
+	size_t size = sizeof(struct alter_space);
+	struct alter_space *alter = (struct alter_space *)
+		region_aligned_alloc(&in_txn()->region, size,
+				     alignof(struct alter_space));
+	if (alter == NULL) {
+		diag_set(OutOfMemory, size, "region", "new slab");
+		return NULL;
+	}
+	alter = (struct alter_space *)memset(alter, 0, size);
 	rlist_create(&alter->ops);
 	alter->old_space = old_space;
-	alter->space_def = space_def_dup_xc(alter->old_space->def);
+	alter->space_def = space_def_dup(alter->old_space->def);
+	if (alter->space_def == NULL)
+		return NULL;
 	if (old_space->format != NULL)
 		alter->new_min_field_count = old_space->format->min_field_count;
 	else
@@ -858,7 +970,7 @@ struct mh_i32_t *AlterSpaceLock::registry;
  * of the dropped indexes.
  * Replace the old space with a new one in the space cache.
  */
-static void
+static int
 alter_space_commit(struct trigger *trigger, void *event)
 {
 	struct txn *txn = (struct txn *) event;
@@ -877,8 +989,13 @@ alter_space_commit(struct trigger *trigger, void *event)
 	 * indexes into their new places.
 	 */
 	class AlterSpaceOp *op;
-	rlist_foreach_entry(op, &alter->ops, link)
-		op->commit(alter, signature);
+	try {
+		rlist_foreach_entry(op, &alter->ops, link) {
+			op->commit(alter, signature);
+		}
+	} catch (Exception *e) {
+		return -1;
+	}
 
 	alter->new_space = NULL; /* for alter_space_delete(). */
 	/*
@@ -888,6 +1005,7 @@ alter_space_commit(struct trigger *trigger, void *event)
 	space_delete(alter->old_space);
 	alter->old_space = NULL;
 	alter_space_delete(alter);
+	return 0;
 }
 
 /**
@@ -898,14 +1016,18 @@ alter_space_commit(struct trigger *trigger, void *event)
  * Keep in mind that we may end up here in case of
  * alter_space_commit() failure (unlikely)
  */
-static void
+static int
 alter_space_rollback(struct trigger *trigger, void * /* event */)
 {
 	struct alter_space *alter = (struct alter_space *) trigger->data;
 	/* Rollback alter ops */
 	class AlterSpaceOp *op;
-	rlist_foreach_entry(op, &alter->ops, link) {
-		op->rollback(alter);
+	try {
+		rlist_foreach_entry(op, &alter->ops, link) {
+			op->rollback(alter);
+		}
+	} catch (Exception *e) {
+		return -1;
 	}
 	/* Rebuild index maps once for all indexes. */
 	space_fill_index_map(alter->old_space);
@@ -917,6 +1039,7 @@ alter_space_rollback(struct trigger *trigger, void * /* event */)
 	space_swap_fk_constraints(alter->new_space, alter->old_space);
 	space_cache_replace(alter->new_space, alter->old_space);
 	alter_space_delete(alter);
+	return 0;
 }
 
 /**
@@ -968,6 +1091,8 @@ alter_space_do(struct txn_stmt *stmt, struct alter_space *alter)
 	struct trigger *on_commit, *on_rollback;
 	on_commit = txn_alter_trigger_new(alter_space_commit, alter);
 	on_rollback = txn_alter_trigger_new(alter_space_rollback, alter);
+	if (on_commit == NULL || on_rollback == NULL)
+		diag_raise();
 
 	/* Create a definition of the new space. */
 	space_dump_def(alter->old_space, &alter->key_list);
@@ -1644,12 +1769,13 @@ MoveCkConstraints::rollback(struct alter_space *alter)
 /**
  * Delete the space. It is already removed from the space cache.
  */
-static void
+static int
 on_drop_space_commit(struct trigger *trigger, void *event)
 {
 	(void) event;
 	struct space *space = (struct space *)trigger->data;
 	space_delete(space);
+	return 0;
 }
 
 /**
@@ -1657,12 +1783,13 @@ on_drop_space_commit(struct trigger *trigger, void *event)
  * of all other events happened after the space was removed were
  * reverted by the cascading rollback.
  */
-static void
+static int
 on_drop_space_rollback(struct trigger *trigger, void *event)
 {
 	(void) event;
 	struct space *space = (struct space *)trigger->data;
 	space_cache_replace(NULL, space);
+	return 0;
 }
 
 /**
@@ -1672,13 +1799,14 @@ on_drop_space_rollback(struct trigger *trigger, void *event)
  * By the time the space is removed, it should be empty: we
  * rely on cascading rollback.
  */
-static void
+static int
 on_create_space_rollback(struct trigger *trigger, void *event)
 {
 	(void) event;
 	struct space *space = (struct space *)trigger->data;
 	space_cache_replace(space, NULL);
 	space_delete(space);
+	return 0;
 }
 
 /**
@@ -1780,12 +1908,13 @@ update_view_references(struct Select *select, int update_value,
  * Trigger which is fired to commit creation of new SQL view.
  * Its purpose is to release memory of SELECT.
  */
-static void
+static int
 on_create_view_commit(struct trigger *trigger, void *event)
 {
 	(void) event;
 	struct Select *select = (struct Select *)trigger->data;
 	sql_select_delete(sql_get(), select);
+	return 0;
 }
 
 /**
@@ -1793,13 +1922,15 @@ on_create_view_commit(struct trigger *trigger, void *event)
  * Decrements view reference counters of dependent spaces and
  * releases memory for SELECT.
  */
-static void
+static int
 on_create_view_rollback(struct trigger *trigger, void *event)
 {
 	(void) event;
 	struct Select *select = (struct Select *)trigger->data;
-	update_view_references(select, -1, true, NULL);
+	if (update_view_references(select, -1, true, NULL) != 0)
+		return -1;
 	sql_select_delete(sql_get(), select);
+	return 0;
 }
 
 /**
@@ -1807,12 +1938,13 @@ on_create_view_rollback(struct trigger *trigger, void *event)
  * Its purpose is to decrement view reference counters of
  * dependent spaces and release memory for SELECT.
  */
-static void
+static int
 on_drop_view_commit(struct trigger *trigger, void *event)
 {
 	(void) event;
 	struct Select *select = (struct Select *)trigger->data;
 	sql_select_delete(sql_get(), select);
+	return 0;
 }
 
 /**
@@ -1820,13 +1952,15 @@ on_drop_view_commit(struct trigger *trigger, void *event)
  * Release memory for struct SELECT compiled in
  * on_replace_dd_space trigger.
  */
-static void
+static int
 on_drop_view_rollback(struct trigger *trigger, void *event)
 {
 	(void) event;
 	struct Select *select = (struct Select *)trigger->data;
-	update_view_references(select, 1, true, NULL);
+	if (update_view_references(select, 1, true, NULL) != 0)
+		return -1;
 	sql_select_delete(sql_get(), select);
+	return 0;
 }
 
 /**
@@ -1879,7 +2013,7 @@ on_drop_view_rollback(struct trigger *trigger, void *event)
  * dynamic space configuration such a check would be particularly
  * clumsy, so it is simply not done.
  */
-static void
+static int
 on_replace_dd_space(struct trigger * /* trigger */, void *event)
 {
 	struct txn *txn = (struct txn *) event;
@@ -1901,19 +2035,25 @@ on_replace_dd_space(struct trigger * /* trigger */, void *event)
 	 * old_tuple ID field, if old_tuple is set, since UPDATE
 	 * may have changed space id.
 	 */
-	uint32_t old_id = tuple_field_u32_xc(old_tuple ? old_tuple : new_tuple,
-					     BOX_SPACE_FIELD_ID);
+	uint32_t old_id;
+	if (tuple_field_u32(old_tuple ? old_tuple : new_tuple,
+		BOX_SPACE_FIELD_ID, &old_id) != 0)
+		return -1;
 	struct space *old_space = space_by_id(old_id);
 	if (new_tuple != NULL && old_space == NULL) { /* INSERT */
-		struct space_def *def =
-			space_def_new_from_tuple(new_tuple, ER_CREATE_SPACE,
-						 region);
-		auto def_guard =
-			make_scoped_guard([=] { space_def_delete(def); });
-		access_check_ddl(def->name, def->id, def->uid, SC_SPACE,
-				 PRIV_C);
+		struct space_def *def = space_def_new_from_tuple(new_tuple,
+			ER_CREATE_SPACE, region);
+		if (def == NULL)
+			return -1;
+		auto def_guard = make_scoped_guard([=] { space_def_delete(def); });
+		if (access_check_ddl(def->name, def->id, def->uid, SC_SPACE,
+				     PRIV_C) != 0)
+			return -1;
 		RLIST_HEAD(empty_list);
-		struct space *space = space_new_xc(def, &empty_list);
+		struct space *space;
+		space = space_new(def, &empty_list);
+		if (space == NULL)
+			return -1;
 		/**
 		 * The new space must be inserted in the space
 		 * cache right away to achieve linearisable
@@ -1937,14 +2077,16 @@ on_replace_dd_space(struct trigger * /* trigger */, void *event)
 		 */
 		struct trigger *on_rollback =
 			txn_alter_trigger_new(on_create_space_rollback, space);
+		if (on_rollback == NULL)
+			return -1;
 		txn_stmt_on_rollback(stmt, on_rollback);
 		if (def->opts.is_view) {
 			struct Select *select = sql_view_compile(sql_get(),
-								 def->opts.sql);
+				def->opts.sql);
 			if (select == NULL)
-				diag_raise();
+				return -1;
 			auto select_guard = make_scoped_guard([=] {
-				sql_select_delete(sql_get(), select);
+			    sql_select_delete(sql_get(), select);
 			});
 			const char *disappeared_space;
 			if (update_view_references(select, 1, false,
@@ -1955,41 +2097,58 @@ on_replace_dd_space(struct trigger * /* trigger */, void *event)
 				 */
 				update_view_references(select, -1, false,
 						       &disappeared_space);
-				tnt_raise(ClientError, ER_NO_SUCH_SPACE,
+				diag_set(ClientError, ER_NO_SUCH_SPACE,
 					  disappeared_space);
+				return -1;
 			}
 			struct trigger *on_commit_view =
 				txn_alter_trigger_new(on_create_view_commit,
 						      select);
+			if (on_commit_view == NULL)
+				return -1;
 			txn_stmt_on_commit(stmt, on_commit_view);
 			struct trigger *on_rollback_view =
 				txn_alter_trigger_new(on_create_view_rollback,
 						      select);
+			if (on_rollback_view == NULL)
+				return -1;
 			txn_stmt_on_rollback(stmt, on_rollback_view);
 			select_guard.is_active = false;
 		}
 	} else if (new_tuple == NULL) { /* DELETE */
-		access_check_ddl(old_space->def->name, old_space->def->id,
-				 old_space->def->uid, SC_SPACE, PRIV_D);
+		if (access_check_ddl(old_space->def->name, old_space->def->id,
+				     old_space->def->uid, SC_SPACE, PRIV_D) != 0)
+			return -1;
 		/* Verify that the space is empty (has no indexes) */
 		if (old_space->index_count) {
-			tnt_raise(ClientError, ER_DROP_SPACE,
+			diag_set(ClientError, ER_DROP_SPACE,
 				  space_name(old_space),
 				  "the space has indexes");
+			return -1;
 		}
-		if (schema_find_grants("space", old_space->def->id)) {
-			tnt_raise(ClientError, ER_DROP_SPACE,
-				  space_name(old_space),
-				  "the space has grants");
+		bool out;
+		if (schema_find_grants("space", old_space->def->id, &out) != 0) {
+			return -1;
 		}
-		if (space_has_data(BOX_TRUNCATE_ID, 0, old_space->def->id))
-			tnt_raise(ClientError, ER_DROP_SPACE,
+		if (out) {
+			diag_set(ClientError, ER_DROP_SPACE,
+				 space_name(old_space),
+				 "the space has grants");
+			return -1;
+		}
+		if (space_has_data(BOX_TRUNCATE_ID, 0, old_space->def->id, &out) != 0)
+			return -1;
+		if (out) {
+			diag_set(ClientError, ER_DROP_SPACE,
 				  space_name(old_space),
 				  "the space has truncate record");
+			return -1;
+		}
 		if (old_space->def->view_ref_count > 0) {
-			tnt_raise(ClientError, ER_DROP_SPACE,
+			diag_set(ClientError, ER_DROP_SPACE,
 				  space_name(old_space),
 				  "other views depend on this space");
+			return -1;
 		}
 		/*
 		 * No need to check existence of parent keys,
@@ -1998,15 +2157,17 @@ on_replace_dd_space(struct trigger * /* trigger */, void *event)
 		 * one referenced index which can't be dropped
 		 * before constraint itself.
 		 */
-		if (! rlist_empty(&old_space->child_fk_constraint)) {
-			tnt_raise(ClientError, ER_DROP_SPACE,
+		if (!rlist_empty(&old_space->child_fk_constraint)) {
+			diag_set(ClientError, ER_DROP_SPACE,
 				  space_name(old_space),
 				  "the space has foreign key constraints");
+			return -1;
 		}
 		if (!rlist_empty(&old_space->ck_constraint)) {
-			tnt_raise(ClientError, ER_DROP_SPACE,
+			diag_set(ClientError, ER_DROP_SPACE,
 				  space_name(old_space),
 				  "the space has check constraints");
+			return -1;
 		}
 		/**
 		 * The space must be deleted from the space
@@ -2022,69 +2183,88 @@ on_replace_dd_space(struct trigger * /* trigger */, void *event)
 		++schema_version;
 		struct trigger *on_commit =
 			txn_alter_trigger_new(on_drop_space_commit, old_space);
+		if (on_commit == NULL)
+			return -1;
 		txn_stmt_on_commit(stmt, on_commit);
 		struct trigger *on_rollback =
 			txn_alter_trigger_new(on_drop_space_rollback, old_space);
+		if (on_rollback == NULL)
+			return -1;
 		txn_stmt_on_rollback(stmt, on_rollback);
 		if (old_space->def->opts.is_view) {
-			struct Select *select =
-				sql_view_compile(sql_get(),
-						 old_space->def->opts.sql);
+			struct Select *select = sql_view_compile(sql_get(),
+						  old_space->def->opts.sql);
 			if (select == NULL)
-				diag_raise();
+				return -1;
 			auto select_guard = make_scoped_guard([=] {
-				sql_select_delete(sql_get(), select);
+			    sql_select_delete(sql_get(), select);
 			});
 			struct trigger *on_commit_view =
 				txn_alter_trigger_new(on_drop_view_commit,
 						      select);
+			if (on_commit_view == NULL)
+				return -1;
 			txn_stmt_on_commit(stmt, on_commit_view);
 			struct trigger *on_rollback_view =
 				txn_alter_trigger_new(on_drop_view_rollback,
 						      select);
+			if (on_rollback_view == NULL)
+				return -1;
 			txn_stmt_on_rollback(stmt, on_rollback_view);
 			update_view_references(select, -1, true, NULL);
 			select_guard.is_active = false;
 		}
 	} else { /* UPDATE, REPLACE */
 		assert(old_space != NULL && new_tuple != NULL);
-		struct space_def *def =
-			space_def_new_from_tuple(new_tuple, ER_ALTER_SPACE,
-						 region);
-		auto def_guard =
-			make_scoped_guard([=] { space_def_delete(def); });
-		access_check_ddl(def->name, def->id, def->uid, SC_SPACE,
-				 PRIV_A);
-		if (def->id != space_id(old_space))
-			tnt_raise(ClientError, ER_ALTER_SPACE,
+		struct space_def *def = space_def_new_from_tuple(new_tuple,
+			ER_ALTER_SPACE, region);
+		if (def == NULL)
+			return -1;
+		auto def_guard = make_scoped_guard([=] { space_def_delete(def); });
+		if (access_check_ddl(def->name, def->id, def->uid, SC_SPACE,
+				     PRIV_A) != 0)
+			return -1;
+		if (def->id != space_id(old_space)) {
+			diag_set(ClientError, ER_ALTER_SPACE,
 				  space_name(old_space),
 				  "space id is immutable");
-		if (strcmp(def->engine_name, old_space->def->engine_name) != 0)
-			tnt_raise(ClientError, ER_ALTER_SPACE,
+			return -1;
+		}
+		if (strcmp(def->engine_name, old_space->def->engine_name) != 0) {
+			diag_set(ClientError, ER_ALTER_SPACE,
 				  space_name(old_space),
 				  "can not change space engine");
-		if (def->opts.group_id != space_group_id(old_space))
-			tnt_raise(ClientError, ER_ALTER_SPACE,
+			return -1;
+		}
+		if (def->opts.group_id != space_group_id(old_space)) {
+			diag_set(ClientError, ER_ALTER_SPACE,
 				  space_name(old_space),
 				  "replication group is immutable");
-		if (def->opts.is_view != old_space->def->opts.is_view)
-			tnt_raise(ClientError, ER_ALTER_SPACE,
+			return -1;
+		}
+		if (def->opts.is_view != old_space->def->opts.is_view) {
+			diag_set(ClientError, ER_ALTER_SPACE,
 				  space_name(old_space),
 				  "can not convert a space to "
 				  "a view and vice versa");
+			return -1;
+		}
 		if (strcmp(def->name, old_space->def->name) != 0 &&
-		    old_space->def->view_ref_count > 0)
-			tnt_raise(ClientError, ER_ALTER_SPACE,
-				  space_name(old_space),
-				  "can not rename space which is referenced by "
-				  "view");
+		    old_space->def->view_ref_count > 0) {
+			diag_set(ClientError, ER_ALTER_SPACE,
+				 space_name(old_space),
+				 "can not rename space which is referenced by "
+				 "view");
+			return -1;
+		}
 		/*
 		 * Allow change of space properties, but do it
 		 * in WAL-error-safe mode.
 		 */
 		struct alter_space *alter = alter_space_new(old_space);
-		auto alter_guard =
-			make_scoped_guard([=] {alter_space_delete(alter);});
+		if (alter == NULL)
+			return -1;
+		auto alter_guard = make_scoped_guard([=] { alter_space_delete(alter); });
 		/*
 		 * Calculate a new min_field_count. It can be
 		 * changed by resetting space:format(), if an old
@@ -2095,8 +2275,11 @@ on_replace_dd_space(struct trigger * /* trigger */, void *event)
 		 */
 		struct key_def **keys;
 		size_t bsize = old_space->index_count * sizeof(keys[0]);
-		keys = (struct key_def **) region_alloc_xc(&fiber()->gc,
-							   bsize);
+		keys = (struct key_def **) region_alloc(&fiber()->gc, bsize);
+		if (keys == NULL) {
+			diag_set(OutOfMemory, bsize, "region", "new slab");
+			return -1;
+		}
 		for (uint32_t i = 0; i < old_space->index_count; ++i)
 			keys[i] = old_space->index[i]->def->key_def;
 		alter->new_min_field_count =
@@ -2112,9 +2295,14 @@ on_replace_dd_space(struct trigger * /* trigger */, void *event)
 		alter_space_move_indexes(alter, 0, old_space->index_id_max + 1);
 		/* Remember to update schema_version. */
 		(void) new UpdateSchemaVersion(alter);
-		alter_space_do(stmt, alter);
+		try {
+			alter_space_do(stmt, alter);
+		} catch (Exception *e) {
+			return -1;
+		}
 		alter_guard.is_active = false;
 	}
+	return 0;
 }
 
 /**
@@ -2175,27 +2363,34 @@ index_is_used_by_fk_constraint(struct rlist *fk_list, uint32_t iid)
  *   for offsets is relinquished to the slab allocator as tuples
  *   are modified.
  */
-static void
+static int
 on_replace_dd_index(struct trigger * /* trigger */, void *event)
 {
 	struct txn *txn = (struct txn *) event;
 	struct txn_stmt *stmt = txn_current_stmt(txn);
 	struct tuple *old_tuple = stmt->old_tuple;
 	struct tuple *new_tuple = stmt->new_tuple;
-	uint32_t id = tuple_field_u32_xc(old_tuple ? old_tuple : new_tuple,
-					 BOX_INDEX_FIELD_SPACE_ID);
-	uint32_t iid = tuple_field_u32_xc(old_tuple ? old_tuple : new_tuple,
-					  BOX_INDEX_FIELD_ID);
-	struct space *old_space = space_cache_find_xc(id);
+	uint32_t id, iid;
+	if (tuple_field_u32(old_tuple ? old_tuple : new_tuple,
+		BOX_INDEX_FIELD_SPACE_ID, &id) != 0)
+		return -1;
+	if (tuple_field_u32(old_tuple ? old_tuple : new_tuple,
+		BOX_INDEX_FIELD_ID, &iid) != 0)
+		return -1;
+	struct space *old_space = space_cache_find(id);
+	if (old_space == NULL)
+		return -1;
 	if (old_space->def->opts.is_view) {
-		tnt_raise(ClientError, ER_ALTER_SPACE, space_name(old_space),
+		diag_set(ClientError, ER_ALTER_SPACE, space_name(old_space),
 			  "can not add index on a view");
+		return -1;
 	}
 	enum priv_type priv_type = new_tuple ? PRIV_C : PRIV_D;
 	if (old_tuple && new_tuple)
 		priv_type = PRIV_A;
-	access_check_ddl(old_space->def->name, old_space->def->id,
-			 old_space->def->uid, SC_SPACE, priv_type);
+	if (access_check_ddl(old_space->def->name, old_space->def->id,
+			     old_space->def->uid, SC_SPACE, priv_type) != 0)
+		return -1;
 	struct index *old_index = space_index(old_space, iid);
 
 	/*
@@ -2205,24 +2400,28 @@ on_replace_dd_index(struct trigger * /* trigger */, void *event)
 		/*
 		 * Dropping the primary key in a system space: off limits.
 		 */
-		if (space_is_system(old_space))
-			tnt_raise(ClientError, ER_LAST_DROP,
+		if (space_is_system(old_space)) {
+			diag_set(ClientError, ER_LAST_DROP,
 				  space_name(old_space));
+			return -1;
+		}
 		/*
 		 * Can't drop primary key before secondary keys.
 		 */
 		if (old_space->index_count > 1) {
-			tnt_raise(ClientError, ER_DROP_PRIMARY_KEY,
+			diag_set(ClientError, ER_DROP_PRIMARY_KEY,
 				  space_name(old_space));
+			return -1;
 		}
 		/*
 		 * Can't drop primary key before space sequence.
 		 */
 		if (old_space->sequence != NULL) {
-			tnt_raise(ClientError, ER_ALTER_SPACE,
+			diag_set(ClientError, ER_ALTER_SPACE,
 				  space_name(old_space),
 				  "can not drop primary key while "
 				  "space sequence exists");
+			return -1;
 		}
 	}
 
@@ -2231,14 +2430,16 @@ on_replace_dd_index(struct trigger * /* trigger */, void *event)
 		 * A secondary index can not be created without
 		 * a primary key.
 		 */
-		tnt_raise(ClientError, ER_ALTER_SPACE,
+		diag_set(ClientError, ER_ALTER_SPACE,
 			  space_name(old_space),
 			  "can not add a secondary key before primary");
+		return -1;
 	}
 
 	struct alter_space *alter = alter_space_new(old_space);
-	auto scoped_guard =
-		make_scoped_guard([=] { alter_space_delete(alter); });
+	if (alter == NULL)
+		return -1;
+	auto scoped_guard = make_scoped_guard([=] { alter_space_delete(alter); });
 
 	/*
 	 * Handle the following 4 cases:
@@ -2255,9 +2456,10 @@ on_replace_dd_index(struct trigger * /* trigger */, void *event)
 		 */
 		if (index_is_used_by_fk_constraint(&old_space->parent_fk_constraint,
 						   iid)) {
-			tnt_raise(ClientError, ER_ALTER_SPACE,
+			diag_set(ClientError, ER_ALTER_SPACE,
 				  space_name(old_space),
 				  "can not drop a referenced index");
+			return -1;
 		}
 		alter_space_move_indexes(alter, 0, iid);
 		(void) new DropIndex(alter, old_index);
@@ -2268,6 +2470,8 @@ on_replace_dd_index(struct trigger * /* trigger */, void *event)
 		CreateIndex *create_index = new CreateIndex(alter);
 		create_index->new_index_def =
 			index_def_new_from_tuple(new_tuple, old_space);
+		if (create_index->new_index_def == NULL)
+			return -1;
 		index_def_update_optionality(create_index->new_index_def,
 					     alter->new_min_field_count);
 	}
@@ -2275,6 +2479,8 @@ on_replace_dd_index(struct trigger * /* trigger */, void *event)
 	if (old_index != NULL && new_tuple != NULL) {
 		struct index_def *index_def;
 		index_def = index_def_new_from_tuple(new_tuple, old_space);
+		if (index_def == NULL)
+			return -1;
 		auto index_def_guard =
 			make_scoped_guard([=] { index_def_delete(index_def); });
 		/*
@@ -2295,10 +2501,12 @@ on_replace_dd_index(struct trigger * /* trigger */, void *event)
 		 */
 		struct key_def **keys;
 		size_t bsize = old_space->index_count * sizeof(keys[0]);
-		keys = (struct key_def **) region_alloc_xc(&fiber()->gc,
-							   bsize);
-		for (uint32_t i = 0, j = 0; i < old_space->index_count;
-		     ++i) {
+		keys = (struct key_def **) region_alloc(&fiber()->gc, bsize);
+		if (keys == NULL) {
+			diag_set(OutOfMemory, bsize, "region", "new slab");
+			return -1;
+		}
+		for (uint32_t i = 0, j = 0; i < old_space->index_count; ++i) {
 			struct index_def *d = old_space->index[i]->def;
 			if (d->iid != index_def->iid)
 				keys[j++] = d->key_def;
@@ -2321,9 +2529,10 @@ on_replace_dd_index(struct trigger * /* trigger */, void *event)
 							     index_def)) {
 			if (index_is_used_by_fk_constraint(&old_space->parent_fk_constraint,
 							   iid)) {
-				tnt_raise(ClientError, ER_ALTER_SPACE,
-					  space_name(old_space),
-					  "can not alter a referenced index");
+				diag_set(ClientError, ER_ALTER_SPACE,
+					 space_name(old_space),
+					 "can not alter a referenced index");
+				return -1;
 			}
 			/*
 			 * Operation demands an index rebuild.
@@ -2350,8 +2559,13 @@ on_replace_dd_index(struct trigger * /* trigger */, void *event)
 	(void) new MoveCkConstraints(alter);
 	/* Add an op to update schema_version on commit. */
 	(void) new UpdateSchemaVersion(alter);
-	alter_space_do(stmt, alter);
+	try {
+		alter_space_do(stmt, alter);
+	} catch (Exception *e) {
+		return -1;
+	}
 	scoped_guard.is_active = false;
+	return 0;
 }
 
 /**
@@ -2365,7 +2579,7 @@ on_replace_dd_index(struct trigger * /* trigger */, void *event)
  * This is OK, because a WAL write error implies cascading
  * rollback of all transactions following this one.
  */
-static void
+static int
 on_replace_dd_truncate(struct trigger * /* trigger */, void *event)
 {
 	struct txn *txn = (struct txn *) event;
@@ -2374,19 +2588,22 @@ on_replace_dd_truncate(struct trigger * /* trigger */, void *event)
 
 	if (new_tuple == NULL) {
 		/* Space drop - nothing to do. */
-		return;
+		return 0;
 	}
 
-	uint32_t space_id =
-		tuple_field_u32_xc(new_tuple, BOX_TRUNCATE_FIELD_SPACE_ID);
-	struct space *old_space = space_cache_find_xc(space_id);
+	uint32_t space_id;
+	if (tuple_field_u32(new_tuple, BOX_TRUNCATE_FIELD_SPACE_ID, &space_id) != 0)
+		return -1;
+	struct space *old_space = space_cache_find(space_id);
+	if (old_space == NULL)
+		return -1;
 
 	if (stmt->row->type == IPROTO_INSERT) {
 		/*
 		 * Space creation during initial recovery -
 		 * nothing to do.
 		 */
-		return;
+		return 0;
 	}
 
 	/*
@@ -2394,19 +2611,22 @@ on_replace_dd_truncate(struct trigger * /* trigger */, void *event)
 	 * with internal objects. Since space truncation doesn't
 	 * invoke triggers, we don't permit it for system spaces.
 	 */
-	if (space_is_system(old_space))
-		tnt_raise(ClientError, ER_TRUNCATE_SYSTEM_SPACE,
+	if (space_is_system(old_space)) {
+		diag_set(ClientError, ER_TRUNCATE_SYSTEM_SPACE,
 			  space_name(old_space));
+		return -1;
+	}
 
+	struct alter_space *alter;
 	/*
 	 * Check if a write privilege was given, raise an error if not.
 	 */
-	access_check_space_xc(old_space, PRIV_W);
-
-	struct alter_space *alter = alter_space_new(old_space);
-	auto scoped_guard =
-		make_scoped_guard([=] { alter_space_delete(alter); });
-
+	if (access_check_space(old_space, PRIV_W) != 0)
+		return -1;
+	alter = alter_space_new(old_space);
+	if (alter == NULL)
+		return -1;
+	auto scoped_guard = make_scoped_guard([=] { alter_space_delete(alter); });
 	/*
 	 * Modify the WAL header to prohibit
 	 * replication of local & temporary
@@ -2426,14 +2646,19 @@ on_replace_dd_truncate(struct trigger * /* trigger */, void *event)
 	}
 
 	(void) new MoveCkConstraints(alter);
-	alter_space_do(stmt, alter);
+	try {
+		alter_space_do(stmt, alter);
+	} catch (Exception *e) {
+		return -1;
+	}
 	scoped_guard.is_active = false;
+	return 0;
 }
 
 /* {{{ access control */
 
-bool
-user_has_data(struct user *user)
+int
+user_has_data(struct user *user, bool *has_data)
 {
 	uint32_t uid = user->def->uid;
 	uint32_t spaces[] = { BOX_SPACE_ID, BOX_FUNC_ID, BOX_SEQUENCE_ID,
@@ -2444,18 +2669,26 @@ user_has_data(struct user *user)
 	 */
 	uint32_t indexes[] = { 1, 1, 1, 1, 0 };
 	uint32_t count = sizeof(spaces)/sizeof(*spaces);
+	bool out;
 	for (uint32_t i = 0; i < count; i++) {
-		if (space_has_data(spaces[i], indexes[i], uid))
-			return true;
+		if (space_has_data(spaces[i], indexes[i], uid, &out) != 0)
+			return -1;
+		if (out) {
+			*has_data = true;
+			return 0;
+		}
+	}
+	if (! user_map_is_empty(&user->users)) {
+		*has_data = true;
+		return 0;
 	}
-	if (! user_map_is_empty(&user->users))
-		return true;
 	/*
 	 * If there was a role, the previous check would have
 	 * returned true.
 	 */
 	assert(user_map_is_empty(&user->roles));
-	return false;
+	*has_data = false;
+	return 0;
 }
 
 /**
@@ -2463,7 +2696,7 @@ user_has_data(struct user *user)
  * defined, but for now we only support chap-sha1. Get
  * password of chap-sha1 from the _user space.
  */
-void
+int
 user_def_fill_auth_data(struct user_def *user, const char *auth_data)
 {
 	uint8_t type = mp_typeof(*auth_data);
@@ -2475,13 +2708,14 @@ user_def_fill_auth_data(struct user_def *user, const char *auth_data)
 		 * table may well be encoded as an msgpack array.
 		 * Treat as no data.
 		 */
-		return;
+		return 0;
 	}
 	if (mp_typeof(*auth_data) != MP_MAP) {
 		/** Prevent users from making silly mistakes */
-		tnt_raise(ClientError, ER_CREATE_USER,
+		diag_set(ClientError, ER_CREATE_USER,
 			  user->name, "invalid password format, "
 			  "use box.schema.user.passwd() to reset password");
+		return -1;
 	}
 	uint32_t mech_count = mp_decode_map(&auth_data);
 	for (uint32_t i = 0; i < mech_count; i++) {
@@ -2498,50 +2732,65 @@ user_def_fill_auth_data(struct user_def *user, const char *auth_data)
 		}
 		const char *hash2_base64 = mp_decode_str(&auth_data, &len);
 		if (len != 0 && len != SCRAMBLE_BASE64_SIZE) {
-			tnt_raise(ClientError, ER_CREATE_USER,
+			diag_set(ClientError, ER_CREATE_USER,
 				  user->name, "invalid user password");
+			return -1;
 		}
 		if (user->uid == GUEST) {
 		    /** Guest user is permitted to have empty password */
-		    if (strncmp(hash2_base64, CHAP_SHA1_EMPTY_PASSWORD, len))
-			tnt_raise(ClientError, ER_GUEST_USER_PASSWORD);
+		    if (strncmp(hash2_base64, CHAP_SHA1_EMPTY_PASSWORD, len)) {
+			    diag_set(ClientError, ER_GUEST_USER_PASSWORD);
+			    return -1;
+		    }
 		}
 
 		base64_decode(hash2_base64, len, user->hash2,
 			      sizeof(user->hash2));
 		break;
 	}
+	return 0;
 }
 
 static struct user_def *
 user_def_new_from_tuple(struct tuple *tuple)
 {
 	uint32_t name_len;
-	const char *name = tuple_field_str_xc(tuple, BOX_USER_FIELD_NAME,
-					      &name_len);
+	const char *name = tuple_field_str(tuple, BOX_USER_FIELD_NAME,
+						      &name_len);
+	if (name == NULL)
+		return NULL;
 	if (name_len > BOX_NAME_MAX) {
-		tnt_raise(ClientError, ER_CREATE_USER,
+		diag_set(ClientError, ER_CREATE_USER,
 			  tt_cstr(name, BOX_INVALID_NAME_MAX),
 			  "user name is too long");
+		return NULL;
 	}
 	size_t size = user_def_sizeof(name_len);
 	/* Use calloc: in case user password is empty, fill it with \0 */
 	struct user_def *user = (struct user_def *) malloc(size);
-	if (user == NULL)
-		tnt_raise(OutOfMemory, size, "malloc", "user");
+	if (user == NULL) {
+		diag_set(OutOfMemory, size, "malloc", "user");
+		return NULL;
+	}
 	auto def_guard = make_scoped_guard([=] { free(user); });
-	user->uid = tuple_field_u32_xc(tuple, BOX_USER_FIELD_ID);
-	user->owner = tuple_field_u32_xc(tuple, BOX_USER_FIELD_UID);
-	const char *user_type =
-		tuple_field_cstr_xc(tuple, BOX_USER_FIELD_TYPE);
-	user->type= schema_object_type(user_type);
+	const char *user_type;
+	if (tuple_field_u32(tuple, BOX_USER_FIELD_ID, &(user->uid)) != 0)
+		return NULL;
+	if (tuple_field_u32(tuple, BOX_USER_FIELD_UID, &(user->owner)) != 0)
+		return NULL;
+	user_type = tuple_field_cstr(tuple, BOX_USER_FIELD_TYPE);
+	if (user_type == NULL)
+		return NULL;
+	user->type = schema_object_type(user_type);
 	memcpy(user->name, name, name_len);
 	user->name[name_len] = 0;
 	if (user->type != SC_ROLE && user->type != SC_USER) {
-		tnt_raise(ClientError, ER_CREATE_USER,
+		diag_set(ClientError, ER_CREATE_USER,
 			  user->name, "unknown user type");
+		return NULL;
 	}
-	identifier_check_xc(user->name, name_len);
+	if (identifier_check(user->name, name_len) != 0)
+		return NULL;
 	/*
 	 * AUTH_DATA field in _user space should contain
 	 * chap-sha1 -> base64_encode(sha1(sha1(password), 0).
@@ -2562,39 +2811,52 @@ user_def_new_from_tuple(struct tuple *tuple)
 		} else {
 			is_auth_empty = false;
 		}
-		if (!is_auth_empty && user->type == SC_ROLE)
-			tnt_raise(ClientError, ER_CREATE_ROLE, user->name,
-				  "authentication data can not be set for a "\
+		if (!is_auth_empty && user->type == SC_ROLE) {
+			diag_set(ClientError, ER_CREATE_ROLE, user->name,
+				 "authentication data can not be set for a "\
 				  "role");
-		user_def_fill_auth_data(user, auth_data);
+			return NULL;
+		}
+		if (user_def_fill_auth_data(user, auth_data) != 0)
+			return NULL;
 	}
 	def_guard.is_active = false;
 	return user;
 }
 
-static void
+static int
 user_cache_remove_user(struct trigger *trigger, void * /* event */)
 {
 	struct tuple *tuple = (struct tuple *)trigger->data;
-	uint32_t uid = tuple_field_u32_xc(tuple, BOX_USER_FIELD_ID);
+	uint32_t uid;
+	if (tuple_field_u32(tuple, BOX_USER_FIELD_ID, &uid) != 0)
+		return -1;
 	user_cache_delete(uid);
+	return 0;
 }
 
-static void
+static int
 user_cache_alter_user(struct trigger *trigger, void * /* event */)
 {
 	struct tuple *tuple = (struct tuple *)trigger->data;
 	struct user_def *user = user_def_new_from_tuple(tuple);
+	if (user == NULL)
+		return -1;
 	auto def_guard = make_scoped_guard([=] { free(user); });
 	/* Can throw if, e.g. too many users. */
-	user_cache_replace(user);
+	try {
+		user_cache_replace(user);
+	} catch (Exception *e) {
+		return -1;
+	}
 	def_guard.is_active = false;
+	return 0;
 }
 
 /**
  * A trigger invoked on replace in the user table.
  */
-static void
+static int
 on_replace_dd_user(struct trigger * /* trigger */, void *event)
 {
 	struct txn *txn = (struct txn *) event;
@@ -2602,40 +2864,60 @@ on_replace_dd_user(struct trigger * /* trigger */, void *event)
 	struct tuple *old_tuple = stmt->old_tuple;
 	struct tuple *new_tuple = stmt->new_tuple;
 
-	uint32_t uid = tuple_field_u32_xc(old_tuple ? old_tuple : new_tuple,
-					  BOX_USER_FIELD_ID);
+	uint32_t uid;
+	if (tuple_field_u32(old_tuple ? old_tuple : new_tuple,
+		BOX_USER_FIELD_ID, &uid) != 0)
+		return -1;
 	struct user *old_user = user_by_id(uid);
 	if (new_tuple != NULL && old_user == NULL) { /* INSERT */
 		struct user_def *user = user_def_new_from_tuple(new_tuple);
-		access_check_ddl(user->name, user->uid, user->owner, user->type,
-				 PRIV_C);
+		if (user == NULL)
+			return -1;
+		if (access_check_ddl(user->name, user->uid, user->owner,
+				     user->type, PRIV_C) != 0)
+			return -1;
 		auto def_guard = make_scoped_guard([=] { free(user); });
-		(void) user_cache_replace(user);
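+		/* user_cache_replace() may still throw; convert to an error. */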
+		try {
+			(void) user_cache_replace(user);
+		} catch (Exception *e) {
+			return -1;
+		}
 		def_guard.is_active = false;
 		struct trigger *on_rollback =
 			txn_alter_trigger_new(user_cache_remove_user, new_tuple);
+		if (on_rollback == NULL)
+			return -1;
 		txn_stmt_on_rollback(stmt, on_rollback);
 	} else if (new_tuple == NULL) { /* DELETE */
-		access_check_ddl(old_user->def->name, old_user->def->uid,
-				 old_user->def->owner, old_user->def->type,
-				 PRIV_D);
+		if (access_check_ddl(old_user->def->name, old_user->def->uid,
+				     old_user->def->owner, old_user->def->type,
+				     PRIV_D) != 0)
+			return -1;
 		/* Can't drop guest or super user */
 		if (uid <= (uint32_t) BOX_SYSTEM_USER_ID_MAX || uid == SUPER) {
-			tnt_raise(ClientError, ER_DROP_USER,
+			diag_set(ClientError, ER_DROP_USER,
 				  old_user->def->name,
 				  "the user or the role is a system");
+			return -1;
 		}
 		/*
 		 * Can only delete user if it has no spaces,
 		 * no functions and no grants.
 		 */
-		if (user_has_data(old_user)) {
-			tnt_raise(ClientError, ER_DROP_USER,
-				  old_user->def->name, "the user has objects");
+		bool has_data;
+		if (user_has_data(old_user, &has_data) != 0)
+			return -1;
+		if (has_data) {
+			diag_set(ClientError, ER_DROP_USER,
+				 old_user->def->name, "the user has objects");
+			return -1;
 		}
 		user_cache_delete(uid);
 		struct trigger *on_rollback =
 			txn_alter_trigger_new(user_cache_alter_user, old_tuple);
+		if (on_rollback == NULL)
+			return -1;
 		txn_stmt_on_rollback(stmt, on_rollback);
 	} else { /* UPDATE, REPLACE */
 		assert(old_user != NULL && new_tuple != NULL);
@@ -2645,15 +2927,25 @@ on_replace_dd_user(struct trigger * /* trigger */, void *event)
 		 * correct.
 		 */
 		struct user_def *user = user_def_new_from_tuple(new_tuple);
-		access_check_ddl(user->name, user->uid, user->uid,
-			         old_user->def->type, PRIV_A);
+		if (user == NULL)
+			return -1;
+		if (access_check_ddl(user->name, user->uid, user->uid,
+				     old_user->def->type, PRIV_A) != 0)
+			return -1;
 		auto def_guard = make_scoped_guard([=] { free(user); });
-		user_cache_replace(user);
-		def_guard.is_active = false;
+		try {
+			user_cache_replace(user);
+		} catch (Exception *e) {
+			return -1;
+		}
+		def_guard.is_active = false;
 		struct trigger *on_rollback =
 			txn_alter_trigger_new(user_cache_alter_user, old_tuple);
+		if (on_rollback == NULL)
+			return -1;
 		txn_stmt_on_rollback(stmt, on_rollback);
 	}
+	return 0;
 }
 
 /**
@@ -2663,11 +2955,12 @@ on_replace_dd_user(struct trigger * /* trigger */, void *event)
  * @param[out] fid Function identifier.
  * @param[out] uid Owner identifier.
  */
-static inline void
+static inline int
 func_def_get_ids_from_tuple(struct tuple *tuple, uint32_t *fid, uint32_t *uid)
 {
-	*fid = tuple_field_u32_xc(tuple, BOX_FUNC_FIELD_ID);
-	*uid = tuple_field_u32_xc(tuple, BOX_FUNC_FIELD_UID);
+	if (tuple_field_u32(tuple, BOX_FUNC_FIELD_ID, fid) != 0)
+		return -1;
+	return tuple_field_u32(tuple, BOX_FUNC_FIELD_UID, uid);
 }
 
 /** Create a function definition from tuple. */
@@ -2677,38 +2970,53 @@ func_def_new_from_tuple(struct tuple *tuple)
 	uint32_t field_count = tuple_field_count(tuple);
 	uint32_t name_len, body_len, comment_len;
 	const char *name, *body, *comment;
-	name = tuple_field_str_xc(tuple, BOX_FUNC_FIELD_NAME, &name_len);
+	name = tuple_field_str(tuple, BOX_FUNC_FIELD_NAME, &name_len);
+	if (name == NULL)
+		return NULL;
 	if (name_len > BOX_NAME_MAX) {
-		tnt_raise(ClientError, ER_CREATE_FUNCTION,
+		diag_set(ClientError, ER_CREATE_FUNCTION,
 			  tt_cstr(name, BOX_INVALID_NAME_MAX),
 			  "function name is too long");
+		return NULL;
 	}
-	identifier_check_xc(name, name_len);
+	if (identifier_check(name, name_len) != 0)
+		return NULL;
 	if (field_count > BOX_FUNC_FIELD_BODY) {
-		body = tuple_field_str_xc(tuple, BOX_FUNC_FIELD_BODY,
-					  &body_len);
-		comment = tuple_field_str_xc(tuple, BOX_FUNC_FIELD_COMMENT,
+		body = tuple_field_str(tuple, BOX_FUNC_FIELD_BODY, &body_len);
+		if (body == NULL)
+			return NULL;
+		comment = tuple_field_str(tuple, BOX_FUNC_FIELD_COMMENT,
 					     &comment_len);
+		if (comment == NULL)
+			return NULL;
 		uint32_t len;
-		const char *routine_type = tuple_field_str_xc(tuple,
+		const char *routine_type = tuple_field_str(tuple,
 					BOX_FUNC_FIELD_ROUTINE_TYPE, &len);
+		if (routine_type == NULL)
+			return NULL;
 		if (len != strlen("function") ||
 		    strncasecmp(routine_type, "function", len) != 0) {
-			tnt_raise(ClientError, ER_CREATE_FUNCTION, name,
+			diag_set(ClientError, ER_CREATE_FUNCTION, name,
 				  "unsupported routine_type value");
+			return NULL;
 		}
-		const char *sql_data_access = tuple_field_str_xc(tuple,
+		const char *sql_data_access = tuple_field_str(tuple,
 					BOX_FUNC_FIELD_SQL_DATA_ACCESS, &len);
+		if (sql_data_access == NULL)
+			return NULL;
 		if (len != strlen("none") ||
 		    strncasecmp(sql_data_access, "none", len) != 0) {
-			tnt_raise(ClientError, ER_CREATE_FUNCTION, name,
+			diag_set(ClientError, ER_CREATE_FUNCTION, name,
 				  "unsupported sql_data_access value");
+			return NULL;
 		}
-		bool is_null_call = tuple_field_bool_xc(tuple,
-						BOX_FUNC_FIELD_IS_NULL_CALL);
+		bool is_null_call;
+		if (tuple_field_bool(tuple, BOX_FUNC_FIELD_IS_NULL_CALL,
+				     &is_null_call) != 0)
+			return NULL;
 		if (is_null_call != true) {
-			tnt_raise(ClientError, ER_CREATE_FUNCTION, name,
+			diag_set(ClientError, ER_CREATE_FUNCTION, name,
 				  "unsupported is_null_call value");
+			return NULL;
 		}
 	} else {
 		body = NULL;
@@ -2720,13 +3028,17 @@ func_def_new_from_tuple(struct tuple *tuple)
 	uint32_t def_sz = func_def_sizeof(name_len, body_len, comment_len,
 					  &body_offset, &comment_offset);
 	struct func_def *def = (struct func_def *) malloc(def_sz);
-	if (def == NULL)
-		tnt_raise(OutOfMemory, def_sz, "malloc", "def");
+	if (def == NULL) {
+		diag_set(OutOfMemory, def_sz, "malloc", "def");
+		return NULL;
+	}
 	auto def_guard = make_scoped_guard([=] { free(def); });
-	func_def_get_ids_from_tuple(tuple, &def->fid, &def->uid);
+	if (func_def_get_ids_from_tuple(tuple, &def->fid, &def->uid) != 0)
+		return NULL;
 	if (def->fid > BOX_FUNCTION_MAX) {
-		tnt_raise(ClientError, ER_CREATE_FUNCTION,
+		diag_set(ClientError, ER_CREATE_FUNCTION,
 			  tt_cstr(name, name_len), "function id is too big");
+		return NULL;
 	}
 	func_opts_create(&def->opts);
 	memcpy(def->name, name, name_len);
@@ -2746,47 +3058,59 @@ func_def_new_from_tuple(struct tuple *tuple)
 	} else {
 		def->comment = NULL;
 	}
-	if (field_count > BOX_FUNC_FIELD_SETUID)
-		def->setuid = tuple_field_u32_xc(tuple, BOX_FUNC_FIELD_SETUID);
-	else
+	if (field_count > BOX_FUNC_FIELD_SETUID) {
+		uint32_t out;
+		if (tuple_field_u32(tuple, BOX_FUNC_FIELD_SETUID, &out) != 0)
+			return NULL;
+		def->setuid = out;
+	} else {
 		def->setuid = false;
+	}
 	if (field_count > BOX_FUNC_FIELD_LANGUAGE) {
 		const char *language =
-			tuple_field_cstr_xc(tuple, BOX_FUNC_FIELD_LANGUAGE);
+			tuple_field_cstr(tuple, BOX_FUNC_FIELD_LANGUAGE);
+		if (language == NULL)
+			return NULL;
 		def->language = STR2ENUM(func_language, language);
 		if (def->language == func_language_MAX ||
 		    def->language == FUNC_LANGUAGE_SQL) {
-			tnt_raise(ClientError, ER_FUNCTION_LANGUAGE,
+			diag_set(ClientError, ER_FUNCTION_LANGUAGE,
 				  language, def->name);
+			return NULL;
 		}
 	} else {
 		/* Lua is the default. */
 		def->language = FUNC_LANGUAGE_LUA;
 	}
 	if (field_count > BOX_FUNC_FIELD_BODY) {
-		def->is_deterministic =
-			tuple_field_bool_xc(tuple,
-					    BOX_FUNC_FIELD_IS_DETERMINISTIC);
-		def->is_sandboxed =
-			tuple_field_bool_xc(tuple,
-					    BOX_FUNC_FIELD_IS_SANDBOXED);
+		if (tuple_field_bool(tuple, BOX_FUNC_FIELD_IS_DETERMINISTIC,
+				     &(def->is_deterministic)) != 0)
+			return NULL;
+		if (tuple_field_bool(tuple, BOX_FUNC_FIELD_IS_SANDBOXED,
+				     &(def->is_sandboxed)) != 0)
+			return NULL;
 		const char *returns =
-			tuple_field_cstr_xc(tuple, BOX_FUNC_FIELD_RETURNS);
+			tuple_field_cstr(tuple, BOX_FUNC_FIELD_RETURNS);
+		if (returns == NULL)
+			return NULL;
 		def->returns = STR2ENUM(field_type, returns);
 		if (def->returns == field_type_MAX) {
-			tnt_raise(ClientError, ER_CREATE_FUNCTION,
+			diag_set(ClientError, ER_CREATE_FUNCTION,
 				  def->name, "invalid returns value");
+			return NULL;
 		}
 		def->exports.all = 0;
-		const char *exports =
-			tuple_field_with_type_xc(tuple, BOX_FUNC_FIELD_EXPORTS,
-						 MP_ARRAY);
+		const char *exports = tuple_field_with_type(tuple,
+			BOX_FUNC_FIELD_EXPORTS, MP_ARRAY);
+		if (exports == NULL)
+			return NULL;
 		uint32_t cnt = mp_decode_array(&exports);
 		for (uint32_t i = 0; i < cnt; i++) {
 			 if (mp_typeof(*exports) != MP_STR) {
-				tnt_raise(ClientError, ER_FIELD_TYPE,
+				diag_set(ClientError, ER_FIELD_TYPE,
 					  int2str(BOX_FUNC_FIELD_EXPORTS + 1),
 					  mp_type_strs[MP_STR]);
+				return NULL;
 			}
 			uint32_t len;
 			const char *str = mp_decode_str(&exports, &len);
@@ -2798,32 +3122,39 @@ func_def_new_from_tuple(struct tuple *tuple)
 				def->exports.sql = true;
 				break;
 			default:
-				tnt_raise(ClientError, ER_CREATE_FUNCTION,
+				diag_set(ClientError, ER_CREATE_FUNCTION,
 					  def->name, "invalid exports value");
+				return NULL;
 			}
 		}
 		const char *aggregate =
-			tuple_field_cstr_xc(tuple, BOX_FUNC_FIELD_AGGREGATE);
+			tuple_field_cstr(tuple, BOX_FUNC_FIELD_AGGREGATE);
+		if (aggregate == NULL)
+			return NULL;
 		def->aggregate = STR2ENUM(func_aggregate, aggregate);
 		if (def->aggregate == func_aggregate_MAX) {
-			tnt_raise(ClientError, ER_CREATE_FUNCTION,
+			diag_set(ClientError, ER_CREATE_FUNCTION,
 				  def->name, "invalid aggregate value");
+			return NULL;
 		}
-		const char *param_list =
-			tuple_field_with_type_xc(tuple,
+		const char *param_list = tuple_field_with_type(tuple,
 					BOX_FUNC_FIELD_PARAM_LIST, MP_ARRAY);
+		if (param_list == NULL)
+			return NULL;
 		uint32_t argc = mp_decode_array(&param_list);
 		for (uint32_t i = 0; i < argc; i++) {
 			 if (mp_typeof(*param_list) != MP_STR) {
-				tnt_raise(ClientError, ER_FIELD_TYPE,
+				diag_set(ClientError, ER_FIELD_TYPE,
 					  int2str(BOX_FUNC_FIELD_PARAM_LIST + 1),
 					  mp_type_strs[MP_STR]);
+				return NULL;
 			}
 			uint32_t len;
 			const char *str = mp_decode_str(&param_list, &len);
 			if (STRN2ENUM(field_type, str, len) == field_type_MAX) {
-				tnt_raise(ClientError, ER_CREATE_FUNCTION,
+				diag_set(ClientError, ER_CREATE_FUNCTION,
 					  def->name, "invalid argument type");
+				return NULL;
 			}
 		}
 		def->param_count = argc;
@@ -2831,7 +3162,7 @@ func_def_new_from_tuple(struct tuple *tuple)
 		if (opts_decode(&def->opts, func_opts_reg, &opts,
 				ER_WRONG_SPACE_OPTIONS, BOX_FUNC_FIELD_OPTS,
 				NULL) != 0)
-			diag_raise();
+			return NULL;
 	} else {
 		def->is_deterministic = false;
 		def->is_sandboxed = false;
@@ -2843,43 +3174,48 @@ func_def_new_from_tuple(struct tuple *tuple)
 		def->param_count = 0;
 	}
 	if (func_def_check(def) != 0)
-		diag_raise();
+		return NULL;
 	def_guard.is_active = false;
 	return def;
 }
 
-static void
+static int
 on_create_func_rollback(struct trigger *trigger, void * /* event */)
 {
 	/* Remove the new function from the cache and delete it. */
 	struct func *func = (struct func *)trigger->data;
 	func_cache_delete(func->def->fid);
-	trigger_run_xc(&on_alter_func, func);
+	if (trigger_run(&on_alter_func, func) != 0)
+		return -1;
 	func_delete(func);
+	return 0;
 }
 
-static void
+static int
 on_drop_func_commit(struct trigger *trigger, void * /* event */)
 {
 	/* Delete the old function. */
 	struct func *func = (struct func *)trigger->data;
 	func_delete(func);
+	return 0;
 }
 
-static void
+static int
 on_drop_func_rollback(struct trigger *trigger, void * /* event */)
 {
 	/* Insert the old function back into the cache. */
 	struct func *func = (struct func *)trigger->data;
 	func_cache_insert(func);
-	trigger_run_xc(&on_alter_func, func);
+	if (trigger_run(&on_alter_func, func) != 0)
+		return -1;
+	return 0;
 }
 
 /**
  * A trigger invoked on replace in a space containing
  * functions on which there were defined any grants.
  */
-static void
+static int
 on_replace_dd_func(struct trigger * /* trigger */, void *event)
 {
 	struct txn *txn = (struct txn *) event;
@@ -2887,53 +3223,74 @@ on_replace_dd_func(struct trigger * /* trigger */, void *event)
 	struct tuple *old_tuple = stmt->old_tuple;
 	struct tuple *new_tuple = stmt->new_tuple;
 
-	uint32_t fid = tuple_field_u32_xc(old_tuple ? old_tuple : new_tuple,
-					  BOX_FUNC_FIELD_ID);
+	uint32_t fid;
+	if (tuple_field_u32(old_tuple ? old_tuple : new_tuple,
+		BOX_FUNC_FIELD_ID, &fid) != 0)
+		return -1;
 	struct func *old_func = func_by_id(fid);
 	if (new_tuple != NULL && old_func == NULL) { /* INSERT */
-		struct func_def *def = func_def_new_from_tuple(new_tuple);
+		struct func_def *def = func_def_new_from_tuple(new_tuple);
+		if (def == NULL)
+			return -1;
 		auto def_guard = make_scoped_guard([=] { free(def); });
-		access_check_ddl(def->name, def->fid, def->uid, SC_FUNCTION,
-				 PRIV_C);
+		if (access_check_ddl(def->name, def->fid, def->uid, SC_FUNCTION,
+				     PRIV_C) != 0)
+			return -1;
 		struct trigger *on_rollback =
 			txn_alter_trigger_new(on_create_func_rollback, NULL);
+		if (on_rollback == NULL)
+			return -1;
 		struct func *func = func_new(def);
 		if (func == NULL)
-			diag_raise();
+			return -1;
 		def_guard.is_active = false;
 		func_cache_insert(func);
 		on_rollback->data = func;
 		txn_stmt_on_rollback(stmt, on_rollback);
-		trigger_run_xc(&on_alter_func, func);
+		if (trigger_run(&on_alter_func, func) != 0)
+			return -1;
 	} else if (new_tuple == NULL) {         /* DELETE */
 		uint32_t uid;
-		func_def_get_ids_from_tuple(old_tuple, &fid, &uid);
+		if (func_def_get_ids_from_tuple(old_tuple, &fid, &uid) != 0)
+			return -1;
 		/*
 		 * Can only delete func if you're the one
 		 * who created it or a superuser.
 		 */
-		access_check_ddl(old_func->def->name, fid, uid, SC_FUNCTION,
-				 PRIV_D);
+		if (access_check_ddl(old_func->def->name, fid, uid, SC_FUNCTION,
+				     PRIV_D) != 0)
+			return -1;
 		/* Can only delete func if it has no grants. */
-		if (schema_find_grants("function", old_func->def->fid)) {
-			tnt_raise(ClientError, ER_DROP_FUNCTION,
-				  (unsigned) old_func->def->uid,
-				  "function has grants");
+		bool out;
+		if (schema_find_grants("function", old_func->def->fid,
+				       &out) != 0)
+			return -1;
+		if (out) {
+			diag_set(ClientError, ER_DROP_FUNCTION,
+				 (unsigned) old_func->def->uid,
+				 "function has grants");
+			return -1;
 		}
-		if (old_func != NULL &&
-		    space_has_data(BOX_FUNC_INDEX_ID, 1, old_func->def->fid)) {
-			tnt_raise(ClientError, ER_DROP_FUNCTION,
+		if (space_has_data(BOX_FUNC_INDEX_ID, 1, old_func->def->fid,
+				   &out) != 0)
+			return -1;
+		if (old_func != NULL && out) {
+			diag_set(ClientError, ER_DROP_FUNCTION,
 				  (unsigned) old_func->def->uid,
 				  "function has references");
+			return -1;
 		}
 		struct trigger *on_commit =
 			txn_alter_trigger_new(on_drop_func_commit, old_func);
 		struct trigger *on_rollback =
 			txn_alter_trigger_new(on_drop_func_rollback, old_func);
+		if (on_commit == NULL || on_rollback == NULL)
+			return -1;
 		func_cache_delete(old_func->def->fid);
 		txn_stmt_on_commit(stmt, on_commit);
 		txn_stmt_on_rollback(stmt, on_rollback);
-		trigger_run_xc(&on_alter_func, old_func);
+		if (trigger_run(&on_alter_func, old_func) != 0)
+			return -1;
 	} else {                                /* UPDATE, REPLACE */
 		assert(new_tuple != NULL && old_tuple != NULL);
 		/**
@@ -2947,120 +3304,152 @@ on_replace_dd_func(struct trigger * /* trigger */, void *event)
 		});
 		old_def = func_def_new_from_tuple(old_tuple);
 		new_def = func_def_new_from_tuple(new_tuple);
+		if (old_def == NULL || new_def == NULL)
+			return -1;
 		if (func_def_cmp(new_def, old_def) != 0) {
-			tnt_raise(ClientError, ER_UNSUPPORTED, "function",
+			diag_set(ClientError, ER_UNSUPPORTED, "function",
 				  "alter");
+			return -1;
 		}
 	}
+	return 0;
 }
 
 /** Create a collation identifier definition from tuple. */
-void
+int
 coll_id_def_new_from_tuple(struct tuple *tuple, struct coll_id_def *def)
 {
 	memset(def, 0, sizeof(*def));
 	uint32_t name_len, locale_len, type_len;
-	def->id = tuple_field_u32_xc(tuple, BOX_COLLATION_FIELD_ID);
-	def->name = tuple_field_str_xc(tuple, BOX_COLLATION_FIELD_NAME, &name_len);
+	if (tuple_field_u32(tuple, BOX_COLLATION_FIELD_ID, &(def->id)) != 0)
+		return -1;
+	def->name = tuple_field_str(tuple, BOX_COLLATION_FIELD_NAME, &name_len);
+	if (def->name == NULL)
+		return -1;
 	def->name_len = name_len;
-	if (name_len > BOX_NAME_MAX)
-		tnt_raise(ClientError, ER_CANT_CREATE_COLLATION,
-			  "collation name is too long");
-	identifier_check_xc(def->name, name_len);
-
-	def->owner_id = tuple_field_u32_xc(tuple, BOX_COLLATION_FIELD_UID);
-	struct coll_def *base = &def->base;
-	const char *type = tuple_field_str_xc(tuple, BOX_COLLATION_FIELD_TYPE,
-					      &type_len);
+	if (name_len > BOX_NAME_MAX) {
+		diag_set(ClientError, ER_CANT_CREATE_COLLATION,
+			 "collation name is too long");
+		return -1;
+	}
+	if (identifier_check(def->name, name_len) != 0)
+		return -1;
+	if (tuple_field_u32(tuple, BOX_COLLATION_FIELD_UID,
+			    &(def->owner_id)) != 0)
+		return -1;
+	struct coll_def *base = &def->base;
+	const char *type = tuple_field_str(tuple, BOX_COLLATION_FIELD_TYPE,
+					   &type_len);
+	if (type == NULL)
+		return -1;
+
 	base->type = STRN2ENUM(coll_type, type, type_len);
-	if (base->type == coll_type_MAX)
-		tnt_raise(ClientError, ER_CANT_CREATE_COLLATION,
-			  "unknown collation type");
-	const char *locale =
-		tuple_field_str_xc(tuple, BOX_COLLATION_FIELD_LOCALE,
-				   &locale_len);
-	if (locale_len > COLL_LOCALE_LEN_MAX)
-		tnt_raise(ClientError, ER_CANT_CREATE_COLLATION,
-			  "collation locale is too long");
+	if (base->type == coll_type_MAX) {
+		diag_set(ClientError, ER_CANT_CREATE_COLLATION,
+			 "unknown collation type");
+		return -1;
+	}
+	const char *locale = tuple_field_str(tuple, BOX_COLLATION_FIELD_LOCALE,
+					    &locale_len);
+	if (locale == NULL)
+		return -1;
+	if (locale_len > COLL_LOCALE_LEN_MAX) {
+		diag_set(ClientError, ER_CANT_CREATE_COLLATION,
+			 "collation locale is too long");
+		return -1;
+	}
 	if (locale_len > 0)
-		identifier_check_xc(locale, locale_len);
+		if (identifier_check(locale, locale_len) != 0)
+			return -1;
 	snprintf(base->locale, sizeof(base->locale), "%.*s", locale_len,
 		 locale);
-	const char *options =
-		tuple_field_with_type_xc(tuple, BOX_COLLATION_FIELD_OPTIONS,
-					 MP_MAP);
-
+	const char *options =
+		tuple_field_with_type(tuple, BOX_COLLATION_FIELD_OPTIONS,
+				      MP_MAP);
+	if (options == NULL)
+		return -1;
 	if (opts_decode(&base->icu, coll_icu_opts_reg, &options,
 			ER_WRONG_COLLATION_OPTIONS,
-			BOX_COLLATION_FIELD_OPTIONS, NULL) != 0)
-		diag_raise();
-
+			BOX_COLLATION_FIELD_OPTIONS, NULL) != 0)
+		return -1;
 	if (base->icu.french_collation == coll_icu_on_off_MAX) {
-		tnt_raise(ClientError, ER_CANT_CREATE_COLLATION,
+		diag_set(ClientError, ER_CANT_CREATE_COLLATION,
 			  "ICU wrong french_collation option setting, "
 				  "expected ON | OFF");
+		return -1;
 	}
 
 	if (base->icu.alternate_handling == coll_icu_alternate_handling_MAX) {
-		tnt_raise(ClientError, ER_CANT_CREATE_COLLATION,
+		diag_set(ClientError, ER_CANT_CREATE_COLLATION,
 			  "ICU wrong alternate_handling option setting, "
 				  "expected NON_IGNORABLE | SHIFTED");
+		return -1;
 	}
 
 	if (base->icu.case_first == coll_icu_case_first_MAX) {
-		tnt_raise(ClientError, ER_CANT_CREATE_COLLATION,
+		diag_set(ClientError, ER_CANT_CREATE_COLLATION,
 			  "ICU wrong case_first option setting, "
 				  "expected OFF | UPPER_FIRST | LOWER_FIRST");
+		return -1;
 	}
 
 	if (base->icu.case_level == coll_icu_on_off_MAX) {
-		tnt_raise(ClientError, ER_CANT_CREATE_COLLATION,
+		diag_set(ClientError, ER_CANT_CREATE_COLLATION,
 			  "ICU wrong case_level option setting, "
 				  "expected ON | OFF");
+		return -1;
 	}
 
 	if (base->icu.normalization_mode == coll_icu_on_off_MAX) {
-		tnt_raise(ClientError, ER_CANT_CREATE_COLLATION,
+		diag_set(ClientError, ER_CANT_CREATE_COLLATION,
 			  "ICU wrong normalization_mode option setting, "
 				  "expected ON | OFF");
+		return -1;
 	}
 
 	if (base->icu.strength == coll_icu_strength_MAX) {
-		tnt_raise(ClientError, ER_CANT_CREATE_COLLATION,
+		diag_set(ClientError, ER_CANT_CREATE_COLLATION,
 			  "ICU wrong strength option setting, "
 				  "expected PRIMARY | SECONDARY | "
 				  "TERTIARY | QUATERNARY | IDENTICAL");
+		return -1;
 	}
 
 	if (base->icu.numeric_collation == coll_icu_on_off_MAX) {
-		tnt_raise(ClientError, ER_CANT_CREATE_COLLATION,
+		diag_set(ClientError, ER_CANT_CREATE_COLLATION,
 			  "ICU wrong numeric_collation option setting, "
 				  "expected ON | OFF");
+		return -1;
 	}
+	return 0;
 }
 
 /** Delete the new collation identifier. */
-static void
+static int
 on_create_collation_rollback(struct trigger *trigger, void *event)
 {
 	(void) event;
 	struct coll_id *coll_id = (struct coll_id *) trigger->data;
 	coll_id_cache_delete(coll_id);
 	coll_id_delete(coll_id);
+	return 0;
 }
 
 
 /** Free a deleted collation identifier on commit. */
-static void
+static int
 on_drop_collation_commit(struct trigger *trigger, void *event)
 {
 	(void) event;
 	struct coll_id *coll_id = (struct coll_id *) trigger->data;
 	coll_id_delete(coll_id);
+	return 0;
 }
 
 /** Put the collation identifier back on rollback. */
-static void
+static int
 on_drop_collation_rollback(struct trigger *trigger, void *event)
 {
 	(void) event;
@@ -3069,13 +3458,14 @@ on_drop_collation_rollback(struct trigger *trigger, void *event)
 	if (coll_id_cache_replace(coll_id, &replaced_id) != 0)
 		panic("Out of memory on insertion into collation cache");
 	assert(replaced_id == NULL);
+	return 0;
 }
 
 /**
  * A trigger invoked on replace in a space containing
  * collations that a user defined.
  */
-static void
+static int
 on_replace_dd_collation(struct trigger * /* trigger */, void *event)
 {
 	struct txn *txn = (struct txn *) event;
@@ -3088,12 +3478,16 @@ on_replace_dd_collation(struct trigger * /* trigger */, void *event)
 			txn_alter_trigger_new(on_drop_collation_commit, NULL);
 		struct trigger *on_rollback =
 			txn_alter_trigger_new(on_drop_collation_rollback, NULL);
+		if (on_commit == NULL || on_rollback == NULL)
+			return -1;
 		/*
 		 * TODO: Check that no index uses the collation
 		 * identifier.
 		 */
-		int32_t old_id = tuple_field_u32_xc(old_tuple,
-						    BOX_COLLATION_FIELD_ID);
+		uint32_t out;
+		if (tuple_field_u32(old_tuple, BOX_COLLATION_FIELD_ID, &out) != 0)
+			return -1;
+		int32_t old_id = out;
 		/*
 		 * Don't allow user to drop "none" collation
 		 * since it is very special and vastly used
@@ -3101,14 +3495,16 @@ on_replace_dd_collation(struct trigger * /* trigger */, void *event)
 		 * fact that "none" collation features id == 0.
 		 */
 		if (old_id == COLL_NONE) {
-			tnt_raise(ClientError, ER_DROP_COLLATION, "none",
+			diag_set(ClientError, ER_DROP_COLLATION, "none",
 				  "system collation");
+			return -1;
 		}
 		struct coll_id *old_coll_id = coll_by_id(old_id);
 		assert(old_coll_id != NULL);
-		access_check_ddl(old_coll_id->name, old_coll_id->id,
-				 old_coll_id->owner_id, SC_COLLATION,
-				 PRIV_D);
+		if (access_check_ddl(old_coll_id->name, old_coll_id->id,
+				     old_coll_id->owner_id, SC_COLLATION,
+				     PRIV_D) != 0)
+			return -1;
 		/*
 		 * Set on_commit/on_rollback triggers after
 		 * deletion from the cache to make trigger logic
@@ -3123,17 +3519,21 @@ on_replace_dd_collation(struct trigger * /* trigger */, void *event)
 		/* INSERT */
 		struct trigger *on_rollback =
 			txn_alter_trigger_new(on_create_collation_rollback, NULL);
+		if (on_rollback == NULL)
+			return -1;
 		struct coll_id_def new_def;
-		coll_id_def_new_from_tuple(new_tuple, &new_def);
-		access_check_ddl(new_def.name, new_def.id, new_def.owner_id,
-				 SC_COLLATION, PRIV_C);
+		if (coll_id_def_new_from_tuple(new_tuple, &new_def) != 0)
+			return -1;
+		if (access_check_ddl(new_def.name, new_def.id, new_def.owner_id,
+				     SC_COLLATION, PRIV_C) != 0)
+			return -1;
 		struct coll_id *new_coll_id = coll_id_new(&new_def);
 		if (new_coll_id == NULL)
-			diag_raise();
+			return -1;
 		struct coll_id *replaced_id;
 		if (coll_id_cache_replace(new_coll_id, &replaced_id) != 0) {
 			coll_id_delete(new_coll_id);
-			diag_raise();
+			return -1;
 		}
 		assert(replaced_id == NULL);
 		on_rollback->data = new_coll_id;
@@ -3141,27 +3541,34 @@ on_replace_dd_collation(struct trigger * /* trigger */, void *event)
 	} else {
 		/* UPDATE */
 		assert(new_tuple != NULL && old_tuple != NULL);
-		tnt_raise(ClientError, ER_UNSUPPORTED, "collation", "alter");
+		diag_set(ClientError, ER_UNSUPPORTED, "collation", "alter");
+		return -1;
 	}
+	return 0;
 }
 
 /**
  * Create a privilege definition from tuple.
  */
-void
+int
 priv_def_create_from_tuple(struct priv_def *priv, struct tuple *tuple)
 {
-	priv->grantor_id = tuple_field_u32_xc(tuple, BOX_PRIV_FIELD_ID);
-	priv->grantee_id = tuple_field_u32_xc(tuple, BOX_PRIV_FIELD_UID);
+	if (tuple_field_u32(tuple, BOX_PRIV_FIELD_ID, &(priv->grantor_id)) != 0)
+		return -1;
+	if (tuple_field_u32(tuple, BOX_PRIV_FIELD_UID, &(priv->grantee_id)) != 0)
+		return -1;
 
 	const char *object_type =
-		tuple_field_cstr_xc(tuple, BOX_PRIV_FIELD_OBJECT_TYPE);
+		tuple_field_cstr(tuple, BOX_PRIV_FIELD_OBJECT_TYPE);
+	if (object_type == NULL)
+		return -1;
 	priv->object_type = schema_object_type(object_type);
 
 	const char *data = tuple_field(tuple, BOX_PRIV_FIELD_OBJECT_ID);
 	if (data == NULL) {
-		tnt_raise(ClientError, ER_NO_SUCH_FIELD_NO,
+		diag_set(ClientError, ER_NO_SUCH_FIELD_NO,
 			  BOX_PRIV_FIELD_OBJECT_ID + TUPLE_INDEX_BASE);
+		return -1;
 	}
 	/*
 	 * When granting or revoking privileges on a whole entity
@@ -3179,14 +3586,19 @@ priv_def_create_from_tuple(struct priv_def *priv, struct tuple *tuple)
 		}
 		FALLTHROUGH;
 	default:
-		priv->object_id = tuple_field_u32_xc(tuple,
-						     BOX_PRIV_FIELD_OBJECT_ID);
+		if (tuple_field_u32(tuple, BOX_PRIV_FIELD_OBJECT_ID,
+				    &(priv->object_id)) != 0)
+			return -1;
 	}
 	if (priv->object_type == SC_UNKNOWN) {
-		tnt_raise(ClientError, ER_UNKNOWN_SCHEMA_OBJECT,
+		diag_set(ClientError, ER_UNKNOWN_SCHEMA_OBJECT,
 			  object_type);
+		return -1;
 	}
-	priv->access = tuple_field_u32_xc(tuple, BOX_PRIV_FIELD_ACCESS);
+	uint32_t out;
+	if (tuple_field_u32(tuple, BOX_PRIV_FIELD_ACCESS, &out) != 0)
+		return -1;
+	priv->access = out;
+	return 0;
 }
 
 /*
@@ -3199,183 +3611,214 @@ priv_def_create_from_tuple(struct priv_def *priv, struct tuple *tuple)
  * object can be changed during WAL write.
  * In the future we must protect grant/revoke with a logical lock.
  */
-static void
+static int
 priv_def_check(struct priv_def *priv, enum priv_type priv_type)
 {
-	struct user *grantor = user_find_xc(priv->grantor_id);
+	struct user *grantor = user_find(priv->grantor_id);
+	if (grantor == NULL)
+		return -1;
 	/* May be a role */
 	struct user *grantee = user_by_id(priv->grantee_id);
 	if (grantee == NULL) {
-		tnt_raise(ClientError, ER_NO_SUCH_USER,
+		diag_set(ClientError, ER_NO_SUCH_USER,
 			  int2str(priv->grantee_id));
+		return -1;
 	}
 	const char *name = schema_find_name(priv->object_type, priv->object_id);
-	access_check_ddl(name, priv->object_id, grantor->def->uid,
-			 priv->object_type, priv_type);
+	if (access_check_ddl(name, priv->object_id, grantor->def->uid,
+			 priv->object_type, priv_type) != 0)
+		return -1;
 	switch (priv->object_type) {
-	case SC_UNIVERSE:
-		if (grantor->def->uid != ADMIN) {
-			tnt_raise(AccessDeniedError,
-				  priv_name(priv_type),
-				  schema_object_name(SC_UNIVERSE),
-				  name,
-				  grantor->def->name);
-		}
-		break;
-	case SC_SPACE:
-	{
-		struct space *space = space_cache_find_xc(priv->object_id);
-		if (space->def->uid != grantor->def->uid &&
-		    grantor->def->uid != ADMIN) {
-			tnt_raise(AccessDeniedError,
-				  priv_name(priv_type),
-				  schema_object_name(SC_SPACE), name,
-				  grantor->def->name);
-		}
-		break;
-	}
-	case SC_FUNCTION:
-	{
-		struct func *func = func_cache_find(priv->object_id);
-		if (func->def->uid != grantor->def->uid &&
-		    grantor->def->uid != ADMIN) {
-			tnt_raise(AccessDeniedError,
-				  priv_name(priv_type),
-				  schema_object_name(SC_FUNCTION), name,
-				  grantor->def->name);
-		}
-		break;
-	}
-	case SC_SEQUENCE:
-	{
-		struct sequence *seq = sequence_cache_find(priv->object_id);
-		if (seq->def->uid != grantor->def->uid &&
-		    grantor->def->uid != ADMIN) {
-			tnt_raise(AccessDeniedError,
-				  priv_name(priv_type),
-				  schema_object_name(SC_SEQUENCE), name,
-				  grantor->def->name);
+		case SC_UNIVERSE:
+			if (grantor->def->uid != ADMIN) {
+				diag_set(AccessDeniedError,
+					 priv_name(priv_type),
+					 schema_object_name(SC_UNIVERSE),
+					 name,
+					 grantor->def->name);
+				return -1;
+			}
+			break;
+		case SC_SPACE: {
+			struct space *space = space_cache_find(priv->object_id);
+			if (space == NULL)
+				return -1;
+			if (space->def->uid != grantor->def->uid &&
+			    grantor->def->uid != ADMIN) {
+				diag_set(AccessDeniedError,
+					 priv_name(priv_type),
+					 schema_object_name(SC_SPACE), name,
+					 grantor->def->name);
+				return -1;
+			}
+			break;
 		}
-		break;
-	}
-	case SC_ROLE:
-	{
-		struct user *role = user_by_id(priv->object_id);
-		if (role == NULL || role->def->type != SC_ROLE) {
-			tnt_raise(ClientError, ER_NO_SUCH_ROLE,
-				  role ? role->def->name :
-				  int2str(priv->object_id));
+		case SC_FUNCTION: {
+			struct func *func = func_by_id(priv->object_id);
+			if (func == NULL) {
+				diag_set(ClientError, ER_NO_SUCH_FUNCTION,
+					 int2str(priv->object_id));
+				return -1;
+			}
+			if (func->def->uid != grantor->def->uid &&
+			    grantor->def->uid != ADMIN) {
+				diag_set(AccessDeniedError,
+					 priv_name(priv_type),
+					 schema_object_name(SC_FUNCTION), name,
+					 grantor->def->name);
+				return -1;
+			}
+			break;
 		}
-		/*
-		 * Only the creator of the role can grant or revoke it.
-		 * Everyone can grant 'PUBLIC' role.
-		 */
-		if (role->def->owner != grantor->def->uid &&
-		    grantor->def->uid != ADMIN &&
-		    (role->def->uid != PUBLIC || priv->access != PRIV_X)) {
-			tnt_raise(AccessDeniedError,
-				  priv_name(priv_type),
-				  schema_object_name(SC_ROLE), name,
-				  grantor->def->name);
+		case SC_SEQUENCE: {
+			struct sequence *seq = sequence_by_id(priv->object_id);
+			if (seq == NULL) {
+				diag_set(ClientError, ER_NO_SUCH_SEQUENCE,
+					 int2str(priv->object_id));
+				return -1;
+			}
+			if (seq->def->uid != grantor->def->uid &&
+			    grantor->def->uid != ADMIN) {
+				diag_set(AccessDeniedError,
+					 priv_name(priv_type),
+					 schema_object_name(SC_SEQUENCE), name,
+					 grantor->def->name);
+				return -1;
+			}
+			break;
 		}
-		/* Not necessary to do during revoke, but who cares. */
-		role_check(grantee, role);
-		break;
-	}
-	case SC_USER:
-	{
-		struct user *user = user_by_id(priv->object_id);
-		if (user == NULL || user->def->type != SC_USER) {
-			tnt_raise(ClientError, ER_NO_SUCH_USER,
-				  user ? user->def->name :
-				  int2str(priv->object_id));
+		case SC_ROLE: {
+			struct user *role = user_by_id(priv->object_id);
+			if (role == NULL || role->def->type != SC_ROLE) {
+				diag_set(ClientError, ER_NO_SUCH_ROLE,
+					 role ? role->def->name :
+					 int2str(priv->object_id));
+				return -1;
+			}
+			/*
+			 * Only the creator of the role can grant or revoke it.
+			 * Everyone can grant 'PUBLIC' role.
+			 */
+			if (role->def->owner != grantor->def->uid &&
+			    grantor->def->uid != ADMIN &&
+			    (role->def->uid != PUBLIC || priv->access != PRIV_X)) {
+				diag_set(AccessDeniedError,
+					 priv_name(priv_type),
+					 schema_object_name(SC_ROLE), name,
+					 grantor->def->name);
+				return -1;
+			}
+			/* Not necessary to do during revoke, but who cares. */
+			if (role_check(grantee, role) != 0)
+				return -1;
+			break;
 		}
-		if (user->def->owner != grantor->def->uid &&
-		    grantor->def->uid != ADMIN) {
-			tnt_raise(AccessDeniedError,
-				  priv_name(priv_type),
-				  schema_object_name(SC_USER), name,
-				  grantor->def->name);
+		case SC_USER: {
+			struct user *user = user_by_id(priv->object_id);
+			if (user == NULL || user->def->type != SC_USER) {
+				diag_set(ClientError, ER_NO_SUCH_USER,
+					 user ? user->def->name :
+					 int2str(priv->object_id));
+				return -1;
+			}
+			if (user->def->owner != grantor->def->uid &&
+			    grantor->def->uid != ADMIN) {
+				diag_set(AccessDeniedError,
+					 priv_name(priv_type),
+					 schema_object_name(SC_USER), name,
+					 grantor->def->name);
+				return -1;
+			}
+			break;
 		}
-		break;
-	}
-	case SC_ENTITY_SPACE:
-	case SC_ENTITY_FUNCTION:
-	case SC_ENTITY_SEQUENCE:
-	case SC_ENTITY_ROLE:
-	case SC_ENTITY_USER:
-	{
-		/* Only admin may grant privileges on an entire entity. */
-		if (grantor->def->uid != ADMIN) {
-			tnt_raise(AccessDeniedError, priv_name(priv_type),
-				  schema_object_name(priv->object_type), name,
-				  grantor->def->name);
+		case SC_ENTITY_SPACE:
+		case SC_ENTITY_FUNCTION:
+		case SC_ENTITY_SEQUENCE:
+		case SC_ENTITY_ROLE:
+		case SC_ENTITY_USER: {
+			/* Only admin may grant privileges on an entire entity. */
+			if (grantor->def->uid != ADMIN) {
+				diag_set(AccessDeniedError, priv_name(priv_type),
+					 schema_object_name(priv->object_type), name,
+					 grantor->def->name);
+				return -1;
+			}
 		}
-	}
-	default:
-		break;
+		default:
+			break;
 	}
 	if (priv->access == 0) {
-		tnt_raise(ClientError, ER_GRANT,
+		diag_set(ClientError, ER_GRANT,
 			  "the grant tuple has no privileges");
+		return -1;
 	}
+	return 0;
 }
 
 /**
  * Update a metadata cache object with the new access
  * data.
  */
-static void
+static int
 grant_or_revoke(struct priv_def *priv)
 {
 	struct user *grantee = user_by_id(priv->grantee_id);
 	if (grantee == NULL)
-		return;
+		return 0;
 	/*
 	 * Grant a role to a user only when privilege type is 'execute'
 	 * and the role is specified.
 	 */
-	if (priv->object_type == SC_ROLE && !(priv->access & ~PRIV_X)) {
-		struct user *role = user_by_id(priv->object_id);
-		if (role == NULL || role->def->type != SC_ROLE)
-			return;
-		if (priv->access)
-			role_grant(grantee, role);
-		else
-			role_revoke(grantee, role);
-	} else {
-		priv_grant(grantee, priv);
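+	/* The cache updates may still throw; convert to an error code. */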
+	try {
+		if (priv->object_type == SC_ROLE && !(priv->access & ~PRIV_X)) {
+			struct user *role = user_by_id(priv->object_id);
+			if (role == NULL || role->def->type != SC_ROLE)
+				return 0;
+			if (priv->access)
+				role_grant(grantee, role);
+			else
+				role_revoke(grantee, role);
+		} else {
+			priv_grant(grantee, priv);
+		}
+	} catch (Exception *e) {
+		return -1;
 	}
+	return 0;
 }
 
 /** A trigger called on rollback of grant. */
-static void
+static int
 revoke_priv(struct trigger *trigger, void *event)
 {
 	(void) event;
 	struct tuple *tuple = (struct tuple *)trigger->data;
 	struct priv_def priv;
-	priv_def_create_from_tuple(&priv, tuple);
+	if (priv_def_create_from_tuple(&priv, tuple) != 0)
+		return -1;
 	priv.access = 0;
-	grant_or_revoke(&priv);
+	if (grant_or_revoke(&priv) != 0)
+		return -1;
+	return 0;
 }
 
 /** A trigger called on rollback of revoke or modify. */
-static void
+static int
 modify_priv(struct trigger *trigger, void *event)
 {
 	(void) event;
 	struct tuple *tuple = (struct tuple *)trigger->data;
 	struct priv_def priv;
-	priv_def_create_from_tuple(&priv, tuple);
-	grant_or_revoke(&priv);
+	if (priv_def_create_from_tuple(&priv, tuple) != 0)
+		return -1;
+	if (grant_or_revoke(&priv) != 0)
+		return -1;
+	return 0;
 }
 
 /**
  * A trigger invoked on replace in the space containing
  * all granted privileges.
  */
-static void
+static int
 on_replace_dd_priv(struct trigger * /* trigger */, void *event)
 {
 	struct txn *txn = (struct txn *) event;
@@ -3385,29 +3828,45 @@ on_replace_dd_priv(struct trigger * /* trigger */, void *event)
 	struct priv_def priv;
 
 	if (new_tuple != NULL && old_tuple == NULL) {	/* grant */
-		priv_def_create_from_tuple(&priv, new_tuple);
-		priv_def_check(&priv, PRIV_GRANT);
-		grant_or_revoke(&priv);
+		if (priv_def_create_from_tuple(&priv, new_tuple) != 0)
+			return -1;
+		if (priv_def_check(&priv, PRIV_GRANT) != 0)
+			return -1;
+		if (grant_or_revoke(&priv) != 0)
+			return -1;
 		struct trigger *on_rollback =
 			txn_alter_trigger_new(revoke_priv, new_tuple);
+		if (on_rollback == NULL)
+			return -1;
 		txn_stmt_on_rollback(stmt, on_rollback);
 	} else if (new_tuple == NULL) {                /* revoke */
 		assert(old_tuple);
-		priv_def_create_from_tuple(&priv, old_tuple);
-		priv_def_check(&priv, PRIV_REVOKE);
+		if (priv_def_create_from_tuple(&priv, old_tuple) != 0)
+			return -1;
+		if (priv_def_check(&priv, PRIV_REVOKE) != 0)
+			return -1;
 		priv.access = 0;
-		grant_or_revoke(&priv);
+		if (grant_or_revoke(&priv) != 0)
+			return -1;
 		struct trigger *on_rollback =
 			txn_alter_trigger_new(modify_priv, old_tuple);
+		if (on_rollback == NULL)
+			return -1;
 		txn_stmt_on_rollback(stmt, on_rollback);
 	} else {                                       /* modify */
-		priv_def_create_from_tuple(&priv, new_tuple);
-		priv_def_check(&priv, PRIV_GRANT);
-		grant_or_revoke(&priv);
+		if (priv_def_create_from_tuple(&priv, new_tuple) != 0)
+			return -1;
+		if (priv_def_check(&priv, PRIV_GRANT) != 0)
+			return -1;
+		if (grant_or_revoke(&priv) != 0)
+			return -1;
 		struct trigger *on_rollback =
 			txn_alter_trigger_new(modify_priv, old_tuple);
+		if (on_rollback == NULL)
+			return -1;
 		txn_stmt_on_rollback(stmt, on_rollback);
 	}
+	return 0;
 }
 
 /* }}} access control */
@@ -3423,23 +3882,29 @@ on_replace_dd_priv(struct trigger * /* trigger */, void *event)
  * concern us, we can safely change the cluster id in before-replace
  * event, not in after-replace event.
  */
-static void
+static int
 on_replace_dd_schema(struct trigger * /* trigger */, void *event)
 {
 	struct txn *txn = (struct txn *) event;
 	struct txn_stmt *stmt = txn_current_stmt(txn);
 	struct tuple *old_tuple = stmt->old_tuple;
 	struct tuple *new_tuple = stmt->new_tuple;
-	const char *key = tuple_field_cstr_xc(new_tuple ? new_tuple : old_tuple,
+	const char *key = tuple_field_cstr(new_tuple ? new_tuple : old_tuple,
 					      BOX_SCHEMA_FIELD_KEY);
+	if (key == NULL)
+		return -1;
 	if (strcmp(key, "cluster") == 0) {
-		if (new_tuple == NULL)
-			tnt_raise(ClientError, ER_REPLICASET_UUID_IS_RO);
+		if (new_tuple == NULL) {
+			diag_set(ClientError, ER_REPLICASET_UUID_IS_RO);
+			return -1;
+		}
 		tt_uuid uu;
-		tuple_field_uuid_xc(new_tuple, BOX_CLUSTER_FIELD_UUID, &uu);
+		if (tuple_field_uuid(new_tuple, BOX_CLUSTER_FIELD_UUID, &uu) != 0)
+			return -1;
 		REPLICASET_UUID = uu;
 		say_info("cluster uuid %s", tt_uuid_str(&uu));
 	}
+	return 0;
 }
 
 /**
@@ -3447,14 +3912,16 @@ on_replace_dd_schema(struct trigger * /* trigger */, void *event)
  * write ahead log. Update the cluster configuration cache
  * with it.
  */
-static void
+static int
 register_replica(struct trigger *trigger, void * /* event */)
 {
 	struct tuple *new_tuple = (struct tuple *)trigger->data;
-
-	uint32_t id = tuple_field_u32_xc(new_tuple, BOX_CLUSTER_FIELD_ID);
+	uint32_t id;
+	if (tuple_field_u32(new_tuple, BOX_CLUSTER_FIELD_ID, &id) != 0)
+		return -1;
 	tt_uuid uuid;
-	tuple_field_uuid_xc(new_tuple, BOX_CLUSTER_FIELD_UUID, &uuid);
+	if (tuple_field_uuid(new_tuple, BOX_CLUSTER_FIELD_UUID, &uuid) != 0)
+		return -1;
 	struct replica *replica = replica_by_uuid(&uuid);
 	if (replica != NULL) {
 		replica_set_id(replica, id);
@@ -3462,23 +3929,27 @@ register_replica(struct trigger *trigger, void * /* event */)
 		try {
 			replica = replicaset_add(id, &uuid);
 			/* Can't throw exceptions from on_commit trigger */
-		} catch(Exception *e) {
+		} catch (Exception *e) {
 			panic("Can't register replica: %s", e->errmsg);
 		}
 	}
+	return 0;
 }
 
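+/** Clear the replica id when its record is deleted from _cluster. */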
-static void
+static int
 unregister_replica(struct trigger *trigger, void * /* event */)
 {
 	struct tuple *old_tuple = (struct tuple *)trigger->data;
 
 	struct tt_uuid old_uuid;
-	tuple_field_uuid_xc(old_tuple, BOX_CLUSTER_FIELD_UUID, &old_uuid);
+	if (tuple_field_uuid(old_tuple, BOX_CLUSTER_FIELD_UUID, &old_uuid) != 0)
+		return -1;
 
 	struct replica *replica = replica_by_uuid(&old_uuid);
 	assert(replica != NULL);
 	replica_clear_id(replica);
+	return 0;
 }
 
 /**
@@ -3499,7 +3970,7 @@ unregister_replica(struct trigger *trigger, void * /* event */)
  * replica set can not by mistake join/follow another replica
  * set without first being reset (emptied).
  */
-static void
+static int
 on_replace_dd_cluster(struct trigger *trigger, void *event)
 {
 	(void) trigger;
@@ -3508,16 +3979,21 @@ on_replace_dd_cluster(struct trigger *trigger, void *event)
 	struct tuple *old_tuple = stmt->old_tuple;
 	struct tuple *new_tuple = stmt->new_tuple;
 	if (new_tuple != NULL) { /* Insert or replace */
-		/* Check fields */
-		uint32_t replica_id =
-			tuple_field_u32_xc(new_tuple, BOX_CLUSTER_FIELD_ID);
-		replica_check_id(replica_id);
 		tt_uuid replica_uuid;
-		tuple_field_uuid_xc(new_tuple, BOX_CLUSTER_FIELD_UUID,
-				    &replica_uuid);
-		if (tt_uuid_is_nil(&replica_uuid))
-			tnt_raise(ClientError, ER_INVALID_UUID,
+		/* Check fields */
+		uint32_t replica_id;
+		if (tuple_field_u32(new_tuple, BOX_CLUSTER_FIELD_ID,
+				    &replica_id) != 0)
+			return -1;
+		if (replica_check_id(replica_id) != 0)
+			return -1;
+		if (tuple_field_uuid(new_tuple, BOX_CLUSTER_FIELD_UUID,
+				    &replica_uuid) != 0)
+			return -1;
+		if (tt_uuid_is_nil(&replica_uuid)) {
+			diag_set(ClientError, ER_INVALID_UUID,
 				  tt_uuid_str(&replica_uuid));
+			return -1;
+		}
 		if (old_tuple != NULL) {
 			/*
 			 * Forbid changes of UUID for a registered instance:
@@ -3525,17 +4001,21 @@ on_replace_dd_cluster(struct trigger *trigger, void *event)
 			 * in sync with appliers and relays.
 			 */
 			tt_uuid old_uuid;
-			tuple_field_uuid_xc(old_tuple, BOX_CLUSTER_FIELD_UUID,
-					    &old_uuid);
+			if (tuple_field_uuid(old_tuple, BOX_CLUSTER_FIELD_UUID,
+						    &old_uuid) != 0)
+				return -1;
 			if (!tt_uuid_is_equal(&replica_uuid, &old_uuid)) {
-				tnt_raise(ClientError, ER_UNSUPPORTED,
+				diag_set(ClientError, ER_UNSUPPORTED,
 					  "Space _cluster",
 					  "updates of instance uuid");
+				return -1;
 			}
 		} else {
 			struct trigger *on_commit;
 			on_commit = txn_alter_trigger_new(register_replica,
 							  new_tuple);
+			if (on_commit == NULL)
+				return -1;
 			txn_stmt_on_commit(stmt, on_commit);
 		}
 	} else {
@@ -3544,15 +4024,20 @@ on_replace_dd_cluster(struct trigger *trigger, void *event)
 		 * from _cluster.
 		 */
 		assert(old_tuple != NULL);
-		uint32_t replica_id =
-			tuple_field_u32_xc(old_tuple, BOX_CLUSTER_FIELD_ID);
-		replica_check_id(replica_id);
+		uint32_t replica_id;
+		if (tuple_field_u32(old_tuple, BOX_CLUSTER_FIELD_ID,
+				    &replica_id) != 0)
+			return -1;
+		if (replica_check_id(replica_id) != 0)
+			return -1;
 
 		struct trigger *on_commit;
 		on_commit = txn_alter_trigger_new(unregister_replica,
 						  old_tuple);
+		if (on_commit == NULL)
+			return -1;
 		txn_stmt_on_commit(stmt, on_commit);
 	}
+	return 0;
 }
 
 /* }}} cluster configuration */
@@ -3564,79 +4049,105 @@ static struct sequence_def *
 sequence_def_new_from_tuple(struct tuple *tuple, uint32_t errcode)
 {
 	uint32_t name_len;
-	const char *name = tuple_field_str_xc(tuple, BOX_USER_FIELD_NAME,
-					      &name_len);
+	const char *name = tuple_field_str(tuple, BOX_USER_FIELD_NAME,
+					   &name_len);
+	if (name == NULL)
+		return NULL;
 	if (name_len > BOX_NAME_MAX) {
-		tnt_raise(ClientError, errcode,
+		diag_set(ClientError, errcode,
 			  tt_cstr(name, BOX_INVALID_NAME_MAX),
 			  "sequence name is too long");
+		return NULL;
 	}
-	identifier_check_xc(name, name_len);
+	if (identifier_check(name, name_len) != 0)
+		return NULL;
 	size_t sz = sequence_def_sizeof(name_len);
 	struct sequence_def *def = (struct sequence_def *) malloc(sz);
-	if (def == NULL)
-		tnt_raise(OutOfMemory, sz, "malloc", "sequence");
+	if (def == NULL) {
+		diag_set(OutOfMemory, sz, "malloc", "sequence");
+		return NULL;
+	}
 	auto def_guard = make_scoped_guard([=] { free(def); });
 	memcpy(def->name, name, name_len);
 	def->name[name_len] = '\0';
-	def->id = tuple_field_u32_xc(tuple, BOX_SEQUENCE_FIELD_ID);
-	def->uid = tuple_field_u32_xc(tuple, BOX_SEQUENCE_FIELD_UID);
-	def->step = tuple_field_i64_xc(tuple, BOX_SEQUENCE_FIELD_STEP);
-	def->min = tuple_field_i64_xc(tuple, BOX_SEQUENCE_FIELD_MIN);
-	def->max = tuple_field_i64_xc(tuple, BOX_SEQUENCE_FIELD_MAX);
-	def->start = tuple_field_i64_xc(tuple, BOX_SEQUENCE_FIELD_START);
-	def->cache = tuple_field_i64_xc(tuple, BOX_SEQUENCE_FIELD_CACHE);
-	def->cycle = tuple_field_bool_xc(tuple, BOX_SEQUENCE_FIELD_CYCLE);
-	if (def->step == 0)
-		tnt_raise(ClientError, errcode, def->name,
-			  "step option must be non-zero");
-	if (def->min > def->max)
-		tnt_raise(ClientError, errcode, def->name,
-			  "max must be greater than or equal to min");
-	if (def->start < def->min || def->start > def->max)
-		tnt_raise(ClientError, errcode, def->name,
-			  "start must be between min and max");
+	if (tuple_field_u32(tuple, BOX_SEQUENCE_FIELD_ID, &(def->id)) != 0)
+		return NULL;
+	if (tuple_field_u32(tuple, BOX_SEQUENCE_FIELD_UID, &(def->uid)) != 0)
+		return NULL;
+	if (tuple_field_i64(tuple, BOX_SEQUENCE_FIELD_STEP, &(def->step)) != 0)
+		return NULL;
+	if (tuple_field_i64(tuple, BOX_SEQUENCE_FIELD_MIN, &(def->min)) != 0)
+		return NULL;
+	if (tuple_field_i64(tuple, BOX_SEQUENCE_FIELD_MAX, &(def->max)) != 0)
+		return NULL;
+	if (tuple_field_i64(tuple, BOX_SEQUENCE_FIELD_START, &(def->start)) != 0)
+		return NULL;
+	if (tuple_field_i64(tuple, BOX_SEQUENCE_FIELD_CACHE, &(def->cache)) != 0)
+		return NULL;
+	if (tuple_field_bool(tuple, BOX_SEQUENCE_FIELD_CYCLE, &(def->cycle)) != 0)
+		return NULL;
+	if (def->step == 0) {
+		diag_set(ClientError, errcode, def->name,
+			 "step option must be non-zero");
+		return NULL;
+	}
+	if (def->min > def->max) {
+		diag_set(ClientError, errcode, def->name,
+			 "max must be greater than or equal to min");
+		return NULL;
+	}
+	if (def->start < def->min || def->start > def->max) {
+		diag_set(ClientError, errcode, def->name,
+			 "start must be between min and max");
+		return NULL;
+	}
 	def_guard.is_active = false;
 	return def;
 }
 
-static void
+static int
 on_create_sequence_rollback(struct trigger *trigger, void * /* event */)
 {
 	/* Remove the new sequence from the cache and delete it. */
 	struct sequence *seq = (struct sequence *)trigger->data;
 	sequence_cache_delete(seq->def->id);
-	trigger_run_xc(&on_alter_sequence, seq);
+	if (trigger_run(&on_alter_sequence, seq) != 0)
+		return -1;
 	sequence_delete(seq);
+	return 0;
 }
 
-static void
+static int
 on_drop_sequence_commit(struct trigger *trigger, void * /* event */)
 {
 	/* Delete the old sequence. */
 	struct sequence *seq = (struct sequence *)trigger->data;
 	sequence_delete(seq);
+	return 0;
 }
 
-static void
+static int
 on_drop_sequence_rollback(struct trigger *trigger, void * /* event */)
 {
 	/* Insert the old sequence back into the cache. */
 	struct sequence *seq = (struct sequence *)trigger->data;
 	sequence_cache_insert(seq);
-	trigger_run_xc(&on_alter_sequence, seq);
+	if (trigger_run(&on_alter_sequence, seq) != 0)
+		return -1;
+	return 0;
 }
 
 
-static void
+static int
 on_alter_sequence_commit(struct trigger *trigger, void * /* event */)
 {
 	/* Delete the old sequence definition. */
 	struct sequence_def *def = (struct sequence_def *)trigger->data;
 	free(def);
+	return 0;
 }
 
-static void
+static int
 on_alter_sequence_rollback(struct trigger *trigger, void * /* event */)
 {
 	/* Restore the old sequence definition. */
@@ -3645,14 +4156,16 @@ on_alter_sequence_rollback(struct trigger *trigger, void * /* event */)
 	assert(seq != NULL);
 	free(seq->def);
 	seq->def = def;
-	trigger_run_xc(&on_alter_sequence, seq);
+	if (trigger_run(&on_alter_sequence, seq) != 0)
+		return -1;
+	return 0;
 }
 
 /**
  * A trigger invoked on replace in space _sequence.
  * Used to alter a sequence definition.
  */
-static void
+static int
 on_replace_dd_sequence(struct trigger * /* trigger */, void *event)
 {
 	struct txn *txn = (struct txn *) event;
@@ -3667,76 +4180,112 @@ on_replace_dd_sequence(struct trigger * /* trigger */, void *event)
 	if (old_tuple == NULL && new_tuple != NULL) {		/* INSERT */
 		new_def = sequence_def_new_from_tuple(new_tuple,
 						      ER_CREATE_SEQUENCE);
-		access_check_ddl(new_def->name, new_def->id, new_def->uid,
-				 SC_SEQUENCE, PRIV_C);
+		if (new_def == NULL)
+			return -1;
+		if (access_check_ddl(new_def->name, new_def->id, new_def->uid,
+				 SC_SEQUENCE, PRIV_C) != 0)
+			return -1;
 		struct trigger *on_rollback =
 			txn_alter_trigger_new(on_create_sequence_rollback, NULL);
-		seq = sequence_new_xc(new_def);
+		if (on_rollback == NULL)
+			return -1;
+		seq = sequence_new(new_def);
+		if (seq == NULL)
+			return -1;
 		sequence_cache_insert(seq);
 		on_rollback->data = seq;
 		txn_stmt_on_rollback(stmt, on_rollback);
 	} else if (old_tuple != NULL && new_tuple == NULL) {	/* DELETE */
-		uint32_t id = tuple_field_u32_xc(old_tuple,
-						 BOX_SEQUENCE_DATA_FIELD_ID);
+		uint32_t id;
+		if (tuple_field_u32(old_tuple, BOX_SEQUENCE_DATA_FIELD_ID,
+				    &id) != 0)
+			return -1;
 		seq = sequence_by_id(id);
 		assert(seq != NULL);
-		access_check_ddl(seq->def->name, seq->def->id, seq->def->uid,
-				 SC_SEQUENCE, PRIV_D);
-		if (space_has_data(BOX_SEQUENCE_DATA_ID, 0, id))
-			tnt_raise(ClientError, ER_DROP_SEQUENCE,
+		if (access_check_ddl(seq->def->name, seq->def->id, seq->def->uid,
+				 SC_SEQUENCE, PRIV_D) != 0)
+			return -1;
+		bool out;
+		if (space_has_data(BOX_SEQUENCE_DATA_ID, 0, id, &out) != 0)
+			return -1;
+		if (out) {
+			diag_set(ClientError, ER_DROP_SEQUENCE,
 				  seq->def->name, "the sequence has data");
-		if (space_has_data(BOX_SPACE_SEQUENCE_ID, 1, id))
-			tnt_raise(ClientError, ER_DROP_SEQUENCE,
-				  seq->def->name, "the sequence is in use");
-		if (schema_find_grants("sequence", seq->def->id))
-			tnt_raise(ClientError, ER_DROP_SEQUENCE,
+			return -1;
+		}
+		if (space_has_data(BOX_SPACE_SEQUENCE_ID, 1, id, &out) != 0)
+			return -1;
+		if (out) {
+			diag_set(ClientError, ER_DROP_SEQUENCE,
+				 seq->def->name, "the sequence is in use");
+			return -1;
+		}
+		if (schema_find_grants("sequence", seq->def->id, &out) != 0)
+			return -1;
+		if (out) {
+			diag_set(ClientError, ER_DROP_SEQUENCE,
 				  seq->def->name, "the sequence has grants");
+			return -1;
+		}
 		struct trigger *on_commit =
 			txn_alter_trigger_new(on_drop_sequence_commit, seq);
 		struct trigger *on_rollback =
 			txn_alter_trigger_new(on_drop_sequence_rollback, seq);
+		if (on_commit == NULL || on_rollback == NULL)
+			return -1;
 		sequence_cache_delete(seq->def->id);
 		txn_stmt_on_commit(stmt, on_commit);
 		txn_stmt_on_rollback(stmt, on_rollback);
 	} else {						/* UPDATE */
 		new_def = sequence_def_new_from_tuple(new_tuple,
 						      ER_ALTER_SEQUENCE);
+		if (new_def == NULL)
+			return -1;
 		seq = sequence_by_id(new_def->id);
 		assert(seq != NULL);
-		access_check_ddl(seq->def->name, seq->def->id, seq->def->uid,
-				 SC_SEQUENCE, PRIV_A);
+		if (access_check_ddl(seq->def->name, seq->def->id, seq->def->uid,
+				 SC_SEQUENCE, PRIV_A) != 0)
+			return -1;
 		struct trigger *on_commit =
 			txn_alter_trigger_new(on_alter_sequence_commit, seq->def);
 		struct trigger *on_rollback =
 			txn_alter_trigger_new(on_alter_sequence_rollback, seq->def);
+		if (on_commit == NULL || on_rollback == NULL)
+			return -1;
 		seq->def = new_def;
 		txn_stmt_on_commit(stmt, on_commit);
 		txn_stmt_on_rollback(stmt, on_rollback);
 	}
 
 	def_guard.is_active = false;
-	trigger_run_xc(&on_alter_sequence, seq);
+	if (trigger_run(&on_alter_sequence, seq) != 0)
+		return -1;
+	return 0;
 }
 
 /** Restore the old sequence value on rollback. */
-static void
+static int
 on_drop_sequence_data_rollback(struct trigger *trigger, void * /* event */)
 {
 	struct tuple *tuple = (struct tuple *)trigger->data;
-	uint32_t id = tuple_field_u32_xc(tuple, BOX_SEQUENCE_DATA_FIELD_ID);
-	int64_t val = tuple_field_i64_xc(tuple, BOX_SEQUENCE_DATA_FIELD_VALUE);
-
+	uint32_t id;
+	if (tuple_field_u32(tuple, BOX_SEQUENCE_DATA_FIELD_ID, &id) != 0)
+		return -1;
+	int64_t val;
+	if (tuple_field_i64(tuple, BOX_SEQUENCE_DATA_FIELD_VALUE, &val) != 0)
+		return -1;
 	struct sequence *seq = sequence_by_id(id);
 	assert(seq != NULL);
 	if (sequence_set(seq, val) != 0)
 		panic("Can't restore sequence value");
+	return 0;
 }
 
 /**
  * A trigger invoked on replace in space _sequence_data.
  * Used to update a sequence value.
  */
-static void
+static int
 on_replace_dd_sequence_data(struct trigger * /* trigger */, void *event)
 {
 	struct txn *txn = (struct txn *) event;
@@ -3744,17 +4293,21 @@ on_replace_dd_sequence_data(struct trigger * /* trigger */, void *event)
 	struct tuple *old_tuple = stmt->old_tuple;
 	struct tuple *new_tuple = stmt->new_tuple;
 
-	uint32_t id = tuple_field_u32_xc(old_tuple ?: new_tuple,
-					 BOX_SEQUENCE_DATA_FIELD_ID);
-	struct sequence *seq = sequence_cache_find(id);
+	uint32_t id;
+	if (tuple_field_u32(old_tuple ?: new_tuple,
+			    BOX_SEQUENCE_DATA_FIELD_ID, &id) != 0)
+		return -1;
+	struct sequence *seq = sequence_by_id(id);
 	if (seq == NULL)
-		diag_raise();
-	if (new_tuple != NULL) {			/* INSERT, UPDATE */
-		int64_t value = tuple_field_i64_xc(new_tuple,
-				BOX_SEQUENCE_DATA_FIELD_VALUE);
+		return -1;
+	if (new_tuple != NULL) {                        /* INSERT, UPDATE */
+		int64_t value;
+		if (tuple_field_i64(new_tuple, BOX_SEQUENCE_DATA_FIELD_VALUE,
+				    &value) != 0)
+			return -1;
 		if (sequence_set(seq, value) != 0)
-			diag_raise();
-	} else {					/* DELETE */
+			return -1;
+	} else {                                        /* DELETE */
 		/*
 		 * A sequence isn't supposed to roll back to the old
 		 * value if the transaction it was used in is aborted
@@ -3763,21 +4316,27 @@ on_replace_dd_sequence_data(struct trigger * /* trigger */, void *event)
 		 * on rollback.
 		 */
 		struct trigger *on_rollback = txn_alter_trigger_new(
-				on_drop_sequence_data_rollback, old_tuple);
+			on_drop_sequence_data_rollback, old_tuple);
+		if (on_rollback == NULL)
+			return -1;
 		txn_stmt_on_rollback(stmt, on_rollback);
 		sequence_reset(seq);
 	}
+	return 0;
 }
 
 /**
  * Extract field number and path from _space_sequence tuple.
  * The path is allocated using malloc().
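+ * Return 0 and store the field number in @a out on success, -1 on error.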
  */
-static uint32_t
+static int
 sequence_field_from_tuple(struct space *space, struct tuple *tuple,
-			  char **path_ptr)
+			  char **path_ptr, uint32_t *out)
 {
-	struct index *pk = index_find_xc(space, 0);
+	struct index *pk = index_find(space, 0);
+	if (pk == NULL)
+		return -1;
 	struct key_part *part = &pk->def->key_def->parts[0];
 	uint32_t fieldno = part->fieldno;
 	const char *path_raw = part->path;
@@ -3785,60 +4344,75 @@ sequence_field_from_tuple(struct space *space, struct tuple *tuple,
 
 	/* Sequence field was added in 2.2.1. */
 	if (tuple_field_count(tuple) > BOX_SPACE_SEQUENCE_FIELD_FIELDNO) {
-		fieldno = tuple_field_u32_xc(tuple,
-				BOX_SPACE_SEQUENCE_FIELD_FIELDNO);
-		path_raw = tuple_field_str_xc(tuple,
-				BOX_SPACE_SEQUENCE_FIELD_PATH, &path_len);
+		if (tuple_field_u32(tuple, BOX_SPACE_SEQUENCE_FIELD_FIELDNO,
+				    &fieldno) != 0)
+			return -1;
+		path_raw = tuple_field_str(tuple,
+			BOX_SPACE_SEQUENCE_FIELD_PATH, &path_len);
+		if (path_raw == NULL)
+			return -1;
 		if (path_len == 0)
 			path_raw = NULL;
 	}
-	index_def_check_sequence(pk->def, fieldno, path_raw, path_len,
-				 space_name(space));
+	if (index_def_check_sequence(pk->def, fieldno, path_raw, path_len,
+				 space_name(space)) != 0)
+		return -1;
 	char *path = NULL;
 	if (path_raw != NULL) {
 		path = (char *)malloc(path_len + 1);
-		if (path == NULL)
-			tnt_raise(OutOfMemory, path_len + 1,
+		if (path == NULL) {
+			diag_set(OutOfMemory, path_len + 1,
 				  "malloc", "sequence path");
+			return -1;
+		}
 		memcpy(path, path_raw, path_len);
 		path[path_len] = 0;
 	}
 	*path_ptr = path;
-	return fieldno;
+	*out = fieldno;
+	return 0;
 }
 
 /** Attach a sequence to a space on rollback in _space_sequence. */
-static void
+static int
 set_space_sequence(struct trigger *trigger, void * /* event */)
 {
 	struct tuple *tuple = (struct tuple *)trigger->data;
-	uint32_t space_id = tuple_field_u32_xc(tuple,
-			BOX_SPACE_SEQUENCE_FIELD_ID);
-	uint32_t sequence_id = tuple_field_u32_xc(tuple,
-			BOX_SPACE_SEQUENCE_FIELD_SEQUENCE_ID);
-	bool is_generated = tuple_field_bool_xc(tuple,
-			BOX_SPACE_SEQUENCE_FIELD_IS_GENERATED);
+	uint32_t space_id;
+	if (tuple_field_u32(tuple, BOX_SPACE_SEQUENCE_FIELD_ID, &space_id) != 0)
+		return -1;
+	uint32_t sequence_id;
+	if (tuple_field_u32(tuple, BOX_SPACE_SEQUENCE_FIELD_SEQUENCE_ID,
+			    &sequence_id) != 0)
+		return -1;
+	bool is_generated;
+	if (tuple_field_bool(tuple, BOX_SPACE_SEQUENCE_FIELD_IS_GENERATED,
+			     &is_generated) != 0)
+		return -1;
 	struct space *space = space_by_id(space_id);
 	assert(space != NULL);
 	struct sequence *seq = sequence_by_id(sequence_id);
 	assert(seq != NULL);
 	char *path;
-	uint32_t fieldno = sequence_field_from_tuple(space, tuple, &path);
+	uint32_t fieldno;
+	if (sequence_field_from_tuple(space, tuple, &path, &fieldno) != 0)
+		return -1;
 	seq->is_generated = is_generated;
 	space->sequence = seq;
 	space->sequence_fieldno = fieldno;
 	free(space->sequence_path);
 	space->sequence_path = path;
-	trigger_run_xc(&on_alter_space, space);
+	if (trigger_run(&on_alter_space, space) != 0)
+		return -1;
+	return 0;
 }
 
 /** Detach a sequence from a space on rollback in _space_sequence. */
-static void
+static int
 clear_space_sequence(struct trigger *trigger, void * /* event */)
 {
 	struct tuple *tuple = (struct tuple *)trigger->data;
-	uint32_t space_id = tuple_field_u32_xc(tuple,
-			BOX_SPACE_SEQUENCE_FIELD_ID);
+	uint32_t space_id;
+	if (tuple_field_u32(tuple, BOX_SPACE_SEQUENCE_FIELD_ID, &space_id) != 0)
+		return -1;
 	struct space *space = space_by_id(space_id);
 	assert(space != NULL);
 	assert(space->sequence != NULL);
@@ -3847,29 +4421,37 @@ clear_space_sequence(struct trigger *trigger, void * /* event */)
 	space->sequence_fieldno = 0;
 	free(space->sequence_path);
 	space->sequence_path = NULL;
-	trigger_run_xc(&on_alter_space, space);
+	if (trigger_run(&on_alter_space, space) != 0)
+		return -1;
+	return 0;
 }
 
 /**
  * A trigger invoked on replace in space _space_sequence.
  * Used to update space <-> sequence mapping.
  */
-static void
+static int
 on_replace_dd_space_sequence(struct trigger * /* trigger */, void *event)
 {
 	struct txn *txn = (struct txn *) event;
 	struct txn_stmt *stmt = txn_current_stmt(txn);
 	struct tuple *tuple = stmt->new_tuple ? stmt->new_tuple : stmt->old_tuple;
-
-	uint32_t space_id = tuple_field_u32_xc(tuple,
-					       BOX_SPACE_SEQUENCE_FIELD_ID);
-	uint32_t sequence_id = tuple_field_u32_xc(tuple,
-				BOX_SPACE_SEQUENCE_FIELD_SEQUENCE_ID);
-	bool is_generated = tuple_field_bool_xc(tuple,
-				BOX_SPACE_SEQUENCE_FIELD_IS_GENERATED);
-
-	struct space *space = space_cache_find_xc(space_id);
-	struct sequence *seq = sequence_cache_find(sequence_id);
+	uint32_t space_id;
+	if (tuple_field_u32(tuple, BOX_SPACE_SEQUENCE_FIELD_ID, &space_id) != 0)
+		return -1;
+	uint32_t sequence_id;
+	if (tuple_field_u32(tuple, BOX_SPACE_SEQUENCE_FIELD_SEQUENCE_ID,
+			    &sequence_id) != 0)
+		return -1;
+	bool is_generated;
+	if (tuple_field_bool(tuple, BOX_SPACE_SEQUENCE_FIELD_IS_GENERATED,
+		&is_generated) != 0)
+		return -1;
+	struct space *space = space_cache_find(space_id);
+	if (space == NULL)
+		return -1;
+	struct sequence *seq = sequence_by_id(sequence_id);
+	if (seq == NULL)
+		return -1;
 
 	enum priv_type priv_type = stmt->new_tuple ? PRIV_C : PRIV_D;
 	if (stmt->new_tuple && stmt->old_tuple)
@@ -3877,34 +4459,40 @@ on_replace_dd_space_sequence(struct trigger * /* trigger */, void *event)
 
 	/* Check we have the correct access type on the sequence.  * */
 	if (is_generated || !stmt->new_tuple) {
-		access_check_ddl(seq->def->name, seq->def->id, seq->def->uid,
-				 SC_SEQUENCE, priv_type);
+		if (access_check_ddl(seq->def->name, seq->def->id, seq->def->uid,
+				     SC_SEQUENCE, priv_type) != 0)
+			return -1;
 	} else {
 		/*
 		 * In case user wants to attach an existing sequence,
 		 * check that it has read and write access.
 		 */
-		access_check_ddl(seq->def->name, seq->def->id, seq->def->uid,
-				 SC_SEQUENCE, PRIV_R);
-		access_check_ddl(seq->def->name, seq->def->id, seq->def->uid,
-				 SC_SEQUENCE, PRIV_W);
+		if (access_check_ddl(seq->def->name, seq->def->id, seq->def->uid,
+				     SC_SEQUENCE, PRIV_R) != 0)
+			return -1;
+		if (access_check_ddl(seq->def->name, seq->def->id, seq->def->uid,
+				     SC_SEQUENCE, PRIV_W) != 0)
+			return -1;
 	}
 	/** Check we have alter access on space. */
-	access_check_ddl(space->def->name, space->def->id, space->def->uid,
-			 SC_SPACE, PRIV_A);
+	if (access_check_ddl(space->def->name, space->def->id, space->def->uid,
+			     SC_SPACE, PRIV_A) != 0)
+		return -1;
 
 	if (stmt->new_tuple != NULL) {			/* INSERT, UPDATE */
 		char *sequence_path;
 		uint32_t sequence_fieldno;
-		sequence_fieldno = sequence_field_from_tuple(space, tuple,
-							     &sequence_path);
+		if (sequence_field_from_tuple(space, tuple, &sequence_path,
+			&sequence_fieldno) != 0)
+			return -1;
 		auto sequence_path_guard = make_scoped_guard([=] {
 			free(sequence_path);
 		});
 		if (seq->is_generated) {
-			tnt_raise(ClientError, ER_ALTER_SPACE,
+			diag_set(ClientError, ER_ALTER_SPACE,
 				  space_name(space),
 				  "can not attach generated sequence");
+			return -1;
 		}
 		struct trigger *on_rollback;
 		if (stmt->old_tuple != NULL)
@@ -3913,6 +4501,8 @@ on_replace_dd_space_sequence(struct trigger * /* trigger */, void *event)
 		else
 			on_rollback = txn_alter_trigger_new(clear_space_sequence,
 							    stmt->new_tuple);
+		if (on_rollback == NULL)
+			return -1;
 		seq->is_generated = is_generated;
 		space->sequence = seq;
 		space->sequence_fieldno = sequence_fieldno;
@@ -3924,6 +4514,8 @@ on_replace_dd_space_sequence(struct trigger * /* trigger */, void *event)
 		struct trigger *on_rollback;
 		on_rollback = txn_alter_trigger_new(set_space_sequence,
 						    stmt->old_tuple);
+		if (on_rollback == NULL)
+			return -1;
 		assert(space->sequence == seq);
 		seq->is_generated = false;
 		space->sequence = NULL;
@@ -3932,13 +4524,15 @@ on_replace_dd_space_sequence(struct trigger * /* trigger */, void *event)
 		space->sequence_path = NULL;
 		txn_stmt_on_rollback(stmt, on_rollback);
 	}
-	trigger_run_xc(&on_alter_space, space);
+	if (trigger_run(&on_alter_space, space) != 0)
+		return -1;
+	return 0;
 }
 
 /* }}} sequence */
 
 /** Delete the new trigger on rollback of an INSERT statement. */
-static void
+static int
 on_create_trigger_rollback(struct trigger *trigger, void * /* event */)
 {
 	struct sql_trigger *old_trigger = (struct sql_trigger *)trigger->data;
@@ -3950,28 +4544,30 @@ on_create_trigger_rollback(struct trigger *trigger, void * /* event */)
 	assert(rc == 0);
 	assert(new_trigger == old_trigger);
 	sql_trigger_delete(sql_get(), new_trigger);
+	return 0;
 }
 
 /** Restore the old trigger on rollback of a DELETE statement. */
-static void
+static int
 on_drop_trigger_rollback(struct trigger *trigger, void * /* event */)
 {
 	struct sql_trigger *old_trigger = (struct sql_trigger *)trigger->data;
 	struct sql_trigger *new_trigger;
 	if (old_trigger == NULL)
-		return;
+		return 0;
 	if (sql_trigger_replace(sql_trigger_name(old_trigger),
 				sql_trigger_space_id(old_trigger),
 				old_trigger, &new_trigger) != 0)
 		panic("Out of memory on insertion into trigger hash");
 	assert(new_trigger == NULL);
+	return 0;
 }
 
 /**
  * Restore the old trigger and delete the new trigger on rollback
  * of a REPLACE statement.
  */
-static void
+static int
 on_replace_trigger_rollback(struct trigger *trigger, void * /* event */)
 {
 	struct sql_trigger *old_trigger = (struct sql_trigger *)trigger->data;
@@ -3981,24 +4577,26 @@ on_replace_trigger_rollback(struct trigger *trigger, void * /* event */)
 				old_trigger, &new_trigger) != 0)
 		panic("Out of memory on insertion into trigger hash");
 	sql_trigger_delete(sql_get(), new_trigger);
+	return 0;
 }
 
 /**
  * Trigger invoked on commit in the _trigger space.
  * Drop useless old sql_trigger AST object if any.
  */
-static void
+static int
 on_replace_trigger_commit(struct trigger *trigger, void * /* event */)
 {
 	struct sql_trigger *old_trigger = (struct sql_trigger *)trigger->data;
 	sql_trigger_delete(sql_get(), old_trigger);
+	return 0;
 }
 
 /**
  * A trigger invoked on replace in a space containing
  * SQL triggers.
  */
-static void
+static int
 on_replace_dd_trigger(struct trigger * /* trigger */, void *event)
 {
 	struct txn *txn = (struct txn *) event;
@@ -4009,19 +4607,24 @@ on_replace_dd_trigger(struct trigger * /* trigger */, void *event)
 	struct trigger *on_rollback = txn_alter_trigger_new(NULL, NULL);
 	struct trigger *on_commit =
 		txn_alter_trigger_new(on_replace_trigger_commit, NULL);
+	if (on_rollback == NULL || on_commit == NULL)
+		return -1;
 
 	if (old_tuple != NULL && new_tuple == NULL) {
 		/* DROP trigger. */
 		uint32_t trigger_name_len;
-		const char *trigger_name_src =
-			tuple_field_str_xc(old_tuple, BOX_TRIGGER_FIELD_NAME,
-					   &trigger_name_len);
-		uint32_t space_id =
-			tuple_field_u32_xc(old_tuple,
-					   BOX_TRIGGER_FIELD_SPACE_ID);
-		char *trigger_name =
-			(char *)region_alloc_xc(&fiber()->gc,
-						trigger_name_len + 1);
+		const char *trigger_name_src = tuple_field_str(old_tuple,
+			BOX_TRIGGER_FIELD_NAME, &trigger_name_len);
+		if (trigger_name_src == NULL)
+			return -1;
+		uint32_t space_id;
+		if (tuple_field_u32(old_tuple, BOX_TRIGGER_FIELD_SPACE_ID,
+				    &space_id) != 0)
+			return -1;
+		char *trigger_name = (char *)region_alloc(&fiber()->gc,
+					     trigger_name_len + 1);
+		if (trigger_name == NULL) {
+			diag_set(OutOfMemory, trigger_name_len + 1,
+				 "region_alloc", "trigger_name");
+			return -1;
+		}
 		memcpy(trigger_name, trigger_name_src, trigger_name_len);
 		trigger_name[trigger_name_len] = 0;
 
@@ -4037,21 +4640,22 @@ on_replace_dd_trigger(struct trigger * /* trigger */, void *event)
 	} else {
 		/* INSERT, REPLACE trigger. */
 		uint32_t trigger_name_len;
-		const char *trigger_name_src =
-			tuple_field_str_xc(new_tuple, BOX_TRIGGER_FIELD_NAME,
-					   &trigger_name_len);
-
-		const char *space_opts =
-			tuple_field_with_type_xc(new_tuple,
-						 BOX_TRIGGER_FIELD_OPTS,
-						 MP_MAP);
+		const char *trigger_name_src = tuple_field_str(new_tuple,
+			BOX_TRIGGER_FIELD_NAME, &trigger_name_len);
+		if (trigger_name_src == NULL)
+			return -1;
+		const char *space_opts = tuple_field_with_type(new_tuple,
+				BOX_TRIGGER_FIELD_OPTS, MP_MAP);
+		if (space_opts == NULL)
+			return -1;
 		struct space_opts opts;
 		struct region *region = &fiber()->gc;
-		space_opts_decode(&opts, space_opts, region);
+		if (space_opts_decode(&opts, space_opts, region) != 0)
+			return -1;
 		struct sql_trigger *new_trigger =
 			sql_trigger_compile(sql_get(), opts.sql);
 		if (new_trigger == NULL)
-			diag_raise();
+			return -1;
 
 		auto new_trigger_guard = make_scoped_guard([=] {
 		    sql_trigger_delete(sql_get(), new_trigger);
@@ -4061,24 +4665,27 @@ on_replace_dd_trigger(struct trigger * /* trigger */, void *event)
 		if (strlen(trigger_name) != trigger_name_len ||
 		    memcmp(trigger_name_src, trigger_name,
 			   trigger_name_len) != 0) {
-			tnt_raise(ClientError, ER_SQL_EXECUTE,
+			diag_set(ClientError, ER_SQL_EXECUTE,
 				  "trigger name does not match extracted "
 				  "from SQL");
+			return -1;
 		}
-		uint32_t space_id =
-			tuple_field_u32_xc(new_tuple,
-					   BOX_TRIGGER_FIELD_SPACE_ID);
+		uint32_t space_id;
+		if (tuple_field_u32(new_tuple, BOX_TRIGGER_FIELD_SPACE_ID,
+				    &space_id) != 0)
+			return -1;
 		if (space_id != sql_trigger_space_id(new_trigger)) {
-			tnt_raise(ClientError, ER_SQL_EXECUTE,
+			diag_set(ClientError, ER_SQL_EXECUTE,
 				  "trigger space_id does not match the value "
 				  "resolved on AST building from SQL");
+			return -1;
 		}
 
 		struct sql_trigger *old_trigger;
 		if (sql_trigger_replace(trigger_name,
 					sql_trigger_space_id(new_trigger),
 					new_trigger, &old_trigger) != 0)
-			diag_raise();
+			return -1;
 
 		on_commit->data = old_trigger;
 		if (old_tuple != NULL) {
@@ -4093,6 +4700,7 @@ on_replace_dd_trigger(struct trigger * /* trigger */, void *event)
 
 	txn_stmt_on_rollback(stmt, on_rollback);
 	txn_stmt_on_commit(stmt, on_commit);
+	return 0;
 }
 
 /**
@@ -4112,38 +4720,45 @@ decode_fk_links(struct tuple *tuple, uint32_t *out_count,
 		const char *constraint_name, uint32_t constraint_len,
 		uint32_t errcode)
 {
-	const char *parent_cols =
-		tuple_field_with_type_xc(tuple,
-					 BOX_FK_CONSTRAINT_FIELD_PARENT_COLS,
-					 MP_ARRAY);
+	const char *parent_cols = tuple_field_with_type(tuple,
+		BOX_FK_CONSTRAINT_FIELD_PARENT_COLS, MP_ARRAY);
+	if (parent_cols == NULL)
+		return NULL;
 	uint32_t count = mp_decode_array(&parent_cols);
 	if (count == 0) {
-		tnt_raise(ClientError, errcode,
+		diag_set(ClientError, errcode,
 			  tt_cstr(constraint_name, constraint_len),
 			  "at least one link must be specified");
+		return NULL;
 	}
-	const char *child_cols =
-		tuple_field_with_type_xc(tuple,
-					 BOX_FK_CONSTRAINT_FIELD_CHILD_COLS,
-					 MP_ARRAY);
+	const char *child_cols = tuple_field_with_type(tuple,
+			BOX_FK_CONSTRAINT_FIELD_CHILD_COLS, MP_ARRAY);
+	if (child_cols == NULL)
+		return NULL;
 	if (mp_decode_array(&child_cols) != count) {
-		tnt_raise(ClientError, errcode,
+		diag_set(ClientError, errcode,
 			  tt_cstr(constraint_name, constraint_len),
 			  "number of referenced and referencing fields "
 			  "must be the same");
+		return NULL;
 	}
 	*out_count = count;
 	size_t size = count * sizeof(struct field_link);
 	struct field_link *region_links =
-		(struct field_link *) region_alloc_xc(&fiber()->gc, size);
+		(struct field_link *)region_alloc(&fiber()->gc, size);
+	if (region_links == NULL) {
+		diag_set(OutOfMemory, size, "region", "new slab");
+		return NULL;
+	}
 	memset(region_links, 0, size);
 	for (uint32_t i = 0; i < count; ++i) {
 		if (mp_typeof(*parent_cols) != MP_UINT ||
 		    mp_typeof(*child_cols) != MP_UINT) {
-			tnt_raise(ClientError, errcode,
+			diag_set(ClientError, errcode,
 				  tt_cstr(constraint_name, constraint_len),
 				  tt_sprintf("value of %d link is not unsigned",
 					     i));
+			return NULL;
 		}
 		region_links[i].parent_field = mp_decode_uint(&parent_cols);
 		region_links[i].child_field = mp_decode_uint(&child_cols);
@@ -4156,24 +4771,29 @@ static struct fk_constraint_def *
 fk_constraint_def_new_from_tuple(struct tuple *tuple, uint32_t errcode)
 {
 	uint32_t name_len;
-	const char *name =
-		tuple_field_str_xc(tuple, BOX_FK_CONSTRAINT_FIELD_NAME,
-				   &name_len);
+	const char *name =
+		tuple_field_str(tuple, BOX_FK_CONSTRAINT_FIELD_NAME, &name_len);
+	if (name == NULL)
+		return NULL;
 	if (name_len > BOX_NAME_MAX) {
-		tnt_raise(ClientError, errcode,
+		diag_set(ClientError, errcode,
 			  tt_cstr(name, BOX_INVALID_NAME_MAX),
 			  "constraint name is too long");
+		return NULL;
 	}
-	identifier_check_xc(name, name_len);
+	if (identifier_check(name, name_len) != 0)
+		return NULL;
 	uint32_t link_count;
 	struct field_link *links = decode_fk_links(tuple, &link_count, name,
 						   name_len, errcode);
+	if (links == NULL)
+		return NULL;
 	size_t fk_def_sz = fk_constraint_def_sizeof(link_count, name_len);
 	struct fk_constraint_def *fk_def =
 		(struct fk_constraint_def *) malloc(fk_def_sz);
 	if (fk_def == NULL) {
-		tnt_raise(OutOfMemory, fk_def_sz, "malloc",
+		diag_set(OutOfMemory, fk_def_sz, "malloc",
 			  "struct fk_constraint_def");
+		return NULL;
 	}
 	auto def_guard = make_scoped_guard([=] { free(fk_def); });
 	memcpy(fk_def->name, name, name_len);
@@ -4182,37 +4802,44 @@ fk_constraint_def_new_from_tuple(struct tuple *tuple, uint32_t errcode)
 					      name_len + 1);
 	memcpy(fk_def->links, links, link_count * sizeof(struct field_link));
 	fk_def->field_count = link_count;
-	fk_def->child_id = tuple_field_u32_xc(tuple,
-					      BOX_FK_CONSTRAINT_FIELD_CHILD_ID);
-	fk_def->parent_id =
-		tuple_field_u32_xc(tuple, BOX_FK_CONSTRAINT_FIELD_PARENT_ID);
-	fk_def->is_deferred =
-		tuple_field_bool_xc(tuple, BOX_FK_CONSTRAINT_FIELD_DEFERRED);
-	const char *match = tuple_field_str_xc(tuple,
-					       BOX_FK_CONSTRAINT_FIELD_MATCH,
-					       &name_len);
+	if (tuple_field_u32(tuple, BOX_FK_CONSTRAINT_FIELD_CHILD_ID,
+			    &fk_def->child_id) != 0)
+		return NULL;
+	if (tuple_field_u32(tuple, BOX_FK_CONSTRAINT_FIELD_PARENT_ID,
+			    &fk_def->parent_id) != 0)
+		return NULL;
+	if (tuple_field_bool(tuple, BOX_FK_CONSTRAINT_FIELD_DEFERRED,
+			     &fk_def->is_deferred) != 0)
+		return NULL;
+	const char *match = tuple_field_str(tuple,
+		BOX_FK_CONSTRAINT_FIELD_MATCH, &name_len);
+	if (match == NULL)
+		return NULL;
 	fk_def->match = STRN2ENUM(fk_constraint_match, match, name_len);
 	if (fk_def->match == fk_constraint_match_MAX) {
-		tnt_raise(ClientError, errcode, fk_def->name,
+		diag_set(ClientError, errcode, fk_def->name,
 			  "unknown MATCH clause");
+		return NULL;
 	}
-	const char *on_delete_action =
-		tuple_field_str_xc(tuple, BOX_FK_CONSTRAINT_FIELD_ON_DELETE,
-				   &name_len);
+	const char *on_delete_action = tuple_field_str(tuple,
+		BOX_FK_CONSTRAINT_FIELD_ON_DELETE, &name_len);
+	if (on_delete_action == NULL)
+		return NULL;
 	fk_def->on_delete = STRN2ENUM(fk_constraint_action,
 				      on_delete_action, name_len);
 	if (fk_def->on_delete == fk_constraint_action_MAX) {
-		tnt_raise(ClientError, errcode, fk_def->name,
+		diag_set(ClientError, errcode, fk_def->name,
 			  "unknown ON DELETE action");
+		return NULL;
 	}
-	const char *on_update_action =
-		tuple_field_str_xc(tuple, BOX_FK_CONSTRAINT_FIELD_ON_UPDATE,
-				   &name_len);
+	const char *on_update_action = tuple_field_str(tuple,
+		BOX_FK_CONSTRAINT_FIELD_ON_UPDATE, &name_len);
+	if (on_update_action == NULL)
+		return NULL;
 	fk_def->on_update = STRN2ENUM(fk_constraint_action,
 				      on_update_action, name_len);
 	if (fk_def->on_update == fk_constraint_action_MAX) {
-		tnt_raise(ClientError, errcode, fk_def->name,
+		diag_set(ClientError, errcode, fk_def->name,
 			  "unknown ON UPDATE action");
+		return NULL;
 	}
 	def_guard.is_active = false;
 	return fk_def;
@@ -4286,7 +4913,7 @@ space_reset_fk_constraint_mask(struct space *space)
  * from parent's and child's lists of constraints and
  * release memory.
  */
-static void
+static int
 on_create_fk_constraint_rollback(struct trigger *trigger, void *event)
 {
 	(void) event;
@@ -4296,10 +4923,11 @@ on_create_fk_constraint_rollback(struct trigger *trigger, void *event)
 	space_reset_fk_constraint_mask(space_by_id(fk->def->parent_id));
 	space_reset_fk_constraint_mask(space_by_id(fk->def->child_id));
 	fk_constraint_delete(fk);
+	return 0;
 }
 
 /** Return old FK and release memory for the new one. */
-static void
+static int
 on_replace_fk_constraint_rollback(struct trigger *trigger, void *event)
 {
 	(void) event;
@@ -4314,10 +4942,11 @@ on_replace_fk_constraint_rollback(struct trigger *trigger, void *event)
 	rlist_add_entry(&parent->parent_fk_constraint, old_fk, in_parent_space);
 	space_reset_fk_constraint_mask(parent);
 	space_reset_fk_constraint_mask(child);
+	return 0;
 }
 
 /** On rollback of drop simply return back FK to DD. */
-static void
+static int
 on_drop_fk_constraint_rollback(struct trigger *trigger, void *event)
 {
 	(void) event;
@@ -4330,6 +4959,7 @@ on_drop_fk_constraint_rollback(struct trigger *trigger, void *event)
 			       FIELD_LINK_CHILD);
 	fk_constraint_set_mask(old_fk, &parent->fk_constraint_mask,
 			       FIELD_LINK_PARENT);
+	return 0;
 }
 
 /**
@@ -4337,11 +4967,12 @@ on_drop_fk_constraint_rollback(struct trigger *trigger, void *event)
  * foreign key entry from both (parent's and child's) lists,
  * so just release memory.
  */
-static void
+static int
 on_drop_or_replace_fk_constraint_commit(struct trigger *trigger, void *event)
 {
 	(void) event;
 	fk_constraint_delete((struct fk_constraint *) trigger->data);
+	return 0;
 }
 
 /**
@@ -4351,7 +4982,7 @@ on_drop_or_replace_fk_constraint_commit(struct trigger *trigger, void *event)
  * use bit mask. Otherwise, fall through slow check where we
  * use O(field_cont^2) simple nested cycle iterations.
  */
-static void
+static int
 fk_constraint_check_dup_links(struct fk_constraint_def *fk_def)
 {
 	uint64_t field_mask = 0;
@@ -4364,7 +4995,7 @@ fk_constraint_check_dup_links(struct fk_constraint_def *fk_def)
 			goto error;
 		field_mask |= parent_field;
 	}
-	return;
+	return 0;
 slow_check:
 	for (uint32_t i = 0; i < fk_def->field_count; ++i) {
 		uint32_t parent_field = fk_def->links[i].parent_field;
@@ -4373,14 +5004,15 @@ slow_check:
 				goto error;
 		}
 	}
-	return;
+	return 0;
 error:
-	tnt_raise(ClientError, ER_CREATE_FK_CONSTRAINT, fk_def->name,
+	diag_set(ClientError, ER_CREATE_FK_CONSTRAINT, fk_def->name,
 		  "referenced fields can not contain duplicates");
+	return -1;
 }
 
 /** A trigger invoked on replace in the _fk_constraint space. */
-static void
+static int
 on_replace_dd_fk_constraint(struct trigger * /* trigger*/, void *event)
 {
 	struct txn *txn = (struct txn *) event;
@@ -4392,20 +5024,26 @@ on_replace_dd_fk_constraint(struct trigger * /* trigger*/, void *event)
 		struct fk_constraint_def *fk_def =
 			fk_constraint_def_new_from_tuple(new_tuple,
 							 ER_CREATE_FK_CONSTRAINT);
+		if (fk_def == NULL)
+			return -1;
 		auto fk_def_guard = make_scoped_guard([=] { free(fk_def); });
-		struct space *child_space =
-			space_cache_find_xc(fk_def->child_id);
+		struct space *child_space = space_cache_find(fk_def->child_id);
+		if (child_space == NULL)
+			return -1;
 		if (child_space->def->opts.is_view) {
-			tnt_raise(ClientError, ER_CREATE_FK_CONSTRAINT,
+			diag_set(ClientError, ER_CREATE_FK_CONSTRAINT,
 				  fk_def->name,
 				  "referencing space can't be VIEW");
+			return -1;
 		}
-		struct space *parent_space =
-			space_cache_find_xc(fk_def->parent_id);
+		struct space *parent_space = space_cache_find(fk_def->parent_id);
+		if (parent_space == NULL)
+			return -1;
 		if (parent_space->def->opts.is_view) {
-			tnt_raise(ClientError, ER_CREATE_FK_CONSTRAINT,
+			diag_set(ClientError, ER_CREATE_FK_CONSTRAINT,
 				  fk_def->name,
 				  "referenced space can't be VIEW");
+			return -1;
 		}
 		/*
 		 * FIXME: until SQL triggers are completely
@@ -4416,9 +5054,10 @@ on_replace_dd_fk_constraint(struct trigger * /* trigger*/, void *event)
 		 */
 		struct index *pk = space_index(child_space, 0);
 		if (pk != NULL && index_size(pk) > 0) {
-			tnt_raise(ClientError, ER_CREATE_FK_CONSTRAINT,
+			diag_set(ClientError, ER_CREATE_FK_CONSTRAINT,
 				  fk_def->name,
 				  "referencing space must be empty");
+			return -1;
 		}
 		/* Check types of referenced fields. */
 		for (uint32_t i = 0; i < fk_def->field_count; ++i) {
@@ -4426,9 +5065,10 @@ on_replace_dd_fk_constraint(struct trigger * /* trigger*/, void *event)
 			uint32_t parent_fieldno = fk_def->links[i].parent_field;
 			if (child_fieldno >= child_space->def->field_count ||
 			    parent_fieldno >= parent_space->def->field_count) {
-				tnt_raise(ClientError, ER_CREATE_FK_CONSTRAINT,
+				diag_set(ClientError, ER_CREATE_FK_CONSTRAINT,
 					  fk_def->name, "foreign key refers to "
 						        "nonexistent field");
+				return -1;
 			}
 			struct field_def *child_field =
 				&child_space->def->fields[child_fieldno];
@@ -4436,16 +5076,19 @@ on_replace_dd_fk_constraint(struct trigger * /* trigger*/, void *event)
 				&parent_space->def->fields[parent_fieldno];
 			if (! field_type1_contains_type2(parent_field->type,
 							 child_field->type)) {
-				tnt_raise(ClientError, ER_CREATE_FK_CONSTRAINT,
+				diag_set(ClientError, ER_CREATE_FK_CONSTRAINT,
 					  fk_def->name, "field type mismatch");
+				return -1;
 			}
 			if (child_field->coll_id != parent_field->coll_id) {
-				tnt_raise(ClientError, ER_CREATE_FK_CONSTRAINT,
+				diag_set(ClientError, ER_CREATE_FK_CONSTRAINT,
 					  fk_def->name,
 					  "field collation mismatch");
+				return -1;
 			}
 		}
-		fk_constraint_check_dup_links(fk_def);
+		if (fk_constraint_check_dup_links(fk_def) != 0)
+			return -1;
 		/*
 		 * Search for suitable index in parent space:
 		 * it must be unique and consist exactly from
@@ -4464,8 +5107,8 @@ on_replace_dd_fk_constraint(struct trigger * /* trigger*/, void *event)
 			for (j = 0; j < fk_def->field_count; ++j) {
 				if (key_def_find_by_fieldno(idx->def->key_def,
 							    fk_def->links[j].
-							    parent_field) ==
-							    NULL)
+								    parent_field) ==
+				    NULL)
 					break;
 			}
 			if (j != fk_def->field_count)
@@ -4474,15 +5117,17 @@ on_replace_dd_fk_constraint(struct trigger * /* trigger*/, void *event)
 			break;
 		}
 		if (fk_index == NULL) {
-			tnt_raise(ClientError, ER_CREATE_FK_CONSTRAINT,
+			diag_set(ClientError, ER_CREATE_FK_CONSTRAINT,
 				  fk_def->name, "referenced fields don't "
 						"compose unique index");
+			return -1;
 		}
 		struct fk_constraint *fk =
 			(struct fk_constraint *) malloc(sizeof(*fk));
 		if (fk == NULL) {
-			tnt_raise(OutOfMemory, sizeof(*fk),
+			diag_set(OutOfMemory, sizeof(*fk),
 				  "malloc", "struct fk_constraint");
+			return -1;
 		}
 		auto fk_guard = make_scoped_guard([=] { free(fk); });
 		memset(fk, 0, sizeof(*fk));
@@ -4496,6 +5141,8 @@ on_replace_dd_fk_constraint(struct trigger * /* trigger*/, void *event)
 			struct trigger *on_rollback =
 				txn_alter_trigger_new(on_create_fk_constraint_rollback,
 						      fk);
+			if (on_rollback == NULL)
+				return -1;
 			txn_stmt_on_rollback(stmt, on_rollback);
 			fk_constraint_set_mask(fk,
 					       &parent_space->fk_constraint_mask,
@@ -4514,10 +5161,14 @@ on_replace_dd_fk_constraint(struct trigger * /* trigger*/, void *event)
 			struct trigger *on_rollback =
 				txn_alter_trigger_new(on_replace_fk_constraint_rollback,
 						      old_fk);
+			if (on_rollback == NULL)
+				return -1;
 			txn_stmt_on_rollback(stmt, on_rollback);
 			struct trigger *on_commit =
 				txn_alter_trigger_new(on_drop_or_replace_fk_constraint_commit,
 						      old_fk);
+			if (on_commit == NULL)
+				return -1;
 			txn_stmt_on_commit(stmt, on_commit);
 			space_reset_fk_constraint_mask(child_space);
 			space_reset_fk_constraint_mask(parent_space);
@@ -4529,25 +5180,32 @@ on_replace_dd_fk_constraint(struct trigger * /* trigger*/, void *event)
 		struct fk_constraint_def *fk_def =
 			fk_constraint_def_new_from_tuple(old_tuple,
 						ER_DROP_FK_CONSTRAINT);
+		if (fk_def == NULL)
+			return -1;
 		auto fk_def_guard = make_scoped_guard([=] { free(fk_def); });
-		struct space *child_space =
-			space_cache_find_xc(fk_def->child_id);
-		struct space *parent_space =
-			space_cache_find_xc(fk_def->parent_id);
+		struct space *child_space = space_cache_find(fk_def->child_id);
+		struct space *parent_space = space_cache_find(fk_def->parent_id);
+		if (child_space == NULL || parent_space == NULL)
+			return -1;
 		struct fk_constraint *old_fk=
 			fk_constraint_remove(&child_space->child_fk_constraint,
 					     fk_def->name);
 		struct trigger *on_commit =
 			txn_alter_trigger_new(on_drop_or_replace_fk_constraint_commit,
 					      old_fk);
+		if (on_commit == NULL)
+			return -1;
 		txn_stmt_on_commit(stmt, on_commit);
 		struct trigger *on_rollback =
 			txn_alter_trigger_new(on_drop_fk_constraint_rollback,
 					      old_fk);
+		if (on_rollback == NULL)
+			return -1;
 		txn_stmt_on_rollback(stmt, on_rollback);
 		space_reset_fk_constraint_mask(child_space);
 		space_reset_fk_constraint_mask(parent_space);
 	}
+	return 0;
 }
 
 /** Create an instance of check constraint definition by tuple. */
@@ -4555,39 +5213,43 @@ static struct ck_constraint_def *
 ck_constraint_def_new_from_tuple(struct tuple *tuple)
 {
 	uint32_t name_len;
-	const char *name =
-		tuple_field_str_xc(tuple, BOX_CK_CONSTRAINT_FIELD_NAME,
-				   &name_len);
+	const char *name =
+		tuple_field_str(tuple, BOX_CK_CONSTRAINT_FIELD_NAME, &name_len);
+	if (name == NULL)
+		return NULL;
 	if (name_len > BOX_NAME_MAX) {
-		tnt_raise(ClientError, ER_CREATE_CK_CONSTRAINT,
+		diag_set(ClientError, ER_CREATE_CK_CONSTRAINT,
 			  tt_cstr(name, BOX_INVALID_NAME_MAX),
 				  "check constraint name is too long");
+		return NULL;
 	}
-	identifier_check_xc(name, name_len);
-	uint32_t space_id =
-		tuple_field_u32_xc(tuple, BOX_CK_CONSTRAINT_FIELD_SPACE_ID);
-	const char *language_str =
-		tuple_field_cstr_xc(tuple, BOX_CK_CONSTRAINT_FIELD_LANGUAGE);
+	if (identifier_check(name, name_len) != 0)
+		return NULL;
+	uint32_t space_id;
+	if (tuple_field_u32(tuple, BOX_CK_CONSTRAINT_FIELD_SPACE_ID,
+			    &space_id) != 0)
+		return NULL;
+	const char *language_str =
+		tuple_field_cstr(tuple, BOX_CK_CONSTRAINT_FIELD_LANGUAGE);
+	if (language_str == NULL)
+		return NULL;
 	enum ck_constraint_language language =
 		STR2ENUM(ck_constraint_language, language_str);
 	if (language == ck_constraint_language_MAX) {
-		tnt_raise(ClientError, ER_FUNCTION_LANGUAGE, language_str,
+		diag_set(ClientError, ER_FUNCTION_LANGUAGE, language_str,
 			  tt_cstr(name, name_len));
+		return NULL;
 	}
 	uint32_t expr_str_len;
-	const char *expr_str =
-		tuple_field_str_xc(tuple, BOX_CK_CONSTRAINT_FIELD_CODE,
-				   &expr_str_len);
+	const char *expr_str = tuple_field_str(tuple,
+			BOX_CK_CONSTRAINT_FIELD_CODE, &expr_str_len);
+	if (expr_str == NULL)
+		return NULL;
 	struct ck_constraint_def *ck_def =
 		ck_constraint_def_new(name, name_len, expr_str, expr_str_len,
 				      space_id, language);
-	if (ck_def == NULL)
-		diag_raise();
 	return ck_def;
 }
 
 /** Rollback INSERT check constraint. */
-static void
+static int
 on_create_ck_constraint_rollback(struct trigger *trigger, void * /* event */)
 {
 	struct ck_constraint *ck = (struct ck_constraint *)trigger->data;
@@ -4598,20 +5260,23 @@ on_create_ck_constraint_rollback(struct trigger *trigger, void * /* event */)
 					   strlen(ck->def->name)) != NULL);
 	space_remove_ck_constraint(space, ck);
 	ck_constraint_delete(ck);
-	trigger_run_xc(&on_alter_space, space);
+	if (trigger_run(&on_alter_space, space) != 0)
+		return -1;
+	return 0;
 }
 
 /** Commit DELETE check constraint. */
-static void
+static int
 on_drop_ck_constraint_commit(struct trigger *trigger, void * /* event */)
 {
 	struct ck_constraint *ck = (struct ck_constraint *)trigger->data;
 	assert(ck != NULL);
 	ck_constraint_delete(ck);
+	return 0;
 }
 
 /** Rollback DELETE check constraint. */
-static void
+static int
 on_drop_ck_constraint_rollback(struct trigger *trigger, void * /* event */)
 {
 	struct ck_constraint *ck = (struct ck_constraint *)trigger->data;
@@ -4622,20 +5287,25 @@ on_drop_ck_constraint_rollback(struct trigger *trigger, void * /* event */)
 					   strlen(ck->def->name)) == NULL);
 	if (space_add_ck_constraint(space, ck) != 0)
 		panic("Can't recover after CK constraint drop rollback");
-	trigger_run_xc(&on_alter_space, space);
+	if (trigger_run(&on_alter_space, space) != 0)
+		return -1;
+	return 0;
 }
 
 /** Commit REPLACE check constraint. */
-static void
+static int
 on_replace_ck_constraint_commit(struct trigger *trigger, void * /* event */)
 {
 	struct ck_constraint *ck = (struct ck_constraint *)trigger->data;
+
 	if (ck != NULL)
 		ck_constraint_delete(ck);
+
+	return 0;
 }
 
 /** Rollback REPLACE check constraint. */
-static void
+static int
 on_replace_ck_constraint_rollback(struct trigger *trigger, void * /* event */)
 {
 	struct ck_constraint *ck = (struct ck_constraint *)trigger->data;
@@ -4648,35 +5318,46 @@ on_replace_ck_constraint_rollback(struct trigger *trigger, void * /* event */)
 	rlist_del_entry(new_ck, link);
 	rlist_add_entry(&space->ck_constraint, ck, link);
 	ck_constraint_delete(new_ck);
-	trigger_run_xc(&on_alter_space, space);
+	if (trigger_run(&on_alter_space, space) != 0)
+		return -1;
+	return 0;
 }
 
 /** A trigger invoked on replace in the _ck_constraint space. */
-static void
+static int
 on_replace_dd_ck_constraint(struct trigger * /* trigger*/, void *event)
 {
 	struct txn *txn = (struct txn *) event;
 	struct txn_stmt *stmt = txn_current_stmt(txn);
 	struct tuple *old_tuple = stmt->old_tuple;
 	struct tuple *new_tuple = stmt->new_tuple;
-	uint32_t space_id =
-		tuple_field_u32_xc(old_tuple != NULL ? old_tuple : new_tuple,
-				   BOX_CK_CONSTRAINT_FIELD_SPACE_ID);
-	struct space *space = space_cache_find_xc(space_id);
+	uint32_t space_id;
+	if (tuple_field_u32(old_tuple != NULL ? old_tuple : new_tuple,
+		BOX_CK_CONSTRAINT_FIELD_SPACE_ID, &space_id) != 0)
+		return -1;
+	struct space *space = space_cache_find(space_id);
+	if (space == NULL)
+		return -1;
 	struct trigger *on_rollback = txn_alter_trigger_new(NULL, NULL);
 	struct trigger *on_commit = txn_alter_trigger_new(NULL, NULL);
+	if (on_commit == NULL || on_rollback == NULL)
+		return -1;
 
 	if (new_tuple != NULL) {
-		bool is_deferred =
-			tuple_field_bool_xc(new_tuple,
-					    BOX_CK_CONSTRAINT_FIELD_DEFERRED);
+		bool is_deferred;
+		if (tuple_field_bool(new_tuple,
+			BOX_CK_CONSTRAINT_FIELD_DEFERRED, &is_deferred) != 0)
+			return -1;
 		if (is_deferred) {
-			tnt_raise(ClientError, ER_UNSUPPORTED, "Tarantool",
+			diag_set(ClientError, ER_UNSUPPORTED, "Tarantool",
 				  "deferred ck constraints");
+			return -1;
 		}
 		/* Create or replace check constraint. */
 		struct ck_constraint_def *ck_def =
 			ck_constraint_def_new_from_tuple(new_tuple);
+		if (ck_def == NULL)
+			return -1;
 		auto ck_def_guard = make_scoped_guard([=] {
 			ck_constraint_def_delete(ck_def);
 		});
@@ -4686,14 +5367,15 @@ on_replace_dd_ck_constraint(struct trigger * /* trigger*/, void *event)
 		 */
 		struct index *pk = space_index(space, 0);
 		if (pk != NULL && index_size(pk) > 0) {
-			tnt_raise(ClientError, ER_CREATE_CK_CONSTRAINT,
+			diag_set(ClientError, ER_CREATE_CK_CONSTRAINT,
 				  ck_def->name,
 				  "referencing space must be empty");
+			return -1;
 		}
-		struct ck_constraint *new_ck_constraint =
-			ck_constraint_new(ck_def, space->def);
+		struct ck_constraint *new_ck_constraint;
+		new_ck_constraint = ck_constraint_new(ck_def, space->def);
 		if (new_ck_constraint == NULL)
-			diag_raise();
+			return -1;
 		ck_def_guard.is_active = false;
 		auto ck_guard = make_scoped_guard([=] {
 			ck_constraint_delete(new_ck_constraint);
@@ -4703,8 +5385,9 @@ on_replace_dd_ck_constraint(struct trigger * /* trigger*/, void *event)
 			space_ck_constraint_by_name(space, name, strlen(name));
 		if (old_ck_constraint != NULL)
 			rlist_del_entry(old_ck_constraint, link);
-		if (space_add_ck_constraint(space, new_ck_constraint) != 0)
-			diag_raise();
+		if (space_add_ck_constraint(space, new_ck_constraint) != 0)
+			return -1;
 		ck_guard.is_active = false;
 		if (old_tuple != NULL) {
 			on_rollback->data = old_ck_constraint;
@@ -4719,10 +5402,10 @@ on_replace_dd_ck_constraint(struct trigger * /* trigger*/, void *event)
 		assert(new_tuple == NULL && old_tuple != NULL);
 		/* Drop check constraint. */
 		uint32_t name_len;
-		const char *name =
-			tuple_field_str_xc(old_tuple,
-					   BOX_CK_CONSTRAINT_FIELD_NAME,
-					   &name_len);
+		const char *name = tuple_field_str(old_tuple,
+				BOX_CK_CONSTRAINT_FIELD_NAME, &name_len);
+		if (name == NULL)
+			return -1;
 		struct ck_constraint *old_ck_constraint =
 			space_ck_constraint_by_name(space, name, name_len);
 		assert(old_ck_constraint != NULL);
@@ -4736,11 +5419,14 @@ on_replace_dd_ck_constraint(struct trigger * /* trigger*/, void *event)
 	txn_stmt_on_rollback(stmt, on_rollback);
 	txn_stmt_on_commit(stmt, on_commit);
 
-	trigger_run_xc(&on_alter_space, space);
+	if (trigger_run(&on_alter_space, space) != 0)
+		return -1;
+
+	return 0;
 }
 
 /** A trigger invoked on replace in the _func_index space. */
-static void
+static int
 on_replace_dd_func_index(struct trigger *trigger, void *event)
 {
 	(void) trigger;
@@ -4754,33 +5440,57 @@ on_replace_dd_func_index(struct trigger *trigger, void *event)
 	struct index *index;
 	struct space *space;
 	if (old_tuple == NULL && new_tuple != NULL) {
-		uint32_t space_id = tuple_field_u32_xc(new_tuple,
-					BOX_FUNC_INDEX_FIELD_SPACE_ID);
-		uint32_t index_id = tuple_field_u32_xc(new_tuple,
-					BOX_FUNC_INDEX_FIELD_INDEX_ID);
-		uint32_t fid = tuple_field_u32_xc(new_tuple,
-					BOX_FUNC_INDEX_FUNCTION_ID);
-		space = space_cache_find_xc(space_id);
-		index = index_find_xc(space, index_id);
-		func = func_cache_find(fid);
-		func_index_check_func(func);
+		uint32_t space_id;
+		uint32_t index_id;
+		uint32_t fid;
+		if (tuple_field_u32(new_tuple, BOX_FUNC_INDEX_FIELD_SPACE_ID,
+				    &space_id) != 0)
+			return -1;
+		if (tuple_field_u32(new_tuple, BOX_FUNC_INDEX_FIELD_INDEX_ID,
+				    &index_id) != 0)
+			return -1;
+		if (tuple_field_u32(new_tuple, BOX_FUNC_INDEX_FUNCTION_ID,
+				    &fid) != 0)
+			return -1;
+		space = space_cache_find(space_id);
+		if (space == NULL)
+			return -1;
+		index = index_find(space, index_id);
+		if (index == NULL)
+			return -1;
+		func = func_by_id(fid);
+		if (func == NULL) {
+			diag_set(ClientError, ER_NO_SUCH_FUNCTION, int2str(fid));
+			return -1;
+		}
+		if (func_index_check_func(func) != 0)
+			return -1;
 		if (index->def->opts.func_id != func->def->fid) {
-			tnt_raise(ClientError, ER_WRONG_INDEX_OPTIONS, 0,
+			diag_set(ClientError, ER_WRONG_INDEX_OPTIONS, 0,
 				  "Function ids defined in _index and "
 				  "_func_index don't match");
+			return -1;
 		}
 	} else if (old_tuple != NULL && new_tuple == NULL) {
-		uint32_t space_id = tuple_field_u32_xc(old_tuple,
-					BOX_FUNC_INDEX_FIELD_SPACE_ID);
-		uint32_t index_id = tuple_field_u32_xc(old_tuple,
-					BOX_FUNC_INDEX_FIELD_INDEX_ID);
-		space = space_cache_find_xc(space_id);
-		index = index_find_xc(space, index_id);
+		uint32_t space_id;
+		uint32_t index_id;
+
+		if (tuple_field_u32(old_tuple, BOX_FUNC_INDEX_FIELD_SPACE_ID,
+				    &space_id) != 0)
+			return -1;
+		if (tuple_field_u32(old_tuple, BOX_FUNC_INDEX_FIELD_INDEX_ID,
+				    &index_id) != 0)
+			return -1;
+		space = space_cache_find(space_id);
+		if (space == NULL)
+			return -1;
+		index = index_find(space, index_id);
+		if (index == NULL)
+			return -1;
 		func = NULL;
 	} else {
 		assert(old_tuple != NULL && new_tuple != NULL);
-		tnt_raise(ClientError, ER_UNSUPPORTED,
-			  "functional index", "alter");
+		diag_set(ClientError, ER_UNSUPPORTED, "functional index", "alter");
+		return -1;
 	}
 
 	/**
@@ -4788,9 +5498,11 @@ on_replace_dd_func_index(struct trigger *trigger, void *event)
 	 * function. Index rebuild is not required.
 	 */
 	if (index_def_get_func(index->def) == func)
-		return;
+		return 0;
 
 	alter = alter_space_new(space);
+	if (alter == NULL)
+		return -1;
 	auto scoped_guard = make_scoped_guard([=] {alter_space_delete(alter);});
 	alter_space_move_indexes(alter, 0, index->def->iid);
 	(void) new RebuildFuncIndex(alter, index->def, func);
@@ -4798,9 +5510,15 @@ on_replace_dd_func_index(struct trigger *trigger, void *event)
 				 space->index_id_max + 1);
 	(void) new MoveCkConstraints(alter);
 	(void) new UpdateSchemaVersion(alter);
-	alter_space_do(stmt, alter);
+	try {
+		alter_space_do(stmt, alter);
+	} catch (Exception *e) {
+		return -1;
+	}
 
 	scoped_guard.is_active = false;
+
+	return 0;
 }
 
 struct trigger alter_space_on_replace_space = {
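
The alter.cc hunks above all follow one mechanical pattern: a trigger callback
that used to be declared static void and reported failures with
tnt_raise()/diag_raise() becomes static int, records the error with diag_set()
and returns -1, and returns 0 on success. A minimal sketch of the convention
(the trigger name and the condition it checks are illustrative only, not taken
from the tree):

static int
on_replace_dd_example(struct trigger *trigger, void *event)
{
	(void) trigger;
	struct txn *txn = (struct txn *) event;
	struct txn_stmt *stmt = txn_current_stmt(txn);
	if (stmt->new_tuple == NULL && stmt->old_tuple == NULL) {
		/* Was: tnt_raise(ClientError, ...); now set the diag. */
		diag_set(ClientError, ER_UNSUPPORTED, "example trigger",
			 "empty statement");
		return -1;
	}
	return 0;
}
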
diff --git a/src/box/applier.cc b/src/box/applier.cc
index cf03ea160..e770dea58 100644
--- a/src/box/applier.cc
+++ b/src/box/applier.cc
@@ -604,7 +604,7 @@ applier_read_tx(struct applier *applier, struct stailq *rows)
 				    next)->row.is_commit);
 }
 
-static void
+static int
 applier_txn_rollback_cb(struct trigger *trigger, void *event)
 {
 	(void) trigger;
@@ -615,14 +615,16 @@ applier_txn_rollback_cb(struct trigger *trigger, void *event)
 	trigger_run(&replicaset.applier.on_rollback, event);
 	/* Rollback applier vclock to the committed one. */
 	vclock_copy(&replicaset.applier.vclock, &replicaset.vclock);
+	return 0;
 }
 
-static void
+static int
 applier_txn_commit_cb(struct trigger *trigger, void *event)
 {
 	(void) trigger;
 	/* Broadcast the commit event across all appliers. */
 	trigger_run(&replicaset.applier.on_commit, event);
+	return 0;
 }
 
 /**
@@ -738,18 +740,19 @@ fail:
 /*
  * A trigger to update an applier state after a replication commit.
  */
-static void
+static int
 applier_on_commit(struct trigger *trigger, void *event)
 {
 	(void) event;
 	struct applier *applier = (struct applier *)trigger->data;
 	fiber_cond_signal(&applier->writer_cond);
+	return 0;
 }
 
 /*
  * A trigger to update an applier state after a replication rollback.
  */
-static void
+static int
 applier_on_rollback(struct trigger *trigger, void *event)
 {
 	(void) event;
@@ -761,6 +764,7 @@ applier_on_rollback(struct trigger *trigger, void *event)
 	}
 	/* Stop the applier fiber. */
 	fiber_cancel(applier->reader);
+	return 0;
 }
 
 /**
@@ -1124,7 +1128,7 @@ struct applier_on_state {
 	struct fiber_cond wakeup;
 };
 
-static void
+static int
 applier_on_state_f(struct trigger *trigger, void *event)
 {
 	(void) event;
@@ -1136,12 +1140,14 @@ applier_on_state_f(struct trigger *trigger, void *event)
 	if (applier->state != APPLIER_OFF &&
 	    applier->state != APPLIER_STOPPED &&
 	    applier->state != on_state->desired_state)
-		return;
+		return 0;
 
 	/* Wake up waiter */
 	fiber_cond_signal(&on_state->wakeup);
 
 	applier_pause(applier);
+
+	return 0;
 }
 
 static inline void
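
On the caller side the trigger_run_xc() wrapper disappears: code that fires a
trigger list checks the return value of trigger_run(), which stops at the
first failing callback and leaves the error in the fiber's diag. A hedged
sketch of that caller pattern (the list and function names are made up):

RLIST_HEAD(example_triggers);

static int
fire_example_triggers(void *event)
{
	/* trigger_run() returns -1 if any trigger in the list failed. */
	if (trigger_run(&example_triggers, event) != 0)
		return -1;
	return 0;
}
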
diff --git a/src/box/ck_constraint.c b/src/box/ck_constraint.c
index 1cde27022..2e0fa24a5 100644
--- a/src/box/ck_constraint.c
+++ b/src/box/ck_constraint.c
@@ -175,7 +175,7 @@ ck_constraint_program_run(struct ck_constraint *ck_constraint,
 	return sql_reset(ck_constraint->stmt);
 }
 
-void
+int
 ck_constraint_on_replace_trigger(struct trigger *trigger, void *event)
 {
 	(void) trigger;
@@ -184,7 +184,7 @@ ck_constraint_on_replace_trigger(struct trigger *trigger, void *event)
 	assert(stmt != NULL);
 	struct tuple *new_tuple = stmt->new_tuple;
 	if (new_tuple == NULL)
-		return;
+		return 0;
 
 	struct space *space = stmt->space;
 	assert(space != NULL);
@@ -195,15 +195,16 @@ ck_constraint_on_replace_trigger(struct trigger *trigger, void *event)
 	if (field_ref == NULL) {
 		diag_set(OutOfMemory, field_ref_sz, "region_alloc",
 			 "field_ref");
-		diag_raise();
+		return -1;
 	}
 	vdbe_field_ref_prepare_tuple(field_ref, new_tuple);
 
 	struct ck_constraint *ck_constraint;
 	rlist_foreach_entry(ck_constraint, &space->ck_constraint, link) {
 		if (ck_constraint_program_run(ck_constraint, field_ref) != 0)
-			diag_raise();
+			return -1;
 	}
+	return 0;
 }
 
 struct ck_constraint *
diff --git a/src/box/ck_constraint.h b/src/box/ck_constraint.h
index f26f77a38..6af82afe6 100644
--- a/src/box/ck_constraint.h
+++ b/src/box/ck_constraint.h
@@ -198,7 +198,7 @@ ck_constraint_delete(struct ck_constraint *ck_constraint);
- * Raises an exception when some ck constraint is unsatisfied.
+ * Returns -1 when some ck constraint is unsatisfied.
  * The diag message is set.
  */
-void
+int
 ck_constraint_on_replace_trigger(struct trigger *trigger, void *event);
 
 /**
diff --git a/src/box/identifier.h b/src/box/identifier.h
index a0ed6c10e..0d39793ba 100644
--- a/src/box/identifier.h
+++ b/src/box/identifier.h
@@ -51,16 +51,6 @@ identifier_check(const char *str, int str_len);
 #if defined(__cplusplus)
 } /* extern "C" */
 
-/**
- * Throw an error if identifier is not valid.
- */
-static inline void
-identifier_check_xc(const char *str, int str_len)
-{
-	if (identifier_check(str, str_len))
-		diag_raise();
-}
-
 #endif /* defined(__cplusplus) */
 
 #endif /* TARANTOOL_BOX_IDENTIFIER_H_INCLUDED */
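
With identifier_check_xc() removed, C++ callers use the plain C
identifier_check() directly and forward its return code. A sketch of the
resulting call site (the wrapper function is hypothetical):

static int
example_validate_name(const char *name, uint32_t name_len)
{
	/* identifier_check() sets the diag and returns -1 on bad input. */
	if (identifier_check(name, name_len) != 0)
		return -1;
	return 0;
}
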
diff --git a/src/box/iproto.cc b/src/box/iproto.cc
index 8f899fed8..34c8f469a 100644
--- a/src/box/iproto.cc
+++ b/src/box/iproto.cc
@@ -1505,7 +1505,7 @@ error:
 	tx_reply_error(msg);
 }
 
-static void
+static int
 tx_process_call_on_yield(struct trigger *trigger, void *event)
 {
 	(void)event;
@@ -1513,6 +1513,7 @@ tx_process_call_on_yield(struct trigger *trigger, void *event)
 	TRASH(&msg->call);
 	tx_discard_input(msg);
 	trigger_clear(trigger);
+	return 0;
 }
 
 static void
diff --git a/src/box/lua/call.c b/src/box/lua/call.c
index 0ac2eb7a6..c77be6698 100644
--- a/src/box/lua/call.c
+++ b/src/box/lua/call.c
@@ -949,17 +949,18 @@ lbox_func_delete(struct lua_State *L, struct func *func)
 	lua_pop(L, 2); /* box, func */
 }
 
-static void
+static int
 lbox_func_new_or_delete(struct trigger *trigger, void *event)
 {
 	struct lua_State *L = (struct lua_State *) trigger->data;
 	struct func *func = (struct func *)event;
 	if (!func->def->exports.lua)
-		return;
+		return 0;
 	if (func_by_id(func->def->fid) != NULL)
 		lbox_func_new(L, func);
 	else
 		lbox_func_delete(L, func);
+	return 0;
 }
 
 static struct trigger on_alter_func_in_lua = {
diff --git a/src/box/lua/sequence.c b/src/box/lua/sequence.c
index bd9ec7589..bf0714c1a 100644
--- a/src/box/lua/sequence.c
+++ b/src/box/lua/sequence.c
@@ -157,7 +157,7 @@ lbox_sequence_delete(struct lua_State *L, struct sequence *seq)
 	lua_pop(L, 2); /* box, sequence */
 }
 
-static void
+static int
 lbox_sequence_new_or_delete(struct trigger *trigger, void *event)
 {
 	struct lua_State *L = trigger->data;
@@ -166,6 +166,7 @@ lbox_sequence_new_or_delete(struct trigger *trigger, void *event)
 		lbox_sequence_new(L, seq);
 	else
 		lbox_sequence_delete(L, seq);
+	return 0;
 }
 
 void
diff --git a/src/box/lua/space.cc b/src/box/lua/space.cc
index d0a7e7815..ea8294f95 100644
--- a/src/box/lua/space.cc
+++ b/src/box/lua/space.cc
@@ -500,7 +500,7 @@ box_lua_space_delete(struct lua_State *L, uint32_t id)
 	lua_pop(L, 2); /* box, space */
 }
 
-static void
+static int
 box_lua_space_new_or_delete(struct trigger *trigger, void *event)
 {
 	struct lua_State *L = (struct lua_State *) trigger->data;
@@ -511,6 +511,7 @@ box_lua_space_new_or_delete(struct trigger *trigger, void *event)
 	} else {
 		box_lua_space_delete(L, space->def->id);
 	}
+	return 0;
 }
 
 static struct trigger on_alter_space_in_lua = {
diff --git a/src/box/memtx_space.c b/src/box/memtx_space.c
index cf29cf328..862a1adcc 100644
--- a/src/box/memtx_space.c
+++ b/src/box/memtx_space.c
@@ -864,7 +864,7 @@ struct memtx_ddl_state {
 	int rc;
 };
 
-static void
+static int
 memtx_check_on_replace(struct trigger *trigger, void *event)
 {
 	struct txn *txn = event;
@@ -873,11 +873,11 @@ memtx_check_on_replace(struct trigger *trigger, void *event)
 
 	/* Nothing to check on DELETE. */
 	if (stmt->new_tuple == NULL)
-		return;
+		return 0;
 
 	/* We have already failed. */
 	if (state->rc != 0)
-		return;
+		return 0;
 
 	/*
 	 * Only check format for already processed part of the space,
@@ -886,11 +886,12 @@ memtx_check_on_replace(struct trigger *trigger, void *event)
 	 */
 	if (tuple_compare(state->cursor, HINT_NONE, stmt->new_tuple, HINT_NONE,
 			  state->cmp_def) < 0)
-		return;
+		return 0;
 
 	state->rc = tuple_validate(state->format, stmt->new_tuple);
 	if (state->rc != 0)
 		diag_move(diag_get(), &state->diag);
+	return 0;
 }
 
 static int
@@ -987,7 +988,7 @@ memtx_init_ephemeral_space(struct space *space)
 	memtx_space_add_primary_key(space);
 }
 
-static void
+static int
 memtx_build_on_replace(struct trigger *trigger, void *event)
 {
 	struct txn *txn = event;
@@ -1002,13 +1003,13 @@ memtx_build_on_replace(struct trigger *trigger, void *event)
 	 */
 	if (tuple_compare(state->cursor, HINT_NONE, cmp_tuple, HINT_NONE,
 			  state->cmp_def) < 0)
-		return;
+		return 0;
 
 	if (stmt->new_tuple != NULL &&
 	    tuple_validate(state->format, stmt->new_tuple) != 0) {
 		state->rc = -1;
 		diag_move(diag_get(), &state->diag);
-		return;
+		return 0;
 	}
 
 	struct tuple *delete = NULL;
@@ -1021,7 +1022,7 @@ memtx_build_on_replace(struct trigger *trigger, void *event)
 	if (state->rc != 0) {
 		diag_move(diag_get(), &state->diag);
 	}
-	return;
+	return 0;
 }
 
 static int
diff --git a/src/box/relay.cc b/src/box/relay.cc
index efa3373f9..b99d45a15 100644
--- a/src/box/relay.cc
+++ b/src/box/relay.cc
@@ -386,7 +386,7 @@ tx_gc_advance(struct cmsg *msg)
 	free(m);
 }
 
-static void
+static int
 relay_on_close_log_f(struct trigger *trigger, void * /* event */)
 {
 	static const struct cmsg_hop route[] = {
@@ -396,7 +396,7 @@ relay_on_close_log_f(struct trigger *trigger, void * /* event */)
 	struct relay_gc_msg *m = (struct relay_gc_msg *)malloc(sizeof(*m));
 	if (m == NULL) {
 		say_warn("failed to allocate relay gc message");
-		return;
+		return 0;
 	}
 	cmsg_init(&m->msg, route);
 	m->relay = relay;
@@ -407,6 +407,7 @@ relay_on_close_log_f(struct trigger *trigger, void * /* event */)
 	 * sent xlog.
 	 */
 	stailq_add_tail_entry(&relay->pending_gc, m, in_pending);
+	return 0;
 }
 
 /**
diff --git a/src/box/replication.cc b/src/box/replication.cc
index 28f7acedc..abd519e86 100644
--- a/src/box/replication.cc
+++ b/src/box/replication.cc
@@ -114,15 +114,19 @@ replication_free(void)
 	free(replicaset.replica_by_id);
 }
 
-void
+int
 replica_check_id(uint32_t replica_id)
 {
-        if (replica_id == REPLICA_ID_NIL)
-		tnt_raise(ClientError, ER_REPLICA_ID_IS_RESERVED,
+        if (replica_id == REPLICA_ID_NIL) {
+		diag_set(ClientError, ER_REPLICA_ID_IS_RESERVED,
 			  (unsigned) replica_id);
-	if (replica_id >= VCLOCK_MAX)
-		tnt_raise(LoggedError, ER_REPLICA_MAX,
+		return -1;
+	}
+	if (replica_id >= VCLOCK_MAX) {
+		diag_set(ClientError, ER_REPLICA_MAX,
 			  (unsigned) replica_id);
+		return -1;
+	}
 	/*
 	 * It's okay to update the instance id while it is joining to
 	 * a cluster as long as the id is set by the time bootstrap is
@@ -133,9 +137,12 @@ replica_check_id(uint32_t replica_id)
 	 * case it will replay this operation during the final join
 	 * stage.
 	 */
-        if (!replicaset.is_joining && replica_id == instance_id)
-		tnt_raise(ClientError, ER_LOCAL_INSTANCE_ID_IS_READ_ONLY,
+        if (!replicaset.is_joining && replica_id == instance_id) {
+		diag_set(ClientError, ER_LOCAL_INSTANCE_ID_IS_READ_ONLY,
 			  (unsigned) replica_id);
+		return -1;
+	}
+	return 0;
 }
 
 /* Return true if replica doesn't have id, relay and applier */
@@ -147,7 +154,7 @@ replica_is_orphan(struct replica *replica)
 	       relay_get_state(replica->relay) != RELAY_FOLLOW;
 }
 
-static void
+static int
 replica_on_applier_state_f(struct trigger *trigger, void *event);
 
 static struct replica *
@@ -401,48 +408,53 @@ replica_on_applier_disconnect(struct replica *replica)
 		replicaset.applier.loading++;
 }
 
-static void
+static int
 replica_on_applier_state_f(struct trigger *trigger, void *event)
 {
 	(void)event;
 	struct replica *replica = container_of(trigger,
 			struct replica, on_applier_state);
 	switch (replica->applier->state) {
-	case APPLIER_INITIAL_JOIN:
-		replicaset.is_joining = true;
-		break;
-	case APPLIER_JOINED:
-		replicaset.is_joining = false;
-		break;
-	case APPLIER_CONNECTED:
-		if (tt_uuid_is_nil(&replica->uuid))
-			replica_on_applier_connect(replica);
-		else
-			replica_on_applier_reconnect(replica);
-		break;
-	case APPLIER_LOADING:
-	case APPLIER_DISCONNECTED:
-		replica_on_applier_disconnect(replica);
-		break;
-	case APPLIER_FOLLOW:
-		replica_on_applier_sync(replica);
-		break;
-	case APPLIER_OFF:
-		/*
-		 * Connection to self, duplicate connection
-		 * to the same master, or the applier fiber
-		 * has been cancelled. Assume synced.
-		 */
-		replica_on_applier_sync(replica);
-		break;
-	case APPLIER_STOPPED:
-		/* Unrecoverable error. */
-		replica_on_applier_disconnect(replica);
-		break;
-	default:
-		break;
+		case APPLIER_INITIAL_JOIN:
+			replicaset.is_joining = true;
+			break;
+		case APPLIER_JOINED:
+			replicaset.is_joining = false;
+			break;
+		case APPLIER_CONNECTED:
+			try {
+				if (tt_uuid_is_nil(&replica->uuid))
+					replica_on_applier_connect(replica);
+				else
+					replica_on_applier_reconnect(replica);
+			} catch (Exception *e) {
+				return -1;
+			}
+			break;
+		case APPLIER_LOADING:
+		case APPLIER_DISCONNECTED:
+			replica_on_applier_disconnect(replica);
+			break;
+		case APPLIER_FOLLOW:
+			replica_on_applier_sync(replica);
+			break;
+		case APPLIER_OFF:
+			/*
+			 * Connection to self, duplicate connection
+			 * to the same master, or the applier fiber
+			 * has been cancelled. Assume synced.
+			 */
+			replica_on_applier_sync(replica);
+			break;
+		case APPLIER_STOPPED:
+			/* Unrecoverable error. */
+			replica_on_applier_disconnect(replica);
+			break;
+		default:
+			break;
 	}
 	fiber_cond_signal(&replicaset.applier.cond);
+	return 0;
 }
 
 /**
@@ -575,7 +587,7 @@ struct applier_on_connect {
 	struct replicaset_connect_state *state;
 };
 
-static void
+static int
 applier_on_connect_f(struct trigger *trigger, void *event)
 {
 	struct applier_on_connect *on_connect = container_of(trigger,
@@ -592,10 +604,11 @@ applier_on_connect_f(struct trigger *trigger, void *event)
 		state->connected++;
 		break;
 	default:
-		return;
+		return 0;
 	}
 	fiber_cond_signal(&state->wakeup);
 	applier_pause(applier);
+	return 0;
 }
 
 void
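
replica_check_id() now reports the same three conditions through its return
value; a caller simply forwards the error instead of catching an exception.
A sketch (the surrounding registration function is illustrative):

static int
example_register_replica(uint32_t replica_id)
{
	/* On failure the ClientError is already set in the diag. */
	if (replica_check_id(replica_id) != 0)
		return -1;
	/* ... proceed with assigning the id in the replica set ... */
	return 0;
}
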
diff --git a/src/box/replication.h b/src/box/replication.h
index 19f283c7d..470420592 100644
--- a/src/box/replication.h
+++ b/src/box/replication.h
@@ -352,7 +352,7 @@ replica_on_relay_stop(struct replica *replica);
 #if defined(__cplusplus)
 } /* extern "C" */
 
-void
+int
 replica_check_id(uint32_t replica_id);
 
 /**
diff --git a/src/box/schema.cc b/src/box/schema.cc
index 8d8aae448..9767207e0 100644
--- a/src/box/schema.cc
+++ b/src/box/schema.cc
@@ -599,12 +599,22 @@ func_by_name(const char *name, uint32_t name_len)
 	return (struct func *) mh_strnptr_node(funcs_by_name, func)->val;
 }
 
-bool
-schema_find_grants(const char *type, uint32_t id)
+int
+schema_find_grants(const char *type, uint32_t id, bool *out)
 {
-	struct space *priv = space_cache_find_xc(BOX_PRIV_ID);
+	struct space *priv = space_cache_find(BOX_PRIV_ID);
+	if (priv == NULL)
+		return -1;
+
 	/** "object" index */
-	struct index *index = index_find_system_xc(priv, 2);
+	if (!space_is_memtx(priv)) {
+		diag_set(ClientError, ER_UNSUPPORTED,
+			 priv->engine->name, "system data");
+		return -1;
+	}
+	struct index *index = index_find(priv, 2);
+	if (index == NULL)
+		return -1;
 	/*
 	 * +10 = max(mp_sizeof_uint32) +
 	 *       max(mp_sizeof_strl(uint32)).
@@ -612,9 +622,15 @@ schema_find_grants(const char *type, uint32_t id)
 	char key[GRANT_NAME_MAX + 10];
 	assert(strlen(type) <= GRANT_NAME_MAX);
 	mp_encode_uint(mp_encode_str(key, type, strlen(type)), id);
-	struct iterator *it = index_create_iterator_xc(index, ITER_EQ, key, 2);
+	struct iterator *it = index_create_iterator(index, ITER_EQ, key, 2);
+	if (it == NULL)
+		return -1;
 	IteratorGuard iter_guard(it);
-	return iterator_next_xc(it);
+	struct tuple *tuple;
+	if (iterator_next(it, &tuple) != 0)
+		return -1;
+	*out = (tuple != NULL);
+	return 0;
 }
 
 struct sequence *
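
Since schema_find_grants() can now fail while scanning _priv, the boolean
answer moved into an out parameter and the return value only signals errors.
A sketch of the intended caller pattern (the wrapper and its error message are
illustrative):

static int
example_check_no_grants(const char *type, uint32_t id)
{
	bool has_grants;
	if (schema_find_grants(type, id, &has_grants) != 0)
		return -1;	/* the _priv lookup itself failed, diag is set */
	if (has_grants) {
		diag_set(ClientError, ER_UNSUPPORTED, "Tarantool",
			 "dropping an object that still has grants");
		return -1;
	}
	return 0;
}
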
diff --git a/src/box/schema.h b/src/box/schema.h
index f9d15b38d..88bfd74ad 100644
--- a/src/box/schema.h
+++ b/src/box/schema.h
@@ -171,15 +171,6 @@ schema_free();
 
 struct space *schema_space(uint32_t id);
 
-static inline struct func *
-func_cache_find(uint32_t fid)
-{
-	struct func *func = func_by_id(fid);
-	if (func == NULL)
-		tnt_raise(ClientError, ER_NO_SUCH_FUNCTION, int2str(fid));
-	return func;
-}
-
 
 /**
  * Check whether or not an object has grants on it (restrict
@@ -188,8 +179,8 @@ func_cache_find(uint32_t fid)
- * @retval true object has grants
- * @retval false object has no grants
+ * @retval 0 success; *out is set to whether the object has grants
+ * @retval -1 on error (diag is set)
  */
-bool
-schema_find_grants(const char *type, uint32_t id);
+int
+schema_find_grants(const char *type, uint32_t id, bool *out);
 
 /**
  * A wrapper around sequence_by_id() that raises an exception
diff --git a/src/box/sequence.h b/src/box/sequence.h
index 976020a25..a164da9af 100644
--- a/src/box/sequence.h
+++ b/src/box/sequence.h
@@ -179,15 +179,6 @@ sequence_get_value(struct sequence *seq);
 #if defined(__cplusplus)
 } /* extern "C" */
 
-static inline struct sequence *
-sequence_new_xc(struct sequence_def *def)
-{
-	struct sequence *seq = sequence_new(def);
-	if (seq == NULL)
-		diag_raise();
-	return seq;
-}
-
 #endif /* defined(__cplusplus) */
 
 #endif /* INCLUDES_TARANTOOL_BOX_SEQUENCE_H */
diff --git a/src/box/session.cc b/src/box/session.cc
index 59bf226dd..d69b6572f 100644
--- a/src/box/session.cc
+++ b/src/box/session.cc
@@ -80,7 +80,7 @@ sid_max()
 	return sid_max;
 }
 
-static void
+static int
 session_on_stop(struct trigger *trigger, void * /* event */)
 {
 	/*
@@ -91,6 +91,7 @@ session_on_stop(struct trigger *trigger, void * /* event */)
 	trigger_clear(trigger);
 	/* Destroy the session */
 	session_destroy(fiber_get_session(fiber()));
+	return 0;
 }
 
 static int
diff --git a/src/box/tuple.h b/src/box/tuple.h
index 4c4050ca8..71fe2b981 100644
--- a/src/box/tuple.h
+++ b/src/box/tuple.h
@@ -1156,16 +1156,6 @@ tuple_field_str_xc(struct tuple *tuple, uint32_t fieldno, uint32_t *len)
 	return ret;
 }
 
-/** @copydoc tuple_field_cstr() */
-static inline const char *
-tuple_field_cstr_xc(struct tuple *tuple, uint32_t fieldno)
-{
-	const char *out = tuple_field_cstr(tuple, fieldno);
-	if (out == NULL)
-		diag_raise();
-	return out;
-}
-
 /** @copydoc tuple_field_uuid() */
 static inline void
 tuple_field_uuid_xc(struct tuple *tuple, int fieldno, struct tt_uuid *out)
diff --git a/src/box/txn.c b/src/box/txn.c
index 53ebfc053..ce0354a69 100644
--- a/src/box/txn.c
+++ b/src/box/txn.c
@@ -40,10 +40,10 @@ double too_long_threshold;
 /* Txn cache. */
 static struct stailq txn_cache = {NULL, &txn_cache.first};
 
-static void
+static int
 txn_on_stop(struct trigger *trigger, void *event);
 
-static void
+static int
 txn_on_yield(struct trigger *trigger, void *event);
 
 static void
@@ -787,12 +787,13 @@ box_txn_rollback_to_savepoint(box_txn_savepoint_t *svp)
 	return 0;
 }
 
-static void
+static int
 txn_on_stop(struct trigger *trigger, void *event)
 {
 	(void) trigger;
 	(void) event;
 	txn_rollback(in_txn());                 /* doesn't yield or fail */
+	return 0;
 }
 
 /**
@@ -812,7 +813,7 @@ txn_on_stop(struct trigger *trigger, void *event)
  * So much hassle to be user-friendly until we have a true
  * interactive transaction support in memtx.
  */
-static void
+static int
 txn_on_yield(struct trigger *trigger, void *event)
 {
 	(void) trigger;
@@ -822,4 +823,5 @@ txn_on_yield(struct trigger *trigger, void *event)
 	assert(!txn_has_flag(txn, TXN_CAN_YIELD));
 	txn_rollback_to_svp(txn, NULL);
 	txn_set_flag(txn, TXN_IS_ABORTED_BY_YIELD);
+	return 0;
 }
diff --git a/src/box/user.cc b/src/box/user.cc
index c46ff67d1..50614c6f2 100644
--- a/src/box/user.cc
+++ b/src/box/user.cc
@@ -339,7 +339,8 @@ user_reload_privs(struct user *user)
 		struct tuple *tuple;
 		while ((tuple = iterator_next_xc(it)) != NULL) {
 			struct priv_def priv;
-			priv_def_create_from_tuple(&priv, tuple);
+			if (priv_def_create_from_tuple(&priv, tuple) != 0)
+				diag_raise();
 			/**
 			 * Skip role grants, we're only
 			 * interested in real objects.
@@ -559,7 +560,7 @@ user_cache_free()
 
 /** {{{ roles */
 
-void
+int
 role_check(struct user *grantee, struct user *role)
 {
 	/*
@@ -592,9 +593,11 @@ role_check(struct user *grantee, struct user *role)
 	 */
 	if (user_map_is_set(&transitive_closure,
 			    role->auth_token)) {
-		tnt_raise(ClientError, ER_ROLE_LOOP,
+		diag_set(ClientError, ER_ROLE_LOOP,
 			  role->def->name, grantee->def->name);
+		return -1;
 	}
+	return 0;
 }
 
 /**
diff --git a/src/box/user.h b/src/box/user.h
index 527fb2e7c..526cd39ca 100644
--- a/src/box/user.h
+++ b/src/box/user.h
@@ -144,16 +144,6 @@ user_cache_replace(struct user_def *user);
 void
 user_cache_delete(uint32_t uid);
 
-/* Find a user by name. Used by authentication. */
-static inline struct user *
-user_find_xc(uint32_t uid)
-{
-	struct user *user = user_find(uid);
-	if (user == NULL)
-		diag_raise();
-	return user;
-}
-
 static inline struct user *
 user_find_by_name_xc(const char *name, uint32_t len)
 {
@@ -178,7 +168,7 @@ user_cache_free();
  * and no loop in the graph will occur when grantee gets
  * a given role.
  */
-void
+int
 role_check(struct user *grantee, struct user *role);
 
 /**
@@ -201,7 +191,7 @@ role_revoke(struct user *grantee, struct user *role);
 void
 priv_grant(struct user *grantee, struct priv_def *priv);
 
-void
+int
 priv_def_create_from_tuple(struct priv_def *priv, struct tuple *tuple);
 
 /* }}} */
diff --git a/src/box/vinyl.c b/src/box/vinyl.c
index cd009c1c2..493a120f5 100644
--- a/src/box/vinyl.c
+++ b/src/box/vinyl.c
@@ -1065,7 +1065,7 @@ struct vy_check_format_ctx {
  * This is an on_replace trigger callback that checks inserted
  * tuples against a new format.
  */
-static void
+static int
 vy_check_format_on_replace(struct trigger *trigger, void *event)
 {
 	struct txn *txn = event;
@@ -1073,15 +1073,16 @@ vy_check_format_on_replace(struct trigger *trigger, void *event)
 	struct vy_check_format_ctx *ctx = trigger->data;
 
 	if (stmt->new_tuple == NULL)
-		return; /* DELETE, nothing to do */
+		return 0; /* DELETE, nothing to do */
 
 	if (ctx->is_failed)
-		return; /* already failed, nothing to do */
+		return 0; /* already failed, nothing to do */
 
 	if (tuple_validate(ctx->format, stmt->new_tuple) != 0) {
 		ctx->is_failed = true;
 		diag_move(diag_get(), &ctx->diag);
 	}
+	return 0;
 }
 
 static int
@@ -3736,13 +3737,14 @@ fail:
 
 /* {{{ Cursor */
 
-static void
+static int
 vinyl_iterator_on_tx_destroy(struct trigger *trigger, void *event)
 {
 	(void)event;
 	struct vinyl_iterator *it = container_of(trigger,
 			struct vinyl_iterator, on_tx_destroy);
 	it->tx = NULL;
+	return 0;
 }
 
 static int
@@ -4035,7 +4037,7 @@ struct vy_build_ctx {
  * This is an on_replace trigger callback that forwards DML requests
  * to the index that is currently being built.
  */
-static void
+static int
 vy_build_on_replace(struct trigger *trigger, void *event)
 {
 	struct txn *txn = event;
@@ -4046,7 +4048,7 @@ vy_build_on_replace(struct trigger *trigger, void *event)
 	struct vy_lsm *lsm = ctx->lsm;
 
 	if (ctx->is_failed)
-		return; /* already failed, nothing to do */
+		return 0; /* already failed, nothing to do */
 
 	/* Check new tuples for conformity to the new format. */
 	if (stmt->new_tuple != NULL &&
@@ -4083,7 +4085,7 @@ vy_build_on_replace(struct trigger *trigger, void *event)
 		if (rc != 0)
 			goto err;
 	}
-	return;
+	return 0;
 err:
 	/*
 	 * No need to abort the DDL request if this transaction
@@ -4093,9 +4095,10 @@ err:
 	 * context isn't valid and so we must not modify it.
 	 */
 	if (tx->state == VINYL_TX_ABORT)
-		return;
+		return 0;
 	ctx->is_failed = true;
 	diag_move(diag_get(), &ctx->diag);
+	return 0;
 }
 
 /**
@@ -4488,7 +4491,7 @@ vinyl_space_build_index(struct space *src_space, struct index *new_index,
 
 /* {{{ Deferred DELETE handling */
 
-static void
+static int
 vy_deferred_delete_on_commit(struct trigger *trigger, void *event)
 {
 	struct txn *txn = event;
@@ -4501,15 +4504,17 @@ vy_deferred_delete_on_commit(struct trigger *trigger, void *event)
 	mem->dump_lsn = txn->signature;
 	/* Unpin the mem pinned in vy_deferred_delete_on_replace(). */
 	vy_mem_unpin(mem);
+	return 0;
 }
 
-static void
+static int
 vy_deferred_delete_on_rollback(struct trigger *trigger, void *event)
 {
 	(void)event;
 	struct vy_mem *mem = trigger->data;
 	/* Unpin the mem pinned in vy_deferred_delete_on_replace(). */
 	vy_mem_unpin(mem);
+	return 0;
 }
 
 /**
@@ -4536,7 +4541,7 @@ vy_deferred_delete_on_rollback(struct trigger *trigger, void *event)
  * one of the trees got dumped while the other didn't, we would
  * mistakenly skip both statements on recovery.
  */
-static void
+static int
 vy_deferred_delete_on_replace(struct trigger *trigger, void *event)
 {
 	(void)trigger;
@@ -4546,7 +4551,7 @@ vy_deferred_delete_on_replace(struct trigger *trigger, void *event)
 	bool is_first_statement = txn_is_first_statement(txn);
 
 	if (stmt->new_tuple == NULL)
-		return;
+		return 0;
 	/*
 	 * Extract space id, LSN of the deferred DELETE statement,
 	 * and the deleted tuple from the system space row.
@@ -4555,27 +4560,27 @@ vy_deferred_delete_on_replace(struct trigger *trigger, void *event)
 	tuple_rewind(&it, stmt->new_tuple);
 	uint32_t space_id;
 	if (tuple_next_u32(&it, &space_id) != 0)
-		diag_raise();
+		return -1;
 	uint64_t lsn;
 	if (tuple_next_u64(&it, &lsn) != 0)
-		diag_raise();
+		return -1;
 	const char *delete_data = tuple_next_with_type(&it, MP_ARRAY);
 	if (delete_data == NULL)
-		diag_raise();
+		return -1;
 	const char *delete_data_end = delete_data;
 	mp_next(&delete_data_end);
 
 	/* Look up the space. */
 	struct space *space = space_cache_find(space_id);
 	if (space == NULL)
-		diag_raise();
+		return -1;
 	/*
 	 * All secondary indexes could have been dropped, in
 	 * which case we don't need to generate deferred DELETE
 	 * statements anymore.
 	 */
 	if (space->index_count <= 1)
-		return;
+		return 0;
 	/*
 	 * Wait for memory quota if necessary before starting to
 	 * process the batch (we can't yield between statements).
@@ -4589,7 +4594,7 @@ vy_deferred_delete_on_replace(struct trigger *trigger, void *event)
 	struct tuple *delete = vy_stmt_new_delete(pk->mem_format, delete_data,
 						  delete_data_end);
 	if (delete == NULL)
-		diag_raise();
+		return -1;
 	/*
 	 * A deferred DELETE may be generated after new statements
 	 * were committed for the deleted key. So we must use the
@@ -4681,7 +4686,8 @@ vy_deferred_delete_on_replace(struct trigger *trigger, void *event)
 
 	tuple_unref(delete);
 	if (rc != 0)
-		diag_raise();
+		return -1;
+	return 0;
 }
 
 static struct trigger on_replace_vinyl_deferred_delete = {
diff --git a/src/lib/core/fiber.c b/src/lib/core/fiber.c
index ce90f930c..710940838 100644
--- a/src/lib/core/fiber.c
+++ b/src/lib/core/fiber.c
@@ -1292,12 +1292,13 @@ cord_cojoin(struct cord *cord)
 	return cord_join(cord);
 }
 
-void
+int
 break_ev_loop_f(struct trigger *trigger, void *event)
 {
 	(void) trigger;
 	(void) event;
 	ev_break(loop(), EVBREAK_ALL);
+	return 0;
 }
 
 struct costart_ctx
diff --git a/src/lib/core/trigger.cc b/src/lib/core/trigger.cc
index 4a43151e1..6beb1f600 100644
--- a/src/lib/core/trigger.cc
+++ b/src/lib/core/trigger.cc
@@ -35,25 +35,19 @@
 int
 trigger_run(struct rlist *list, void *event)
 {
-	try {
-		struct trigger *trigger, *tmp;
-		rlist_foreach_entry_safe(trigger, list, link, tmp)
-			trigger->run(trigger, event);
-	} catch (Exception *e) {
-		return -1;
-	}
+	struct trigger *trigger, *tmp;
+	rlist_foreach_entry_safe(trigger, list, link, tmp)
+		if (trigger->run(trigger, event) != 0)
+			return -1;
 	return 0;
 }
 
 int
 trigger_run_reverse(struct rlist *list, void *event)
 {
-	try {
-		struct trigger *trigger, *tmp;
-		rlist_foreach_entry_safe_reverse(trigger, list, link, tmp)
-			trigger->run(trigger, event);
-	} catch (Exception *e) {
-		return -1;
-	}
+	struct trigger *trigger, *tmp;
+	rlist_foreach_entry_safe_reverse(trigger, list, link, tmp)
+		if (trigger->run(trigger, event) != 0)
+			return -1;
 	return 0;
 }
diff --git a/src/lib/core/trigger.h b/src/lib/core/trigger.h
index 76fa6345d..7b500dd92 100644
--- a/src/lib/core/trigger.h
+++ b/src/lib/core/trigger.h
@@ -40,7 +40,7 @@ extern "C" {
  * on an event.
  */
 struct trigger;
-typedef void (*trigger_f)(struct trigger *trigger, void *event);
+typedef int (*trigger_f)(struct trigger *trigger, void *event);
 typedef void (*trigger_f0)(struct trigger *trigger);
 
 struct trigger
diff --git a/src/lua/trigger.c b/src/lua/trigger.c
index 4803e85c5..83718ddc3 100644
--- a/src/lua/trigger.c
+++ b/src/lua/trigger.c
@@ -62,7 +62,7 @@ lbox_trigger_destroy(struct trigger *ptr)
 	free(ptr);
 }
 
-static void
+static int
 lbox_trigger_run(struct trigger *ptr, void *event)
 {
 	struct lbox_trigger *trigger = (struct lbox_trigger *) ptr;
@@ -97,14 +97,14 @@ lbox_trigger_run(struct trigger *ptr, void *event)
 	}
 	if (luaT_call(L, nargs, LUA_MULTRET)) {
 		luaL_unref(tarantool_L, LUA_REGISTRYINDEX, coro_ref);
-		diag_raise();
+		return -1;
 	}
 	int nret = lua_gettop(L) - top;
 	if (trigger->pop_event != NULL &&
 	    trigger->pop_event(L, nret, event) != 0) {
 		lua_settop(L, top);
 		luaL_unref(tarantool_L, LUA_REGISTRYINDEX, coro_ref);
-		diag_raise();
+		return -1;
 	}
 	/*
 	 * Clear the stack after pop_event saves all
@@ -112,6 +112,7 @@ lbox_trigger_run(struct trigger *ptr, void *event)
 	 */
 	lua_settop(L, top);
 	luaL_unref(tarantool_L, LUA_REGISTRYINDEX, coro_ref);
+	return 0;
 }
 
 static struct lbox_trigger *
diff --git a/src/main.cc b/src/main.cc
index 5776aa41d..d40e4740a 100644
--- a/src/main.cc
+++ b/src/main.cc
@@ -671,10 +671,11 @@ print_help(const char *program)
 	puts("to see online documentation, submit bugs or contribute a patch.");
 }
 
-static void
+static int
 break_loop(struct trigger *, void *)
 {
 	ev_break(loop(), EVBREAK_ALL);
+	return 0;
 }
 
 int
diff --git a/test/unit/cbus.c b/test/unit/cbus.c
index be930ab8f..ecf5fce72 100644
--- a/test/unit/cbus.c
+++ b/test/unit/cbus.c
@@ -43,13 +43,14 @@ do_nothing(struct cmsg *m)
 }
 
 /** Callback called on each flush to the main thread. */
-static void
+static int
 flush_cb(struct trigger *t, void *e)
 {
 	(void) t;
 	(void) e;
 	++flushed_cnt;
 	printf("flush event, counter = %d\n", flushed_cnt);
+	return 0;
 }
 
 /** Callback to finish the test. It breaks the main event loop. */
diff --git a/test/unit/swim.c b/test/unit/swim.c
index 1371e7d26..bb12baf8d 100644
--- a/test/unit/swim.c
+++ b/test/unit/swim.c
@@ -827,7 +827,7 @@ struct trigger_ctx {
 	struct swim_on_member_event_ctx ctx;
 };
 
-static void
+static int
 swim_on_member_event_save(struct trigger *t, void *event)
 {
 	struct trigger_ctx *c = (struct trigger_ctx *) t->data;
@@ -836,9 +836,10 @@ swim_on_member_event_save(struct trigger *t, void *event)
 		swim_member_unref(c->ctx.member);
 	c->ctx = *((struct swim_on_member_event_ctx *) event);
 	swim_member_ref(c->ctx.member);
+	return 0;
 }
 
-static void
+static int
 swim_on_member_event_yield(struct trigger *t, void *event)
 {
 	struct trigger_ctx *c = (struct trigger_ctx *) t->data;
@@ -846,6 +847,7 @@ swim_on_member_event_yield(struct trigger *t, void *event)
 	c->f = fiber();
 	while (c->need_sleep)
 		fiber_yield();
+	return 0;
 }
 
 static void
diff --git a/test/unit/swim_test_utils.c b/test/unit/swim_test_utils.c
index 7da82d93c..9dbd28a9f 100644
--- a/test/unit/swim_test_utils.c
+++ b/test/unit/swim_test_utils.c
@@ -176,7 +176,7 @@ swim_cluster_id_to_uri(char *buffer, int id)
  * A trigger to check correctness of event context, and ability
  * to yield.
  */
-void
+int
 swim_test_event_cb(struct trigger *trigger, void *event)
 {
 	(void) trigger;
@@ -186,6 +186,7 @@ swim_test_event_cb(struct trigger *trigger, void *event)
 	assert((ctx->events & SWIM_EV_NEW) == 0 ||
 	       (ctx->events & SWIM_EV_DROP) == 0);
 	fiber_sleep(0);
+	return 0;
 }
 
 /** Create a SWIM cluster node @a n with a 0-based @a id. */
-- 
2.17.1
