Hi, Sergey!

LGTM

On 1/14/26 11:50, Sergey Kaplun wrote:
Hi, Sergey!
Fixed the issues you pointed out; see the iterative patches below.

On 13.01.26, Sergey Bronnikov wrote:
Hi, Sergey!

Thanks for the patch! LGTM in general; see my comments below.

Sergey

On 12/26/25 12:18, Sergey Kaplun wrote:
<snipped>

+macro(AddBenchTarget perf_suite)
+  file(MAKE_DIRECTORY "${PERF_OUTPUT_DIR}/${perf_suite}/")
+  message(STATUS "Add perf suite ${perf_suite}")
+  add_custom_target(${perf_suite})
+  add_custom_target(${perf_suite}-console
+    COMMAND ${CMAKE_CTEST_COMMAND}
+      -L ${perf_suite}
Use the long option "--label-regex" instead of "-L".
Fixed:
===================================================================
diff --git a/perf/CMakeLists.txt b/perf/CMakeLists.txt
index 06603dfd..d03d74a0 100644
--- a/perf/CMakeLists.txt
+++ b/perf/CMakeLists.txt
@@ -47,7 +47,7 @@ macro(AddBenchTarget perf_suite)
   add_custom_target(${perf_suite})
   add_custom_target(${perf_suite}-console
     COMMAND ${CMAKE_CTEST_COMMAND}
-      -L ${perf_suite}
+      --label-regex ${perf_suite}
       --parallel 1
       --verbose
       --output-on-failure
===================================================================
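
(Side note: `-L` is just CTest's short spelling of `--label-regex`, and both treat the argument as a regular expression, so the behaviour is unchanged; the long form is simply more readable in scripts.)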

+      --parallel 1
+      --verbose
+      --output-on-failure
+      --no-tests=error
+  )
+  add_dependencies(${perf_suite}-console luajit-main)
+endmacro()
+
+# Add the bench to the pair of targets created by the call above.
+macro(AddBench bench_name bench_path perf_suite LUA_PATH)
+  set(bench_title "perf/${perf_suite}/${bench_name}")
+  get_filename_component(bench_name_stripped  ${bench_name} NAME_WE)
+  set(bench_out_file
+    ${PERF_OUTPUT_DIR}/${perf_suite}/${bench_name_stripped}.json
+  )
+  set(bench_command "${LUAJIT_BINARY} ${bench_path}")
+  if(${ARGC} GREATER 4)
+    set(input_file ${ARGV4})
+    set(bench_command "${bench_command} < ${input_file}")
+  endif()
+  set(BENCH_FLAGS
+    "--benchmark_out_format=json --benchmark_out=${bench_out_file}"
Why is BENCH_FLAGS in uppercase but bench_command_flags in lowercase?

What is the difference?

Also, it seems these variables are unused and can be removed:

@@ -56,10 +58,6 @@ macro(AddBench bench_name bench_path perf_suite LUA_PATH)
     set(input_file ${ARGV4})
     set(bench_command "${bench_command} < ${input_file}")
   endif()
-  set(BENCH_FLAGS
-    "--benchmark_out_format=json --benchmark_out=${bench_out_file}"
-  )
-  set(bench_command_flags ${bench_command} ${BENCH_FLAGS})
   separate_arguments(bench_command_separated UNIX_COMMAND ${bench_command})
   add_custom_command(
     COMMAND ${CMAKE_COMMAND} -E env

Dropped, thanks!

===================================================================
diff --git a/perf/CMakeLists.txt b/perf/CMakeLists.txt
index 5a5cf777..0916402e 100644
--- a/perf/CMakeLists.txt
+++ b/perf/CMakeLists.txt
@@ -63,10 +63,6 @@ macro(AddBench bench_name bench_path perf_suite LUA_PATH)
     set(input_file ${ARGV4})
     set(bench_command "${bench_command} < ${input_file}")
   endif()
-  set(BENCH_FLAGS
-    "--benchmark_out_format=json --benchmark_out=${bench_out_file}"
-  )
-  set(bench_command_flags ${bench_command} ${BENCH_FLAGS})
   separate_arguments(bench_command_separated UNIX_COMMAND ${bench_command})
   add_custom_command(
     COMMAND ${CMAKE_COMMAND} -E env
===================================================================

+  )
+  set(bench_command_flags ${bench_command} ${BENCH_FLAGS})
+  separate_arguments(bench_command_separated UNIX_COMMAND ${bench_command})
+  add_custom_command(
+    COMMAND ${CMAKE_COMMAND} -E env
+      LUA_PATH="${LUA_PATH}"
+      LUA_CPATH="${LUA_CPATH}"
+        ${bench_command_separated}
+          --benchmark_out_format=json
+          --benchmark_out="${bench_out_file}"
+    OUTPUT ${bench_out_file}
+    DEPENDS luajit-main
+    COMMENT
+      "Running benchmark ${bench_title} saving results in ${bench_out_file}."
+  )
+  add_custom_target(${bench_name} DEPENDS ${bench_out_file})
+  add_dependencies(${perf_suite} ${bench_name})
+
+  # Report in the console.
+  add_test(NAME ${bench_title}
+    COMMAND sh -c "${bench_command}"
I propose using find_program for the shell executable, like we do in Tarantool's root CMakeLists.txt:

--- a/perf/CMakeLists.txt
+++ b/perf/CMakeLists.txt
@@ -1,5 +1,7 @@
  # Running various bench suites against LuaJIT.

+find_program(SHELL sh)
+
  include(MakeLuaPath)

  if(CMAKE_BUILD_TYPE STREQUAL "Debug")
@@ -78,7 +80,7 @@ macro(AddBench bench_name bench_path perf_suite LUA_PATH)

    # Report in the console.
    add_test(NAME ${bench_title}
-    COMMAND sh -c "${bench_command}"
+    COMMAND ${SHELL} -c "${bench_command}"
    )
    set_tests_properties(${bench_title} PROPERTIES
      ENVIRONMENT "LUA_PATH=${LUA_PATH}"
Added:

===================================================================
diff --git a/perf/CMakeLists.txt b/perf/CMakeLists.txt
index 0916402e..e3af1996 100644
--- a/perf/CMakeLists.txt
+++ b/perf/CMakeLists.txt
@@ -2,6 +2,11 @@
 
 include(MakeLuaPath)
 
+find_program(SH sh)
+if(NOT SH)
+  message(FATAL_ERROR "`sh' is not found")
+endif()
+
 if(CMAKE_BUILD_TYPE STREQUAL "Debug")
   message(WARNING "LuaJIT and perf tests are built in the Debug mode. "
                   "Timings may be affected.")
@@ -81,7 +86,7 @@ macro(AddBench bench_name bench_path perf_suite LUA_PATH)
 
   # Report in the console.
   add_test(NAME ${bench_title}
-    COMMAND sh -c "${bench_command}"
+    COMMAND ${SH} -c "${bench_command}"
   )
   set_tests_properties(${bench_title} PROPERTIES
     ENVIRONMENT "LUA_PATH=${LUA_PATH}"
===================================================================
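
Purely as an aside: if a newer CMake minimum is acceptable (the REQUIRED keyword for find_program appeared in CMake 3.18, if I recall correctly), the explicit check could be folded into the lookup itself:

  # Equivalent sketch, assuming CMake >= 3.18 is available:
  find_program(SH sh REQUIRED)

Either way works; the explicit message() just keeps the error text under our control.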

+  )
+  set_tests_properties(${bench_title} PROPERTIES
+    ENVIRONMENT "LUA_PATH=${LUA_PATH}"
+    LABELS ${perf_suite}
+    DEPENDS luajit-main
+  )
+  unset(input_file)
+endmacro()
+
<snipped>
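
For anyone reading along, a minimal sketch of how the two macros are meant to be combined (the suite and benchmark names below are purely illustrative, not taken from the patch, and LUA_PATH / PERF_OUTPUT_DIR are assumed to be set elsewhere in the snipped part of the file):

  # Hypothetical call site:
  AddBenchTarget(mysuite)
  AddBench(mybench.lua "${CMAKE_CURRENT_SOURCE_DIR}/mybench.lua" mysuite "${LUA_PATH}")
  # This registers the test perf/mysuite/mybench.lua, creates a `mybench.lua`
  # target that writes ${PERF_OUTPUT_DIR}/mysuite/mybench.json, hooks it into
  # the aggregate `mysuite` target, and `mysuite-console` runs the suite via
  # CTest filtered by the `mysuite` label.

So `cmake --build . --target mysuite-console` (or `make mysuite-console`) gives the console report, while the plain `mysuite` target collects the JSON outputs.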