[Tarantool-patches] [PATCH v2 luajit 37/41] perf: add CMake infrastructure
Sergey Bronnikov
sergeyb at tarantool.org
Tue Jan 13 18:04:22 MSK 2026
Hi, Sergey!
Thanks for the patch! LGTM in general, see my comments below.
Sergey
On 12/26/25 12:18, Sergey Kaplun wrote:
> This commit introduces CMake build scripts for the benchmarks
> introduced earlier. The benchmarks are enabled only if the
> `LUAJIT_ENABLE_PERF` option is set. For each suite (LuaJIT-benches in
> this patch set) the `AddBenchTarget()` macro generates two targets:
> * Target to run all benches and store results in the
>   perf/output/<suite_name> directory.
> * Target to run all benches via CTest and inspect results in the
>   console.
>
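Side note: just to make sure I got the intended workflow right, the
suites are supposed to be used roughly like this (the <build> directory
name is only for illustration):

$ cmake -S . -B build -DLUAJIT_ENABLE_PERF=ON
$ cmake --build build --target LuaJIT-benches          # JSON reports in perf/output/
$ cmake --build build --target LuaJIT-benches-console  # run via CTest

Feel free to correct me if I misread something.
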
> For the LuaJIT-benches suite there are two generated files:
> * FASTA_5000000 -- used as an input for <k-nucleotide.lua> and
>   <revcomp.lua>.
> * SUMCOL_5000.txt -- used as an input for <sum-file.lua>.
>
> These files and the <perf/output> directory are added to .gitignore.
> ---
> .gitignore | 5 ++
> CMakeLists.txt | 11 ++++
> perf/CMakeLists.txt | 99 ++++++++++++++++++++++++++++++
> perf/LuaJIT-benches/CMakeLists.txt | 52 ++++++++++++++++
> 4 files changed, 167 insertions(+)
> create mode 100644 perf/CMakeLists.txt
> create mode 100644 perf/LuaJIT-benches/CMakeLists.txt
>
> diff --git a/.gitignore b/.gitignore
> index c26a7eb8..bfc7d401 100644
> --- a/.gitignore
> +++ b/.gitignore
> @@ -28,3 +28,8 @@ luajit-parse-memprof
> luajit-parse-sysprof
> luajit.pc
> *.c_test
> +
> +# Generated by the performance tests.
> +FASTA_5000000
> +SUMCOL_5000.txt
> +perf/output/
> diff --git a/CMakeLists.txt b/CMakeLists.txt
> index c0da4362..73f46835 100644
> --- a/CMakeLists.txt
> +++ b/CMakeLists.txt
> @@ -464,6 +464,17 @@ if(LUAJIT_USE_TEST)
> endif()
> add_subdirectory(test)
>
> +# --- Benchmarks source tree ---------------------------------------------------
> +
> +# The option to enable performance tests for LuaJIT.
> +# Disabled by default, since it is commonly used only by LuaJIT
> +# developers and run in CI on a specially set-up machine.
> +option(LUAJIT_ENABLE_PERF "Generate <perf> target" OFF)
> +
> +if(LUAJIT_ENABLE_PERF)
> + add_subdirectory(perf)
> +endif()
> +
> # --- Misc rules ---------------------------------------------------------------
>
> # XXX: Implement <uninstall> target using the following recipe:
> diff --git a/perf/CMakeLists.txt b/perf/CMakeLists.txt
> new file mode 100644
> index 00000000..1c2f8e8e
> --- /dev/null
> +++ b/perf/CMakeLists.txt
> @@ -0,0 +1,99 @@
> +# Running various bench suites against LuaJIT.
> +
> +include(MakeLuaPath)
> +
> +if(CMAKE_BUILD_TYPE STREQUAL "Debug")
> +  message(WARNING "LuaJIT and perf tests are built in the Debug mode. "
> +                  "Timings may be affected.")
> +endif()
> +
> +set(PERF_OUTPUT_DIR ${PROJECT_BINARY_DIR}/perf/output)
> +file(MAKE_DIRECTORY ${PERF_OUTPUT_DIR})
> +
> +# List of paths that will be used for each suite.
> +make_lua_path(LUA_PATH_BENCH_BASE
> +  PATHS
> +    # Use of the bench module.
> +    ${CMAKE_CURRENT_SOURCE_DIR}/utils/?.lua
> +    # Simple usage with `jit.dump()`, etc.
> +    ${LUAJIT_SOURCE_DIR}/?.lua
> +    ${LUAJIT_BINARY_DIR}/?.lua
> +)
> +
> +make_lua_path(LUA_CPATH
> +  PATHS
> +    # XXX: Some arches may have the cjson module installed here.
> +    /usr/lib64/lua/5.1/?.so
> +)
> +
> +# Produce a pair of targets: one to run all benches for reporting and
> +# one to inspect the results from the console, runnable via CTest.
> +macro(AddBenchTarget perf_suite)
> +  file(MAKE_DIRECTORY "${PERF_OUTPUT_DIR}/${perf_suite}/")
> +  message(STATUS "Add perf suite ${perf_suite}")
> +  add_custom_target(${perf_suite})
> +  add_custom_target(${perf_suite}-console
> +    COMMAND ${CMAKE_CTEST_COMMAND}
> +      -L ${perf_suite}
Use the long option "--label-regex" instead of "-L" -- it is more
self-descriptive.
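I.e.:

-      -L ${perf_suite}
+      --label-regex ${perf_suite}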
> +      --parallel 1
> +      --verbose
> +      --output-on-failure
> +      --no-tests=error
> +  )
> +  add_dependencies(${perf_suite}-console luajit-main)
> +endmacro()
> +
> +# Add the bench to the pair of targets created by the call above.
> +macro(AddBench bench_name bench_path perf_suite LUA_PATH)
> +  set(bench_title "perf/${perf_suite}/${bench_name}")
> +  get_filename_component(bench_name_stripped ${bench_name} NAME_WE)
> +  set(bench_out_file
> +    ${PERF_OUTPUT_DIR}/${perf_suite}/${bench_name_stripped}.json
> +  )
> +  set(bench_command "${LUAJIT_BINARY} ${bench_path}")
> +  if(${ARGC} GREATER 4)
> +    set(input_file ${ARGV4})
> +    set(bench_command "${bench_command} < ${input_file}")
> +  endif()
> +  set(BENCH_FLAGS
> +    "--benchmark_out_format=json --benchmark_out=${bench_out_file}"
Why is BENCH_FLAGS in uppercase while bench_command_flags is in
lowercase? What is the difference?

Also, it seems these variables are unused and can be removed:
@@ -56,10 +58,6 @@ macro(AddBench bench_name bench_path perf_suite LUA_PATH)
     set(input_file ${ARGV4})
     set(bench_command "${bench_command} < ${input_file}")
   endif()
-  set(BENCH_FLAGS
-    "--benchmark_out_format=json --benchmark_out=${bench_out_file}"
-  )
-  set(bench_command_flags ${bench_command} ${BENCH_FLAGS})
   separate_arguments(bench_command_separated UNIX_COMMAND ${bench_command})
   add_custom_command(
     COMMAND ${CMAKE_COMMAND} -E env
> +  )
> +  set(bench_command_flags ${bench_command} ${BENCH_FLAGS})
> +  separate_arguments(bench_command_separated UNIX_COMMAND ${bench_command})
> +  add_custom_command(
> +    COMMAND ${CMAKE_COMMAND} -E env
> +      LUA_PATH="${LUA_PATH}"
> +      LUA_CPATH="${LUA_CPATH}"
> +      ${bench_command_separated}
> +      --benchmark_out_format=json
> +      --benchmark_out="${bench_out_file}"
> +    OUTPUT ${bench_out_file}
> +    DEPENDS luajit-main
> +    COMMENT
> +      "Running benchmark ${bench_title}, saving results in ${bench_out_file}."
> +  )
> +  add_custom_target(${bench_name} DEPENDS ${bench_out_file})
> +  add_dependencies(${perf_suite} ${bench_name})
> +
> +  # Report in the console.
> +  add_test(NAME ${bench_title}
> +    COMMAND sh -c "${bench_command}"
I propose to use find_program() to locate the shell executable, like we
do in Tarantool's root CMakeLists.txt:
--- a/perf/CMakeLists.txt
+++ b/perf/CMakeLists.txt
@@ -1,5 +1,7 @@
 # Running various bench suites against LuaJIT.
 
+find_program(SHELL sh)
+
 include(MakeLuaPath)
 
 if(CMAKE_BUILD_TYPE STREQUAL "Debug")
@@ -78,7 +80,7 @@ macro(AddBench bench_name bench_path perf_suite LUA_PATH)
 
   # Report in the console.
   add_test(NAME ${bench_title}
-    COMMAND sh -c "${bench_command}"
+    COMMAND ${SHELL} -c "${bench_command}"
   )
   set_tests_properties(${bench_title} PROPERTIES
     ENVIRONMENT "LUA_PATH=${LUA_PATH}"
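
Not a blocker, but if we may rely on CMake >= 3.18, the REQUIRED
keyword makes the configuration step fail early when the shell is not
found:

find_program(SHELL sh REQUIRED)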
> +  )
> +  set_tests_properties(${bench_title} PROPERTIES
> +    ENVIRONMENT "LUA_PATH=${LUA_PATH}"
> +    LABELS ${perf_suite}
> +    DEPENDS luajit-main
> +  )
> +  unset(input_file)
> +endmacro()
> +
> +add_subdirectory(LuaJIT-benches)
> +
> +add_custom_target(${PROJECT_NAME}-perf
> +  DEPENDS LuaJIT-benches
> +)
> +
> +add_custom_target(${PROJECT_NAME}-perf-console
> +  DEPENDS LuaJIT-benches-console
> +)
> diff --git a/perf/LuaJIT-benches/CMakeLists.txt b/perf/LuaJIT-benches/CMakeLists.txt
> new file mode 100644
> index 00000000..d9909f36
> --- /dev/null
> +++ b/perf/LuaJIT-benches/CMakeLists.txt
> @@ -0,0 +1,52 @@
> +set(PERF_SUITE_NAME LuaJIT-benches)
> +set(LUA_BENCH_SUFFIX .lua)
> +
> +AddBenchTarget(${PERF_SUITE_NAME})
> +
> +# Input for the k-nucleotide and revcomp benchmarks.
> +set(FASTA_NAME ${CMAKE_CURRENT_BINARY_DIR}/FASTA_5000000)
> +add_custom_target(FASTA_5000000
> +  COMMAND ${LUAJIT_BINARY}
> +    ${CMAKE_CURRENT_SOURCE_DIR}/libs/fasta.lua 5000000 > ${FASTA_NAME}
> +  BYPRODUCTS ${FASTA_NAME}
> +  DEPENDS luajit-main
> +  COMMENT "Generate ${FASTA_NAME}."
> +)
> +
> +make_lua_path(LUA_PATH
> +  PATHS
> +    ${LUA_PATH_BENCH_BASE}
> +    ${CMAKE_CURRENT_SOURCE_DIR}/libs/?.lua
> +)
> +
> +# Input for the <sum-file.lua> benchmark.
> +set(SUM_NAME ${CMAKE_CURRENT_BINARY_DIR}/SUMCOL_5000.txt)
> +# Remove possibly existing file.
> +file(REMOVE ${SUM_NAME})
> +
> +set(SUMCOL_FILE ${CMAKE_CURRENT_SOURCE_DIR}/SUMCOL_1.txt)
> +file(READ ${SUMCOL_FILE} SUMCOL_CONTENT)
> +foreach(_unused RANGE 4999)
> +  file(APPEND ${SUM_NAME} "${SUMCOL_CONTENT}")
> +endforeach()
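Not a blocker as well: since the content is simply repeated 5000 times,
string(REPEAT) (available since CMake 3.15) could replace the loop and
the file(REMOVE) call above, something along these lines (the
SUMCOL_REPEATED name is just an example):

# Build the whole payload in memory and write it at once;
# file(WRITE) overwrites the file, so no preliminary removal is needed.
string(REPEAT "${SUMCOL_CONTENT}" 5000 SUMCOL_REPEATED)
file(WRITE ${SUM_NAME} "${SUMCOL_REPEATED}")
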
> +
> +file(GLOB benches "${CMAKE_CURRENT_SOURCE_DIR}/*${LUA_BENCH_SUFFIX}")
> +foreach(bench_path ${benches})
> +  file(RELATIVE_PATH bench_name ${CMAKE_CURRENT_SOURCE_DIR} ${bench_path})
> +  set(bench_title "perf/${PERF_SUITE_NAME}/${bench_name}")
> +  if(bench_name MATCHES "k-nucleotide" OR bench_name MATCHES "revcomp")
> +    AddBench(${bench_name}
> +      ${bench_path} ${PERF_SUITE_NAME} "${LUA_PATH}" ${FASTA_NAME}
> +    )
> +    add_dependencies(${bench_name} FASTA_5000000)
> +  elseif(bench_name MATCHES "sum-file")
> +    AddBench(${bench_name}
> +      ${bench_path} ${PERF_SUITE_NAME} "${LUA_PATH}" ${SUM_NAME}
> +    )
> +  else()
> +    AddBench(${bench_name} ${bench_path} ${PERF_SUITE_NAME} "${LUA_PATH}")
> +  endif()
> +endforeach()
> +
> +# We need to generate the file before we run tests.
> +add_dependencies(${PERF_SUITE_NAME}-console FASTA_5000000)
>