diff --git a/CMakeLists.txt b/CMakeLists.txt
index 03478720..0a39dd57 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -67,7 +67,7 @@
 option(BUILD_SHARED_LIBS "Build using shared libraries" ON)
 set(CMAKE_WINDOWS_EXPORT_ALL_SYMBOLS FALSE)
 
-option(BUILD_BENCHMARKING "Build CasADi and Sleipnir benchmarks" OFF)
+option(BUILD_BENCHMARKS "Build CasADi and Sleipnir benchmarks" OFF)
 option(BUILD_EXAMPLES "Build examples" OFF)
 option(BUILD_PYTHON "Build Python module" OFF)
 option(DISABLE_DIAGNOSTICS "Disable diagnostics support at compile-time" OFF)
@@ -177,7 +177,7 @@ install(
 )
 
 # Add benchmark executables
-if(BUILD_BENCHMARKING)
+if(BUILD_BENCHMARKS)
   # Perf benchmark
   foreach(benchmark "CartPole" "Flywheel")
     file(
diff --git a/README.md b/README.md
index 48642bce..2cc547ac 100644
--- a/README.md
+++ b/README.md
@@ -236,7 +236,7 @@ Some test problems generate CSV files containing their solutions. These can be p
 Benchmark projects are in the [benchmarks folder](https://github.com/SleipnirGroup/Sleipnir/tree/main/benchmarks). To compile and run them, run the following in the repository root:
 ```bash
 # Install CasADi and [matplotlib, numpy, scipy] pip packages first
-cmake -B build -S . -DBUILD_BENCHMARKING=ON
+cmake -B build -S . -DBUILD_BENCHMARKS=ON
 cmake --build build
 ./tools/generate-scalability-results.sh
 ```
diff --git a/tools/perf-benchmark.sh b/tools/perf-benchmark.sh
index 395f9f4e..6d846c14 100755
--- a/tools/perf-benchmark.sh
+++ b/tools/perf-benchmark.sh
@@ -6,7 +6,7 @@ if [[ $# -ne 1 ]] || [[ "$1" != "CartPole" && "$1" != "Flywheel" ]]; then
   exit 1
 fi
 
-cmake -B build-perf -S . -DCMAKE_BUILD_TYPE=Perf -DBUILD_BENCHMARKING=ON
+cmake -B build-perf -S . -DCMAKE_BUILD_TYPE=Perf -DBUILD_BENCHMARKS=ON
 cmake --build build-perf --target $1PerfBenchmark
 ./tools/perf-record.sh ./build-perf/$1PerfBenchmark
 ./tools/perf-report.sh