tunectl 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,623 @@
1
#!/usr/bin/env bash
set -uo pipefail

# benchmark.sh — Performance benchmarking for tunectl
# Uses sysbench and fio to measure CPU, memory, and disk I/O performance.
#
# Modes:
#   (default)    Run all benchmarks and display current results
#   --baseline   Run benchmarks and save results for later comparison
#   --compare    Run benchmarks and compare against saved baseline
#
# Environment:
#   BENCHMARK_DIR   Override results directory (default: /var/lib/tunectl/benchmarks)
#
# Exit codes: 0=success, 1=operational failure, 2=usage error
#
# NOTE: `set -e` is intentionally omitted — individual benchmark failures
# are reported via an "ERROR" sentinel on stdout and handled explicitly,
# so one failed test does not abort the whole run.

# Directory containing this script.
# NOTE(review): SCRIPT_DIR appears unused within this file — possibly kept
# for other tunectl scripts that source this one; confirm before removing.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"

# Results directory — can be overridden via environment for testing
BENCHMARK_DIR="${BENCHMARK_DIR:-/var/lib/tunectl/benchmarks}"
BASELINE_FILE="$BENCHMARK_DIR/baseline.json"

# Bounded resource parameters (safe for small systems, VAL-BENCH-017)
CPU_TEST_TIME=10     # seconds per CPU test (≤30s)
MEMORY_TEST_TIME=10  # seconds per memory test (≤30s)
FIO_RUNTIME=10       # seconds for fio test (≤30s)
FIO_SIZE="64M"       # fio file size (≤256MB)
FIO_IODEPTH=16       # fio queue depth (≤64)

# Temp files registered here are removed by the EXIT trap (see cleanup()).
CLEANUP_FILES=()
32
+
33
+ # -------------------------------------------------------
34
+ # Cleanup handler — remove all temp files (VAL-BENCH-016)
35
+ # -------------------------------------------------------
36
# Remove every registered temp file (VAL-BENCH-016). Runs via the EXIT
# trap, so it must never fail itself.
cleanup() {
  # Guard the expansion: under `set -u` on bash < 4.4, "${arr[@]}" on an
  # empty array raises "unbound variable", which would make the EXIT trap
  # abort. Skip the loop entirely when nothing was registered.
  if [[ ${#CLEANUP_FILES[@]} -gt 0 ]]; then
    for f in "${CLEANUP_FILES[@]}"; do
      rm -f -- "$f" 2>/dev/null
    done
  fi
}
trap cleanup EXIT
42
+
43
+ # -------------------------------------------------------
44
+ # Usage
45
+ # -------------------------------------------------------
46
# Print the full help text to stdout (expands $BENCHMARK_DIR into the mode
# descriptions) and exit 0. Invoked for --help.
usage() {
  cat <<EOF
Usage: benchmark.sh [OPTIONS]

Performance benchmarking using sysbench and fio.

Options:
  --baseline    Run benchmarks and save results for later comparison
  --compare     Run benchmarks and compare against saved baseline
  --help        Show this help message

Modes:
  (default)     Run benchmarks and display current results
  --baseline    Save results to $BENCHMARK_DIR/
  --compare     Load saved baseline and show comparison table

Tests:
  CPU single-thread    sysbench cpu --threads=1
  CPU multi-thread     sysbench cpu --threads=N (auto-detected)
  Memory read          sysbench memory --memory-oper=read
  Memory write         sysbench memory --memory-oper=write
  Disk random read     fio --rw=randread --bs=4k on disk-backed storage
EOF
  exit 0
}
71
+
72
+ # -------------------------------------------------------
73
+ # Parse arguments
74
+ # -------------------------------------------------------
75
# Selected run mode: "default", "baseline", or "compare".
MODE="default"

while [[ $# -gt 0 ]]; do
  case "$1" in
    --baseline)
      MODE="baseline"
      shift
      ;;
    --compare)
      MODE="compare"
      shift
      ;;
    --help)
      usage  # prints help and exits 0
      ;;
    *)
      # Unknown flag: usage error — exit 2 per the header contract.
      echo "Error: Unknown argument '$1'" >&2
      echo "Usage: benchmark.sh [--baseline|--compare|--help]" >&2
      exit 2
      ;;
  esac
done
97
+
98
+ # -------------------------------------------------------
99
+ # Check dependencies
100
+ # -------------------------------------------------------
101
# Ensure sysbench and fio are on PATH, attempting an apt-get install
# (root only) for whichever is missing. Exits 1 on any failure; returns 0
# when both tools are available.
check_dependencies() {
  local needed=()
  local tool

  for tool in sysbench fio; do
    if ! command -v "$tool" &>/dev/null; then
      needed+=("$tool")
    fi
  done

  if [[ ${#needed[@]} -eq 0 ]]; then
    return 0
  fi

  echo "Installing missing dependencies: ${needed[*]}..." >&2

  # Only apt-based systems are supported for auto-install.
  if ! command -v apt-get &>/dev/null; then
    echo "Error: apt-get not available. Install manually: ${needed[*]}" >&2
    exit 1
  fi

  # Installing requires root; otherwise tell the user what to run.
  if [[ $EUID -ne 0 ]]; then
    echo "Error: Cannot install ${needed[*]} without root. Run: sudo apt-get install ${needed[*]}" >&2
    exit 1
  fi

  apt-get update -qq >/dev/null 2>&1
  apt-get install -y -qq "${needed[@]}" >/dev/null 2>&1

  # Confirm each requested tool actually landed on PATH.
  for tool in "${needed[@]}"; do
    if ! command -v "$tool" &>/dev/null; then
      echo "Error: Failed to install $tool" >&2
      exit 1
    fi
  done
}
136
+
137
+ # -------------------------------------------------------
138
+ # Find a disk-backed path for fio tests (NOT tmpfs)
139
+ # VAL-BENCH-006: Must use actual block storage
140
+ # -------------------------------------------------------
141
# Locate a writable directory on disk-backed (non-tmpfs) storage for fio.
# Prints the chosen path on stdout; returns 1 if nothing suitable exists.
# VAL-BENCH-006: fio must exercise actual block storage, not RAM.
find_disk_backed_path() {
  local dir fs

  # Preference order: /var/tmp is usually disk-backed, then /home, etc.
  for dir in /var/tmp /home /var/lib /opt; do
    [[ -d "$dir" && -w "$dir" ]] || continue
    fs=$(stat -f -c '%T' "$dir" 2>/dev/null || findmnt -n -o FSTYPE --target "$dir" 2>/dev/null || echo "unknown")
    # tmpfs has a specific magic number 0x01021994 which stat -f shows as "tmpfs"
    if [[ "$fs" != "tmpfs" ]]; then
      echo "$dir"
      return 0
    fi
  done

  # Last resort: /var/tmp even when the fs type could not be determined.
  if [[ -d /var/tmp && -w /var/tmp ]]; then
    echo "/var/tmp"
    return 0
  fi

  echo "Error: Cannot find a disk-backed writable directory for fio tests" >&2
  return 1
}
166
+
167
+ # -------------------------------------------------------
168
+ # CPU benchmark — single thread
169
+ # -------------------------------------------------------
170
# CPU benchmark, single thread. Prints events/sec on stdout, or the
# sentinel "ERROR" (with a message on stderr) when sysbench fails or its
# output cannot be parsed. Always returns 0 — callers test the sentinel.
run_cpu_single() {
  local output rc=0
  output=$(sysbench cpu --threads=1 --time="$CPU_TEST_TIME" run 2>/dev/null) || rc=$?

  if [[ $rc -ne 0 || -z "$output" ]]; then
    echo "Error: CPU single-thread benchmark failed (sysbench exit code $rc)" >&2
    echo "ERROR"
    return 0
  fi

  # Extract the value from "events per second: NNN.NN"
  local events
  events=$(grep -oP 'events per second:\s*\K[0-9]+(\.[0-9]+)?' <<< "$output" || true)

  if [[ -n "$events" ]]; then
    echo "$events"
  else
    echo "Error: CPU single-thread benchmark produced no parseable output" >&2
    echo "ERROR"
  fi
  return 0
}
192
+
193
+ # -------------------------------------------------------
194
+ # CPU benchmark — multi thread
195
+ # -------------------------------------------------------
196
# CPU benchmark, all cores. Thread count comes from nproc (fallback 1).
# Prints events/sec, or the sentinel "ERROR" (message on stderr) when
# sysbench fails or emits nothing parseable. Always returns 0.
run_cpu_multi() {
  local nthreads
  nthreads=$(nproc 2>/dev/null || echo 1)

  local output rc=0
  output=$(sysbench cpu --threads="$nthreads" --time="$CPU_TEST_TIME" run 2>/dev/null) || rc=$?

  if [[ $rc -ne 0 || -z "$output" ]]; then
    echo "Error: CPU multi-thread benchmark failed (sysbench exit code $rc)" >&2
    echo "ERROR"
    return 0
  fi

  # Extract the value from "events per second: NNN.NN"
  local events
  events=$(grep -oP 'events per second:\s*\K[0-9]+(\.[0-9]+)?' <<< "$output" || true)

  if [[ -n "$events" ]]; then
    echo "$events"
  else
    echo "Error: CPU multi-thread benchmark produced no parseable output" >&2
    echo "ERROR"
  fi
  return 0
}
219
+
220
+ # -------------------------------------------------------
221
+ # Memory benchmark — read
222
+ # -------------------------------------------------------
223
# Memory read benchmark. Prints MiB/s, or the sentinel "ERROR" (message
# on stderr) when sysbench fails or its output is unparseable. Always
# returns 0 — callers test for the sentinel.
run_mem_read() {
  local output rc=0
  output=$(sysbench memory --memory-oper=read --threads=1 --time="$MEMORY_TEST_TIME" run 2>/dev/null) || rc=$?

  if [[ $rc -ne 0 || -z "$output" ]]; then
    echo "Error: Memory read benchmark failed (sysbench exit code $rc)" >&2
    echo "ERROR"
    return 0
  fi

  # Parse throughput from "NNNN.NN MiB transferred (NNNN.NN MiB/sec)"
  local rate
  rate=$(grep -oP '\(([0-9]+(\.[0-9]+)?)\s*MiB/sec\)' <<< "$output" | grep -oP '[0-9]+(\.[0-9]+)?' | head -1 || true)

  if [[ -n "$rate" ]]; then
    echo "$rate"
  else
    echo "Error: Memory read benchmark produced no parseable output" >&2
    echo "ERROR"
  fi
  return 0
}
245
+
246
+ # -------------------------------------------------------
247
+ # Memory benchmark — write
248
+ # -------------------------------------------------------
249
# Memory write benchmark. Prints MiB/s, or the sentinel "ERROR" (message
# on stderr) when sysbench fails or its output is unparseable. Always
# returns 0 — callers test for the sentinel.
run_mem_write() {
  local output rc=0
  output=$(sysbench memory --memory-oper=write --threads=1 --time="$MEMORY_TEST_TIME" run 2>/dev/null) || rc=$?

  if [[ $rc -ne 0 || -z "$output" ]]; then
    echo "Error: Memory write benchmark failed (sysbench exit code $rc)" >&2
    echo "ERROR"
    return 0
  fi

  # Parse throughput from "NNNN.NN MiB transferred (NNNN.NN MiB/sec)"
  local rate
  rate=$(grep -oP '\(([0-9]+(\.[0-9]+)?)\s*MiB/sec\)' <<< "$output" | grep -oP '[0-9]+(\.[0-9]+)?' | head -1 || true)

  if [[ -n "$rate" ]]; then
    echo "$rate"
  else
    echo "Error: Memory write benchmark produced no parseable output" >&2
    echo "ERROR"
  fi
  return 0
}
270
+
271
+ # -------------------------------------------------------
272
+ # Disk benchmark — random read IOPS
273
+ # -------------------------------------------------------
274
# Disk random-read benchmark (4k blocks, direct I/O). Prints IOPS on
# stdout, or the sentinel "ERROR" (with a message on stderr) on any
# failure. Always returns 0 — callers test for the sentinel.
run_disk_rand_read() {
  local disk_path
  # Bail out with the sentinel if no non-tmpfs writable directory exists.
  disk_path=$(find_disk_backed_path) || {
    echo "Error: Disk random read benchmark failed (no disk-backed path found)" >&2
    echo "ERROR"
    return 0
  }

  local fio_file="${disk_path}/benchmark_fio_testfile"
  # Register with the EXIT trap in case fio is interrupted mid-run.
  CLEANUP_FILES+=("$fio_file")

  local raw cmd_exit=0
  raw=$(fio --name=benchmark_fio_test \
    --filename="$fio_file" \
    --rw=randread \
    --bs=4k \
    --direct=1 \
    --ioengine=libaio \
    --iodepth="$FIO_IODEPTH" \
    --size="$FIO_SIZE" \
    --runtime="$FIO_RUNTIME" \
    --time_based \
    --output-format=json 2>/dev/null) || cmd_exit=$?

  # Cleanup the fio test file immediately (the EXIT trap is only a backstop)
  rm -f "$fio_file" 2>/dev/null

  if [[ $cmd_exit -ne 0 || -z "$raw" ]]; then
    echo "Error: Disk random read benchmark failed (fio exit code $cmd_exit)" >&2
    echo "ERROR"
    return 0
  fi

  # Parse read IOPS out of fio's JSON report; prints empty on any parse
  # error so the emptiness check below turns it into the sentinel.
  local iops
  iops=$(echo "$raw" | python3 -c "
import json, sys
try:
    d = json.load(sys.stdin)
    print(f\"{d['jobs'][0]['read']['iops']:.2f}\")
except Exception:
    print('')
" 2>/dev/null)

  if [[ -z "$iops" ]]; then
    echo "Error: Disk random read benchmark produced no parseable IOPS output" >&2
    echo "ERROR"
    return 0
  fi

  echo "$iops"
}
326
+
327
+ # -------------------------------------------------------
328
+ # Run all benchmarks and collect results
329
+ # Returns results as key=value pairs
330
+ # Exits 1 if any benchmark command failed (ERROR sentinel)
331
+ # -------------------------------------------------------
332
# Run the five benchmarks in sequence. Progress goes to stderr; stdout
# gets one "metric value unit" line per result, where a failed benchmark
# carries the "ERROR" sentinel as its value. Returns 1 if any benchmark
# failed (after still printing all results), else 0.
run_all_benchmarks() {
  local failed_benchmarks=()

  echo "Running benchmarks..." >&2

  echo " [1/5] CPU single-thread..." >&2
  local cpu_st
  cpu_st=$(run_cpu_single)
  [[ "$cpu_st" == "ERROR" ]] && failed_benchmarks+=("CPU single-thread")

  echo " [2/5] CPU multi-thread..." >&2
  local cpu_mt
  cpu_mt=$(run_cpu_multi)
  [[ "$cpu_mt" == "ERROR" ]] && failed_benchmarks+=("CPU multi-thread")

  echo " [3/5] Memory read..." >&2
  local mem_read
  mem_read=$(run_mem_read)
  [[ "$mem_read" == "ERROR" ]] && failed_benchmarks+=("Memory read")

  echo " [4/5] Memory write..." >&2
  local mem_write
  mem_write=$(run_mem_write)
  [[ "$mem_write" == "ERROR" ]] && failed_benchmarks+=("Memory write")

  echo " [5/5] Disk random read..." >&2
  local disk_rr
  disk_rr=$(run_disk_rand_read)
  [[ "$disk_rr" == "ERROR" ]] && failed_benchmarks+=("Disk random read")

  echo "Done." >&2

  # Structured results on stdout — consumed by display/save/compare.
  cat <<EOF
cpu_single_thread $cpu_st events/s
cpu_multi_thread $cpu_mt events/s
mem_read $mem_read MiB/s
mem_write $mem_write MiB/s
disk_rand_read_iops $disk_rr IOPS
EOF

  # List the failures on stderr and signal failure to the caller.
  if [[ ${#failed_benchmarks[@]} -gt 0 ]]; then
    echo "" >&2
    echo "Error: The following benchmark(s) failed:" >&2
    for b in "${failed_benchmarks[@]}"; do
      echo " - $b" >&2
    done
    return 1
  fi

  return 0
}
385
+
386
+ # -------------------------------------------------------
387
+ # Display results in structured format (VAL-BENCH-011)
388
+ # -------------------------------------------------------
389
# Render benchmark results ("metric value unit" lines, $1) as a table on
# stdout (VAL-BENCH-011). Rows carrying the "ERROR" sentinel are shown
# as "N/A (failed)" instead of the raw sentinel.
display_results() {
  local report="$1"
  local metric value unit label

  echo ""
  echo "===== tunectl Benchmark Results ====="
  echo ""
  printf " %-30s %15s %s\n" "Metric" "Value" "Unit"
  printf " %-30s %15s %s\n" "------------------------------" "---------------" "----"

  while IFS=' ' read -r metric value unit; do
    [[ -z "$metric" ]] && continue
    # Map machine-readable metric keys to human-friendly labels.
    case "$metric" in
      cpu_single_thread) label="CPU Single-Thread" ;;
      cpu_multi_thread) label="CPU Multi-Thread" ;;
      mem_read) label="Memory Read" ;;
      mem_write) label="Memory Write" ;;
      disk_rand_read_iops) label="Disk Random Read" ;;
      *) label="$metric" ;;
    esac
    if [[ "$value" != "ERROR" ]]; then
      printf " %-30s %15s %s\n" "$label" "$value" "$unit"
    else
      # Sentinel — render a failed row rather than printing "ERROR".
      printf " %-30s %15s %s\n" "$label" "N/A" "(failed)"
    fi
  done <<< "$report"

  echo ""
  echo "====================================="
}
420
+
421
+ # -------------------------------------------------------
422
+ # Save results as JSON (for --baseline mode)
423
+ # -------------------------------------------------------
424
# Serialize results ("metric value unit" lines, $1) as JSON to file $2,
# creating the parent directory as needed. ERROR metrics are stored as
# {"value": null, "unit": "ERROR"} so --compare can distinguish a failed
# benchmark from a missing one. Returns 1 if the directory cannot be
# created or python3 fails; 0 on success.
save_results_json() {
  local results="$1"
  local outfile="$2"

  # Ensure output directory exists
  local outdir
  outdir=$(dirname "$outfile")
  mkdir -p "$outdir" 2>/dev/null || {
    echo "Error: Cannot create directory $outdir" >&2
    return 1
  }

  # Pass the output path via argv instead of interpolating it into the
  # Python source: a path containing a quote would otherwise break (or
  # inject code into) the generated program.
  python3 -c "
import json, sys, datetime

results = {}
for line in sys.stdin:
    parts = line.strip().split(None, 2)
    if len(parts) >= 2:
        metric = parts[0]
        if parts[1] == 'ERROR':
            results[metric] = {'value': None, 'unit': 'ERROR'}
            continue
        value = float(parts[1])
        unit = parts[2] if len(parts) > 2 else ''
        results[metric] = {'value': value, 'unit': unit}

output = {
    'timestamp': datetime.datetime.now().isoformat(),
    'metrics': results
}

with open(sys.argv[1], 'w') as f:
    json.dump(output, f, indent=2)
" "$outfile" <<< "$results"
}
461
+
462
+ # -------------------------------------------------------
463
+ # Load baseline JSON
464
+ # -------------------------------------------------------
465
# Print a saved baseline file ($1) as "metric value unit" lines matching
# run_all_benchmarks output. Metrics stored with a null value or unit
# "ERROR" are re-emitted with the ERROR sentinel. Returns 1 when the
# file is missing or unparseable.
load_baseline() {
  local file="$1"
  if [[ ! -f "$file" ]]; then
    return 1
  fi
  # Pass the path via argv instead of interpolating it into the Python
  # source, so quotes in the filename cannot break (or inject code into)
  # the generated program.
  python3 -c "
import json, sys
try:
    d = json.load(open(sys.argv[1]))
    for metric, data in d.get('metrics', d).items():
        if isinstance(data, dict):
            val = data['value']
            unit = data.get('unit', '')
            # Handle ERROR metrics saved with None value
            if val is None or unit == 'ERROR':
                print(f'{metric} ERROR {unit}')
            else:
                print(f'{metric} {val} {unit}')
        else:
            print(f'{metric} {data}')
except Exception as e:
    print(f'Error loading baseline: {e}', file=sys.stderr)
    sys.exit(1)
" "$file" 2>/dev/null
}
490
+
491
+ # -------------------------------------------------------
492
+ # Display comparison table (VAL-BENCH-003)
493
+ # -------------------------------------------------------
494
# Print a baseline-vs-current comparison table (VAL-BENCH-003).
# $1 and $2 are "metric value unit" result strings (baseline / current).
# Rows where either side is ERROR or missing show N/A instead of a
# computed delta.
display_comparison() {
  local baseline_results="$1"
  local current_results="$2"

  echo ""
  echo "===== tunectl Benchmark Comparison ====="
  echo ""
  printf " %-25s %12s %12s %12s %8s\n" "Metric" "Baseline" "Current" "Change" "%Change"
  printf " %-25s %12s %12s %12s %8s\n" "-------------------------" "------------" "------------" "------------" "--------"

  # Index baseline rows by metric name for O(1) lookup per current row.
  declare -A baseline_vals
  declare -A baseline_units
  while IFS=' ' read -r metric value unit; do
    [[ -z "$metric" ]] && continue
    baseline_vals["$metric"]="$value"
    baseline_units["$metric"]="$unit"
  done <<< "$baseline_results"

  while IFS=' ' read -r metric value unit; do
    [[ -z "$metric" ]] && continue

    local label
    case "$metric" in
      cpu_single_thread) label="CPU Single-Thread" ;;
      cpu_multi_thread) label="CPU Multi-Thread" ;;
      mem_read) label="Memory Read" ;;
      mem_write) label="Memory Write" ;;
      disk_rand_read_iops) label="Disk Random Read" ;;
      *) label="$metric" ;;
    esac

    # Handle ERROR in current value — show N/A instead of bogus computation
    if [[ "$value" == "ERROR" ]]; then
      local bval="${baseline_vals[$metric]:-N/A}"
      printf " %-25s %12s %12s %12s %8s\n" "$label" "$bval" "N/A" "N/A" "N/A"
      continue
    fi

    local bval="${baseline_vals[$metric]:-}"
    # Handle missing or ERROR baseline — show N/A for comparison
    if [[ -z "$bval" || "$bval" == "None" || "$bval" == "ERROR" ]]; then
      printf " %-25s %12s %12s %12s %8s\n" "$label" "N/A" "$value" "N/A" "N/A"
      continue
    fi

    # Calculate change and percentage via python3 for float arithmetic.
    # NOTE(review): $bval/$value are interpolated into the Python source;
    # they are expected to be numeric here (parsed from sysbench/fio
    # output), and any malformed value falls through to "0"/"N/A" via the
    # || fallbacks rather than crashing — confirm inputs stay numeric.
    local change pct sign
    change=$(python3 -c "b=$bval; c=$value; print(f'{c-b:.2f}')" 2>/dev/null || echo "0")
    pct=$(python3 -c "
b=$bval; c=$value
if b == 0:
    print('N/A')
else:
    pct = ((c - b) / b) * 100
    print(f'{pct:+.1f}%')
" 2>/dev/null || echo "N/A")

    # Add sign to change for display (negative values already carry '-').
    sign=$(python3 -c "c=$change; print('+' if c > 0 else '')" 2>/dev/null || echo "")
    printf " %-25s %12s %12s %12s %8s\n" "$label" "$bval" "$value" "${sign}${change}" "$pct"

  done <<< "$current_results"

  echo ""
  echo "========================================="
}
561
+
562
+ # -------------------------------------------------------
563
+ # Main
564
+ # -------------------------------------------------------
565
# Entry point — verifies tool availability then dispatches on MODE
# (set by the argument parser above). Exits 1 when any benchmark failed,
# after results have still been displayed/saved.
main() {
  check_dependencies

  case "$MODE" in
    default)
      # Run, display, then propagate benchmark failure as exit 1.
      local results benchmark_rc=0
      results=$(run_all_benchmarks) || benchmark_rc=$?
      display_results "$results"
      if [[ $benchmark_rc -ne 0 ]]; then
        exit 1
      fi
      ;;

    baseline)
      local results benchmark_rc=0
      results=$(run_all_benchmarks) || benchmark_rc=$?
      display_results "$results"

      if [[ $benchmark_rc -ne 0 ]]; then
        echo "Warning: Some benchmarks failed. Saving partial results." >&2
      fi

      # Partial results are still saved (failed metrics become null).
      save_results_json "$results" "$BASELINE_FILE" || {
        echo "Error: Failed to save baseline results" >&2
        exit 1
      }
      echo "Baseline saved to: $BASELINE_FILE"

      if [[ $benchmark_rc -ne 0 ]]; then
        exit 1
      fi
      ;;

    compare)
      if [[ ! -f "$BASELINE_FILE" ]]; then
        echo "Error: No baseline found at $BASELINE_FILE" >&2
        echo "Run 'benchmark.sh --baseline' first to capture a baseline." >&2
        exit 1
      fi

      local baseline_results
      baseline_results=$(load_baseline "$BASELINE_FILE") || {
        echo "Error: Failed to load baseline from $BASELINE_FILE" >&2
        exit 1
      }

      local current_results benchmark_rc=0
      current_results=$(run_all_benchmarks) || benchmark_rc=$?

      display_comparison "$baseline_results" "$current_results"

      if [[ $benchmark_rc -ne 0 ]]; then
        exit 1
      fi
      ;;
  esac
}

main