redis-benchmarks-specification 0.1.273__py3-none-any.whl → 0.1.275__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- redis_benchmarks_specification/__common__/timeseries.py +28 -6
- redis_benchmarks_specification/__runner__/args.py +36 -0
- redis_benchmarks_specification/__runner__/remote_profiling.py +329 -0
- redis_benchmarks_specification/__runner__/runner.py +488 -52
- redis_benchmarks_specification/test-suites/defaults.yml +3 -0
- {redis_benchmarks_specification-0.1.273.dist-info → redis_benchmarks_specification-0.1.275.dist-info}/METADATA +1 -1
- {redis_benchmarks_specification-0.1.273.dist-info → redis_benchmarks_specification-0.1.275.dist-info}/RECORD +10 -9
- {redis_benchmarks_specification-0.1.273.dist-info → redis_benchmarks_specification-0.1.275.dist-info}/LICENSE +0 -0
- {redis_benchmarks_specification-0.1.273.dist-info → redis_benchmarks_specification-0.1.275.dist-info}/WHEEL +0 -0
- {redis_benchmarks_specification-0.1.273.dist-info → redis_benchmarks_specification-0.1.275.dist-info}/entry_points.txt +0 -0
```diff
--- a/redis_benchmarks_specification/__common__/timeseries.py
+++ b/redis_benchmarks_specification/__common__/timeseries.py
@@ -133,6 +133,9 @@ def extract_results_table(
     use_metric_context_path = False
     if len(find_res) > 1:
         use_metric_context_path = True
+    # Always use context path for precision_summary metrics to show actual precision levels
+    if "precision_summary" in metric_jsonpath and "*" in metric_jsonpath:
+        use_metric_context_path = True
     for metric in find_res:
         metric_name = str(metric.path)
         metric_value = float(metric.value)
```
```diff
@@ -142,15 +145,34 @@ def extract_results_table(
         if metric_jsonpath[0] == ".":
             metric_jsonpath = metric_jsonpath[1:]
 
+        # For precision_summary metrics, construct the full resolved path for display
+        display_path = metric_jsonpath
+        if "precision_summary" in metric_jsonpath and "*" in metric_jsonpath and use_metric_context_path:
+            # Replace the wildcard with the actual precision level
+            display_path = metric_jsonpath.replace("*", metric_context_path)
+
         # retro-compatible naming
         if use_metric_context_path is False:
             metric_name = metric_jsonpath
-        metric_name = metric_name.replace("'", "")
-        metric_name = metric_name.replace('"', "")
-        metric_name = metric_name.replace("(", "")
-        metric_name = metric_name.replace(")", "")
-        metric_name = metric_name.replace(" ", "_")
-
+        else:
+            # For display purposes, use the resolved path for precision_summary
+            if "precision_summary" in metric_jsonpath and "*" in metric_jsonpath:
+                metric_name = display_path
+            else:
+                # Clean up the metric name for other cases
+                metric_name = metric_name.replace("'", "")
+                metric_name = metric_name.replace('"', "")
+                metric_name = metric_name.replace("(", "")
+                metric_name = metric_name.replace(")", "")
+                metric_name = metric_name.replace(" ", "_")
+
+        # Apply standard cleaning to all metric names
+        if not ("precision_summary" in metric_jsonpath and "*" in metric_jsonpath and use_metric_context_path):
+            metric_name = metric_name.replace("'", "")
+            metric_name = metric_name.replace('"', "")
+            metric_name = metric_name.replace("(", "")
+            metric_name = metric_name.replace(")", "")
+            metric_name = metric_name.replace(" ", "_")
 
         results_matrix.append(
             [
```
```diff
--- a/redis_benchmarks_specification/__runner__/args.py
+++ b/redis_benchmarks_specification/__runner__/args.py
@@ -225,4 +225,40 @@ def create_client_runner_args(project_name):
         default="",
         help="UNIX Domain socket name",
     )
+    parser.add_argument(
+        "--enable-remote-profiling",
+        default=False,
+        action="store_true",
+        help="Enable remote profiling of Redis processes via HTTP GET endpoint. Profiles are collected in folded format during benchmark execution.",
+    )
+    parser.add_argument(
+        "--remote-profile-host",
+        type=str,
+        default="localhost",
+        help="Host for remote profiling HTTP endpoint. Default is localhost.",
+    )
+    parser.add_argument(
+        "--remote-profile-port",
+        type=int,
+        default=8080,
+        help="Port for remote profiling HTTP endpoint. Default is 8080.",
+    )
+    parser.add_argument(
+        "--remote-profile-output-dir",
+        type=str,
+        default="profiles",
+        help="Directory to store remote profiling output files. Default is 'profiles/'.",
+    )
+    parser.add_argument(
+        "--remote-profile-username",
+        type=str,
+        default=None,
+        help="Username for HTTP basic authentication to remote profiling endpoint. Optional.",
+    )
+    parser.add_argument(
+        "--remote-profile-password",
+        type=str,
+        default=None,
+        help="Password for HTTP basic authentication to remote profiling endpoint. Optional.",
+    )
     return parser
```
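For orientation, a minimal sketch of how the new flags parse. The project-name string is a placeholder, and this assumes the runner parser has no other required arguments:

```python
from redis_benchmarks_specification.__runner__.args import create_client_runner_args

parser = create_client_runner_args("redis-benchmarks-spec-client-runner")  # placeholder name
args = parser.parse_args(
    [
        "--enable-remote-profiling",
        "--remote-profile-host", "perf-agent.internal",  # hypothetical endpoint
        "--remote-profile-port", "8080",
    ]
)
print(args.enable_remote_profiling, args.remote_profile_host, args.remote_profile_port)
# True perf-agent.internal 8080
```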
```diff
--- /dev/null
+++ b/redis_benchmarks_specification/__runner__/remote_profiling.py
@@ -0,0 +1,329 @@
+"""
+Remote profiling utilities for Redis benchmark runner.
+
+This module provides functionality to trigger remote profiling of Redis processes
+via HTTP GET endpoints during benchmark execution. Profiles are collected in
+folded format for performance analysis.
+"""
+
+import datetime
+import logging
+import os
+import threading
+import time
+from pathlib import Path
+from typing import Optional, Dict, Any
+import requests
+
+
+def extract_redis_pid(redis_conn) -> Optional[int]:
+    """
+    Extract Redis process ID from Redis INFO command.
+
+    Args:
+        redis_conn: Redis connection object
+
+    Returns:
+        Redis process ID as integer, or None if not found
+    """
+    try:
+        redis_info = redis_conn.info()
+        pid = redis_info.get("process_id")
+        if pid is not None:
+            logging.info(f"Extracted Redis PID: {pid}")
+            return int(pid)
+        else:
+            logging.warning("Redis process_id not found in INFO command")
+            return None
+    except Exception as e:
+        logging.error(f"Failed to extract Redis PID: {e}")
+        return None
+
+
+def extract_redis_metadata(redis_conn) -> Dict[str, Any]:
+    """
+    Extract Redis metadata for profile comments.
+
+    Args:
+        redis_conn: Redis connection object
+
+    Returns:
+        Dictionary containing Redis metadata
+    """
+    try:
+        redis_info = redis_conn.info()
+        metadata = {
+            "redis_version": redis_info.get("redis_version", "unknown"),
+            "redis_git_sha1": redis_info.get("redis_git_sha1", "unknown"),
+            "redis_git_dirty": redis_info.get("redis_git_dirty", "unknown"),
+            "redis_build_id": redis_info.get("redis_build_id", "unknown"),
+            "process_id": redis_info.get("process_id", "unknown"),
+            "tcp_port": redis_info.get("tcp_port", "unknown"),
+        }
+
+        # Use build_id if git_sha1 is empty or 0
+        if metadata["redis_git_sha1"] in ("", 0, "0"):
+            metadata["redis_git_sha1"] = metadata["redis_build_id"]
+
+        logging.info(f"Extracted Redis metadata: version={metadata['redis_version']}, sha={metadata['redis_git_sha1']}, pid={metadata['process_id']}")
+        return metadata
+    except Exception as e:
+        logging.error(f"Failed to extract Redis metadata: {e}")
+        return {
+            "redis_version": "unknown",
+            "redis_git_sha1": "unknown",
+            "redis_git_dirty": "unknown",
+            "redis_build_id": "unknown",
+            "process_id": "unknown",
+            "tcp_port": "unknown",
+        }
+
+
+def calculate_profile_duration(benchmark_duration_seconds: int) -> int:
+    """
+    Calculate profiling duration based on benchmark duration.
+
+    Args:
+        benchmark_duration_seconds: Expected benchmark duration in seconds
+
+    Returns:
+        Profiling duration in seconds (minimum: benchmark duration, maximum: 30)
+    """
+    # Minimum duration is the benchmark duration, maximum is 30 seconds
+    duration = min(max(benchmark_duration_seconds, 10), 30)
+    logging.info(f"Calculated profile duration: {duration}s (benchmark: {benchmark_duration_seconds}s)")
+    return duration
+
+
+def trigger_remote_profile(
+    host: str,
+    port: int,
+    pid: int,
+    duration: int,
+    timeout: int = 60,
+    username: Optional[str] = None,
+    password: Optional[str] = None
+) -> Optional[str]:
+    """
+    Trigger remote profiling via HTTP GET request.
+
+    Args:
+        host: Remote host address
+        port: Remote port number
+        pid: Redis process ID
+        duration: Profiling duration in seconds
+        timeout: HTTP request timeout in seconds
+        username: Optional username for HTTP basic authentication
+        password: Optional password for HTTP basic authentication
+
+    Returns:
+        Profile content in folded format, or None if failed
+    """
+    url = f"http://{host}:{port}/debug/folded/profile"
+    params = {
+        "pid": pid,
+        "seconds": duration
+    }
+
+    # Prepare authentication if provided
+    auth = None
+    if username is not None and password is not None:
+        auth = (username, password)
+        logging.info(f"Using HTTP basic authentication with username: {username}")
+
+    try:
+        logging.info(f"Triggering remote profile: {url} with PID={pid}, duration={duration}s")
+        response = requests.get(url, params=params, timeout=timeout, auth=auth)
+        response.raise_for_status()
+
+        profile_content = response.text
+        logging.info(f"Successfully collected profile: {len(profile_content)} characters")
+        return profile_content
+
+    except requests.exceptions.Timeout:
+        logging.error(f"Remote profiling request timed out after {timeout}s")
+        return None
+    except requests.exceptions.ConnectionError:
+        logging.error(f"Failed to connect to remote profiling endpoint: {host}:{port}")
+        return None
+    except requests.exceptions.HTTPError as e:
+        logging.error(f"HTTP error during remote profiling: {e}")
+        if e.response.status_code == 401:
+            logging.error("Authentication failed - check username and password")
+        return None
+    except Exception as e:
+        logging.error(f"Unexpected error during remote profiling: {e}")
+        return None
+
+
+def save_profile_with_metadata(
+    profile_content: str,
+    benchmark_name: str,
+    output_dir: str,
+    redis_metadata: Dict[str, Any],
+    duration: int
+) -> Optional[str]:
+    """
+    Save profile content to file with metadata comments.
+
+    Args:
+        profile_content: Profile data in folded format
+        benchmark_name: Name of the benchmark
+        output_dir: Output directory path
+        redis_metadata: Redis metadata dictionary
+        duration: Profiling duration in seconds
+
+    Returns:
+        Path to saved file, or None if failed
+    """
+    try:
+        # Create output directory if it doesn't exist
+        Path(output_dir).mkdir(parents=True, exist_ok=True)
+
+        # Generate filename
+        filename = f"{benchmark_name}.folded"
+        filepath = os.path.join(output_dir, filename)
+
+        # Generate timestamp
+        timestamp = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+
+        # Create metadata comment
+        metadata_comment = (
+            f"# profile from redis sha = {redis_metadata['redis_git_sha1']} "
+            f"and pid {redis_metadata['process_id']} for duration of {duration}s. "
+            f"collection in date {timestamp}\n"
+            f"# redis_version: {redis_metadata['redis_version']}\n"
+            f"# redis_git_dirty: {redis_metadata['redis_git_dirty']}\n"
+            f"# tcp_port: {redis_metadata['tcp_port']}\n"
+        )
+
+        # Write file with metadata and profile content
+        with open(filepath, 'w') as f:
+            f.write(metadata_comment)
+            f.write(profile_content)
+
+        logging.info(f"Saved profile to: {filepath}")
+        return filepath
+
+    except Exception as e:
+        logging.error(f"Failed to save profile file: {e}")
+        return None
+
+
+class RemoteProfiler:
+    """
+    Remote profiler class to handle threaded profiling execution.
+    """
+
+    def __init__(self, host: str, port: int, output_dir: str, username: Optional[str] = None, password: Optional[str] = None):
+        self.host = host
+        self.port = port
+        self.output_dir = output_dir
+        self.username = username
+        self.password = password
+        self.profile_thread = None
+        self.profile_result = None
+        self.profile_error = None
+
+    def start_profiling(
+        self,
+        redis_conn,
+        benchmark_name: str,
+        benchmark_duration_seconds: int
+    ) -> bool:
+        """
+        Start profiling in a separate thread.
+
+        Args:
+            redis_conn: Redis connection object
+            benchmark_name: Name of the benchmark
+            benchmark_duration_seconds: Expected benchmark duration
+
+        Returns:
+            True if profiling thread started successfully, False otherwise
+        """
+        try:
+            # Extract Redis metadata and PID
+            redis_metadata = extract_redis_metadata(redis_conn)
+            pid = redis_metadata.get("process_id")
+
+            if pid == "unknown" or pid is None:
+                logging.error("Cannot start remote profiling: Redis PID not available")
+                return False
+
+            # Calculate profiling duration
+            duration = calculate_profile_duration(benchmark_duration_seconds)
+
+            # Start profiling thread
+            self.profile_thread = threading.Thread(
+                target=self._profile_worker,
+                args=(pid, duration, benchmark_name, redis_metadata),
+                daemon=True
+            )
+            self.profile_thread.start()
+
+            logging.info(f"Started remote profiling thread for benchmark: {benchmark_name}")
+            return True
+
+        except Exception as e:
+            logging.error(f"Failed to start remote profiling: {e}")
+            return False
+
+    def _profile_worker(self, pid: int, duration: int, benchmark_name: str, redis_metadata: Dict[str, Any]):
+        """
+        Worker function for profiling thread.
+        """
+        try:
+            # Trigger remote profiling
+            profile_content = trigger_remote_profile(
+                self.host, self.port, pid, duration,
+                username=self.username, password=self.password
+            )
+
+            if profile_content:
+                # Save profile with metadata
+                filepath = save_profile_with_metadata(
+                    profile_content, benchmark_name, self.output_dir, redis_metadata, duration
+                )
+                self.profile_result = filepath
+            else:
+                self.profile_error = "Failed to collect profile content"
+
+        except Exception as e:
+            self.profile_error = f"Profile worker error: {e}"
+            logging.error(self.profile_error)
+
+    def wait_for_completion(self, timeout: int = 60) -> bool:
+        """
+        Wait for profiling thread to complete.
+
+        Args:
+            timeout: Maximum time to wait in seconds
+
+        Returns:
+            True if completed successfully, False if timed out or failed
+        """
+        if self.profile_thread is None:
+            return False
+
+        try:
+            self.profile_thread.join(timeout=timeout)
+
+            if self.profile_thread.is_alive():
+                logging.warning(f"Remote profiling thread did not complete within {timeout}s")
+                return False
+
+            if self.profile_error:
+                logging.error(f"Remote profiling failed: {self.profile_error}")
+                return False
+
+            if self.profile_result:
+                logging.info(f"Remote profiling completed successfully: {self.profile_result}")
+                return True
+            else:
+                logging.warning("Remote profiling completed but no result available")
+                return False
+
+        except Exception as e:
+            logging.error(f"Error waiting for remote profiling completion: {e}")
+            return False
```
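A standalone usage sketch of the module above. The Redis server, the profiling agent serving GET /debug/folded/profile, and all addresses are assumptions for illustration:

```python
import redis

from redis_benchmarks_specification.__runner__.remote_profiling import RemoteProfiler

conn = redis.Redis(host="localhost", port=6379)           # assumed local server
profiler = RemoteProfiler("localhost", 8080, "profiles")  # assumed profiling agent

# start_profiling() spawns a daemon thread that GETs
# http://localhost:8080/debug/folded/profile?pid=<redis pid>&seconds=<clamped duration>
if profiler.start_profiling(conn, "adhoc-check", benchmark_duration_seconds=15):
    # ... drive the workload here while the profile is collected ...
    profiler.wait_for_completion(timeout=60)  # writes profiles/adhoc-check.folded
```

The .folded output is the collapsed-stack format consumed by flame-graph tooling, with the Redis version/SHA/PID header comments prepended by save_profile_with_metadata.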
```diff
--- a/redis_benchmarks_specification/__runner__/runner.py
+++ b/redis_benchmarks_specification/__runner__/runner.py
@@ -62,6 +62,7 @@ from redis_benchmarks_specification.__common__.spec import (
     extract_client_tools,
 )
 from redis_benchmarks_specification.__runner__.args import create_client_runner_args
+from redis_benchmarks_specification.__runner__.remote_profiling import RemoteProfiler
 
 
 def parse_size(size):
```
```diff
@@ -91,6 +92,31 @@ def parse_size(size):
     return int(number * units[unit])
 
 
+def extract_expected_benchmark_duration(benchmark_command_str, override_memtier_test_time):
+    """
+    Extract expected benchmark duration from command string or override.
+
+    Args:
+        benchmark_command_str: The benchmark command string
+        override_memtier_test_time: Override test time value
+
+    Returns:
+        Expected duration in seconds, or 30 as default
+    """
+    if override_memtier_test_time > 0:
+        return override_memtier_test_time
+
+    # Try to extract test-time from command string
+    if "test-time" in benchmark_command_str:
+        # Handle both --test-time (memtier) and -test-time (pubsub-sub-bench)
+        test_time_match = re.search(r"--?test-time[=\s]+(\d+)", benchmark_command_str)
+        if test_time_match:
+            return int(test_time_match.group(1))
+
+    # Default duration if not found
+    return 30
+
+
 def run_multiple_clients(
     benchmark_config,
     docker_client,
```
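The regex accepts both flag spellings and either `=` or whitespace as the separator; a quick illustration (the command strings are made up):

```python
import re

pattern = r"--?test-time[=\s]+(\d+)"
for cmd in (
    "memtier_benchmark --test-time 120 -t 4",  # memtier spelling
    "pubsub-sub-bench -test-time=45",          # pubsub-sub-bench spelling
    "memtier_benchmark -n 100000",             # no test-time present
):
    m = re.search(pattern, cmd)
    print(int(m.group(1)) if m else 30)  # falls back to the 30s default
# 120, 45, 30
```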
```diff
@@ -181,6 +207,30 @@ def run_multiple_clients(
                 unix_socket,
                 None,  # username
             )
+        elif "vector-db-benchmark" in client_tool:
+            (
+                _,
+                benchmark_command_str,
+                arbitrary_command,
+                client_env_vars,
+            ) = prepare_vector_db_benchmark_parameters(
+                client_config,
+                client_tool,
+                port,
+                host,
+                password,
+                local_benchmark_output_filename,
+                oss_cluster_api_enabled,
+                tls_enabled,
+                tls_skip_verify,
+                test_tls_cert,
+                test_tls_key,
+                test_tls_cacert,
+                resp_version,
+                override_memtier_test_time,
+                unix_socket,
+                None,  # username
+            )
         else:
             # Handle other benchmark tools
             (
```
```diff
@@ -224,23 +274,51 @@ def run_multiple_clients(
         # Start container (detached)
         import os
 
-        container = docker_client.containers.run(
-            image=client_image,
-            user=f"{os.getuid()}:{os.getgid()}",
-            volumes={
-                temporary_dir_client: {
-                    "bind": client_mnt_point,
-                    "mode": "rw",
+        # Set working directory based on tool
+        working_dir = benchmark_tool_workdir
+        if "vector-db-benchmark" in client_tool:
+            working_dir = "/app"  # vector-db-benchmark needs to run from /app
+
+        # Prepare container arguments
+        volumes = {
+            temporary_dir_client: {
+                "bind": client_mnt_point,
+                "mode": "rw",
             },
-            },
-            auto_remove=False,
-            privileged=True,
-            working_dir=benchmark_tool_workdir,
-            command=benchmark_command_str,
-            network_mode="host",
-            detach=True,
-            cpuset_cpus=client_cpuset_cpus,
-        )
+        }
+
+        # For vector-db-benchmark, also mount the results directory
+        if "vector-db-benchmark" in client_tool:
+            volumes[temporary_dir_client] = {
+                "bind": "/app/results",
+                "mode": "rw",
+            }
+
+        container_kwargs = {
+            "image": client_image,
+            "volumes": volumes,
+            "auto_remove": False,
+            "privileged": True,
+            "working_dir": working_dir,
+            "command": benchmark_command_str,
+            "network_mode": "host",
+            "detach": True,
+            "cpuset_cpus": client_cpuset_cpus,
+        }
+
+        # Only add user for non-vector-db-benchmark tools to avoid permission issues
+        if "vector-db-benchmark" not in client_tool:
+            container_kwargs["user"] = f"{os.getuid()}:{os.getgid()}"
+
+        # Add environment variables for vector-db-benchmark
+        if "vector-db-benchmark" in client_tool:
+            try:
+                container_kwargs["environment"] = client_env_vars
+            except NameError:
+                # client_env_vars not defined, skip environment variables
+                pass
+
+        container = docker_client.containers.run(**container_kwargs)
 
         containers.append(
             {
```
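One behavior worth noting in the volumes handling above: the docker SDK's volumes mapping is keyed by host path, so when the vector-db-benchmark branch assigns a second spec under the same temporary_dir_client key, it replaces the first bind rather than adding a second mount. Plain dict semantics, illustrated with placeholder paths:

```python
volumes = {"/tmp/client-dir": {"bind": "/mnt/client", "mode": "rw"}}  # first bind
volumes["/tmp/client-dir"] = {"bind": "/app/results", "mode": "rw"}   # same key: replaces it
print(volumes)
# {'/tmp/client-dir': {'bind': '/app/results', 'mode': 'rw'}}
```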
```diff
@@ -334,6 +412,7 @@ def run_multiple_clients(
     aggregated_json = {}
     memtier_json = None
     pubsub_json = None
+    vector_json = None
 
     for result in successful_results:
         client_index = result["client_index"]
```
```diff
@@ -360,6 +439,19 @@ def run_multiple_clients(
                     logging.info(
                         f"Successfully read pubsub-sub-bench JSON output from client {client_index}"
                     )
+                elif "vector-db-benchmark" in tool:
+                    # For vector-db-benchmark, look for summary JSON file
+                    summary_files = [f for f in os.listdir(temporary_dir_client) if f.endswith("-summary.json")]
+                    if summary_files:
+                        summary_filepath = os.path.join(temporary_dir_client, summary_files[0])
+                        try:
+                            with open(summary_filepath, 'r') as f:
+                                vector_json = json.load(f)
+                            logging.info(f"Successfully read vector-db-benchmark JSON output from {summary_files[0]}")
+                        except Exception as e:
+                            logging.warning(f"Failed to read vector-db-benchmark JSON from {summary_files[0]}: {e}")
+                    else:
+                        logging.warning(f"No vector-db-benchmark summary JSON file found for client {client_index}")
 
                 logging.info(
                     f"Successfully read JSON output from client {client_index} ({tool})"
```
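The summary-file lookup is a simple suffix filter on the client's temporary directory; a sketch (the directory and file name are illustrative — vector-db-benchmark's actual naming depends on the engine/dataset pair):

```python
import os

temporary_dir_client = "/tmp/client-output"  # stand-in path
summary_files = [
    f for f in os.listdir(temporary_dir_client) if f.endswith("-summary.json")
]
# e.g. ['vectorsets-fp32-default-random-100-summary.json']; only the first match is parsed
```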
```diff
@@ -376,16 +468,32 @@ def run_multiple_clients(
                     f"JSON output file not found for client {client_index}: {json_filepath}"
                 )
 
-    # Merge JSON outputs from
-    if memtier_json and pubsub_json:
+    # Merge JSON outputs from all tools
+    if memtier_json and pubsub_json and vector_json:
+        # Use memtier as base and add other metrics
+        aggregated_json = memtier_json.copy()
+        aggregated_json.update(pubsub_json)
+        aggregated_json.update(vector_json)
+        aggregated_stdout = json.dumps(aggregated_json, indent=2)
+        logging.info("Using merged JSON results from memtier, pubsub-sub-bench, and vector-db-benchmark clients")
+    elif memtier_json and pubsub_json:
         # Use memtier as base and add pubsub metrics
         aggregated_json = memtier_json.copy()
-        # Add pubsub metrics to the aggregated result
         aggregated_json.update(pubsub_json)
         aggregated_stdout = json.dumps(aggregated_json, indent=2)
-        logging.info(
-            "Using merged JSON results from memtier and pubsub-sub-bench clients"
-        )
+        logging.info("Using merged JSON results from memtier and pubsub-sub-bench clients")
+    elif memtier_json and vector_json:
+        # Use memtier as base and add vector metrics
+        aggregated_json = memtier_json.copy()
+        aggregated_json.update(vector_json)
+        aggregated_stdout = json.dumps(aggregated_json, indent=2)
+        logging.info("Using merged JSON results from memtier and vector-db-benchmark clients")
+    elif pubsub_json and vector_json:
+        # Use pubsub as base and add vector metrics
+        aggregated_json = pubsub_json.copy()
+        aggregated_json.update(vector_json)
+        aggregated_stdout = json.dumps(aggregated_json, indent=2)
+        logging.info("Using merged JSON results from pubsub-sub-bench and vector-db-benchmark clients")
     elif memtier_json:
         # Only memtier available
         aggregated_json = memtier_json
```
```diff
@@ -396,12 +504,15 @@ def run_multiple_clients(
         aggregated_json = pubsub_json
         aggregated_stdout = json.dumps(aggregated_json, indent=2)
         logging.info("Using JSON results from pubsub-sub-bench client only")
+    elif vector_json:
+        # Only vector-db-benchmark available
+        aggregated_json = vector_json
+        aggregated_stdout = json.dumps(aggregated_json, indent=2)
+        logging.info("Using JSON results from vector-db-benchmark client only")
     else:
         # Fall back to concatenated stdout
         aggregated_stdout = "\n".join([r["stdout"] for r in successful_results])
-        logging.warning(
-            "No JSON results found, falling back to concatenated stdout"
-        )
+        logging.warning("No JSON results found, falling back to concatenated stdout")
 
     return aggregated_stdout, results
 
```
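The merge is plain dict.update layering, so when two tools report the same top-level key, the later update wins; a sketch with illustrative values:

```python
import json

memtier_json = {"Totals": {"Ops/sec": 125000.0}}                          # illustrative
vector_json = {"precision_summary": {"1.0000": {"avg_precision": 0.98}}}  # illustrative

aggregated = memtier_json.copy()
aggregated.update(vector_json)  # later tools overwrite colliding top-level keys
print(json.dumps(aggregated, indent=2))
```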
```diff
@@ -665,6 +776,71 @@ def prepare_memtier_benchmark_parameters(
     return None, benchmark_command_str, arbitrary_command
 
 
+def prepare_vector_db_benchmark_parameters(
+    clientconfig,
+    full_benchmark_path,
+    port,
+    server,
+    password,
+    local_benchmark_output_filename,
+    oss_cluster_api_enabled=False,
+    tls_enabled=False,
+    tls_skip_verify=False,
+    tls_cert=None,
+    tls_key=None,
+    tls_cacert=None,
+    resp_version=None,
+    override_test_time=0,
+    unix_socket="",
+    username=None,
+):
+    """
+    Prepare vector-db-benchmark command parameters
+    """
+    arbitrary_command = False
+
+    benchmark_command = [
+        "/app/run.py",
+        "--host",
+        f"{server}",
+    ]
+
+    # Add port as environment variable (vector-db-benchmark uses env vars)
+    env_vars = {}
+    if port is not None:
+        env_vars["REDIS_PORT"] = str(port)
+    if password is not None:
+        env_vars["REDIS_AUTH"] = password
+    if username is not None:
+        env_vars["REDIS_USER"] = username
+
+    # Add engines parameter
+    engines = clientconfig.get("engines", "vectorsets-fp32-default")
+    benchmark_command.extend(["--engines", engines])
+
+    # Add datasets parameter
+    datasets = clientconfig.get("datasets", "random-100")
+    benchmark_command.extend(["--datasets", datasets])
+
+    # Add other optional parameters
+    if "parallels" in clientconfig:
+        benchmark_command.extend(["--parallels", str(clientconfig["parallels"])])
+
+    if "queries" in clientconfig:
+        benchmark_command.extend(["--queries", str(clientconfig["queries"])])
+
+    if "timeout" in clientconfig:
+        benchmark_command.extend(["--timeout", str(clientconfig["timeout"])])
+
+    # Add custom arguments if specified
+    if "arguments" in clientconfig:
+        benchmark_command_str = " ".join(benchmark_command) + " " + clientconfig["arguments"]
+    else:
+        benchmark_command_str = " ".join(benchmark_command)
+
+    return benchmark_command, benchmark_command_str, arbitrary_command, env_vars
+
+
 def prepare_pubsub_sub_bench_parameters(
     clientconfig,
     full_benchmark_path,
```
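Calling the new helper with a sample clientconfig (values hypothetical) shows the command-string and env-var split it produces:

```python
cmd_list, cmd_str, arbitrary, env_vars = prepare_vector_db_benchmark_parameters(
    {"engines": "vectorsets-fp32-default", "datasets": "random-100", "queries": 1000},
    "vector-db-benchmark",  # full_benchmark_path; not used in the command itself
    6379,
    "127.0.0.1",
    None,  # no password -> no REDIS_AUTH
    "out.json",
)
print(cmd_str)
# /app/run.py --host 127.0.0.1 --engines vectorsets-fp32-default --datasets random-100 --queries 1000
print(env_vars)
# {'REDIS_PORT': '6379'}
```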
```diff
@@ -899,6 +1075,23 @@ def process_self_contained_coordinator_stream(
                 redis_pid = conn.info()["process_id"]
                 redis_pids.append(redis_pid)
 
+            # Check if all tested commands are supported by this Redis instance
+            supported_commands = get_supported_redis_commands(redis_conns)
+            commands_supported, unsupported_commands = check_test_command_support(
+                benchmark_config, supported_commands
+            )
+
+            if not commands_supported:
+                logging.warning(
+                    f"Skipping test {test_name} due to unsupported commands: {unsupported_commands}"
+                )
+                delete_temporary_files(
+                    temporary_dir_client=temporary_dir_client,
+                    full_result_path=None,
+                    benchmark_tool_global=benchmark_tool_global,
+                )
+                continue
+
             github_actor = f"{tf_triggering_env}-{running_platform}"
             dso = "redis-server"
             profilers_artifacts_matrix = []
```
```diff
@@ -1208,6 +1401,30 @@ def process_self_contained_coordinator_stream(
                     unix_socket,
                     None,  # username
                 )
+            elif "vector-db-benchmark" in benchmark_tool:
+                (
+                    _,
+                    benchmark_command_str,
+                    arbitrary_command,
+                    env_vars,
+                ) = prepare_vector_db_benchmark_parameters(
+                    benchmark_config["clientconfig"],
+                    full_benchmark_path,
+                    port,
+                    host,
+                    password,
+                    local_benchmark_output_filename,
+                    oss_cluster_api_enabled,
+                    tls_enabled,
+                    tls_skip_verify,
+                    test_tls_cert,
+                    test_tls_key,
+                    test_tls_cacert,
+                    resp_version,
+                    override_memtier_test_time,
+                    unix_socket,
+                    None,  # username
+                )
             else:
                 # prepare the benchmark command for other tools
                 (
```
```diff
@@ -1241,6 +1458,40 @@ def process_self_contained_coordinator_stream(
                 profiler_call_graph_mode,
             )
 
+            # start remote profiling if enabled
+            remote_profiler = None
+            if args.enable_remote_profiling:
+                try:
+                    remote_profiler = RemoteProfiler(
+                        args.remote_profile_host,
+                        args.remote_profile_port,
+                        args.remote_profile_output_dir,
+                        args.remote_profile_username,
+                        args.remote_profile_password
+                    )
+
+                    # Extract expected benchmark duration
+                    expected_duration = extract_expected_benchmark_duration(
+                        benchmark_command_str, override_memtier_test_time
+                    )
+
+                    # Start remote profiling
+                    profiling_started = remote_profiler.start_profiling(
+                        redis_conns[0] if redis_conns else None,
+                        test_name,
+                        expected_duration
+                    )
+
+                    if profiling_started:
+                        logging.info(f"Started remote profiling for test: {test_name}")
+                    else:
+                        logging.warning(f"Failed to start remote profiling for test: {test_name}")
+                        remote_profiler = None
+
+                except Exception as e:
+                    logging.error(f"Error starting remote profiling: {e}")
+                    remote_profiler = None
+
             # run the benchmark
             benchmark_start_time = datetime.datetime.now()
 
```
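Note the profile window passed to the agent is not the raw benchmark duration: calculate_profile_duration (in the new module above) clamps it to the [10, 30] second range:

```python
for seconds in (5, 20, 120):
    print(seconds, "->", min(max(seconds, 10), 30))
# 5 -> 10, 20 -> 20, 120 -> 30
```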
```diff
@@ -1305,22 +1556,53 @@ def process_self_contained_coordinator_stream(
             )
 
             # Use explicit container management for single client
-            container = docker_client.containers.run(
-                image=client_container_image,
-                user=f"{os.getuid()}:{os.getgid()}",
-                volumes={
-                    temporary_dir_client: {
-                        "bind": client_mnt_point,
-                        "mode": "rw",
+            import os
+
+            # Set working directory based on tool
+            working_dir = benchmark_tool_workdir
+            if "vector-db-benchmark" in benchmark_tool:
+                working_dir = "/app"  # vector-db-benchmark needs to run from /app
+
+            # Prepare volumes
+            volumes = {
+                temporary_dir_client: {
+                    "bind": client_mnt_point,
+                    "mode": "rw",
                 },
-                },
-                auto_remove=False,
-                working_dir=benchmark_tool_workdir,
-                command=benchmark_command_str,
-                network_mode="host",
-                detach=True,
-                cpuset_cpus=client_cpuset_cpus,
-            )
+            }
+
+            # For vector-db-benchmark, also mount the results directory
+            if "vector-db-benchmark" in benchmark_tool:
+                volumes[temporary_dir_client] = {
+                    "bind": "/app/results",
+                    "mode": "rw",
+                }
+
+            container_kwargs = {
+                "image": client_container_image,
+                "volumes": volumes,
+                "auto_remove": False,
+                "privileged": True,
+                "working_dir": working_dir,
+                "command": benchmark_command_str,
+                "network_mode": "host",
+                "detach": True,
+                "cpuset_cpus": client_cpuset_cpus,
+            }
+
+            # Only add user for non-vector-db-benchmark tools to avoid permission issues
+            if "vector-db-benchmark" not in benchmark_tool:
+                container_kwargs["user"] = f"{os.getuid()}:{os.getgid()}"
+
+            # Add environment variables for vector-db-benchmark
+            if "vector-db-benchmark" in benchmark_tool:
+                try:
+                    container_kwargs["environment"] = env_vars
+                except NameError:
+                    # env_vars not defined, skip environment variables
+                    pass
+
+            container = docker_client.containers.run(**container_kwargs)
 
             # Wait for container and get output
             try:
```
```diff
@@ -1370,7 +1652,25 @@ def process_self_contained_coordinator_stream(
                 test_name,
             )
 
+            # wait for remote profiling completion
+            if remote_profiler is not None:
+                try:
+                    logging.info("Waiting for remote profiling to complete...")
+                    profiling_success = remote_profiler.wait_for_completion(timeout=60)
+                    if profiling_success:
+                        logging.info("Remote profiling completed successfully")
+                    else:
+                        logging.warning("Remote profiling did not complete successfully")
+                except Exception as e:
+                    logging.error(f"Error waiting for remote profiling completion: {e}")
+
             logging.info("Printing client tool stdout output")
+            if client_container_stdout:
+                print("=== Container Output ===")
+                print(client_container_stdout)
+                print("=== End Container Output ===")
+            else:
+                logging.warning("No container output captured")
 
             used_memory_check(
                 test_name,
```
```diff
@@ -1428,13 +1728,30 @@ def process_self_contained_coordinator_stream(
                 full_result_path = "{}/{}".format(
                     temporary_dir_client, local_benchmark_output_filename
                 )
+            elif "vector-db-benchmark" in benchmark_tool:
+                # For vector-db-benchmark, look for summary JSON file
+                import os
+                summary_files = [f for f in os.listdir(temporary_dir_client) if f.endswith("-summary.json")]
+                if summary_files:
+                    full_result_path = os.path.join(temporary_dir_client, summary_files[0])
+                    logging.info(f"Found vector-db-benchmark summary file: {summary_files[0]}")
+                else:
+                    logging.warning("No vector-db-benchmark summary JSON file found")
+                    # Create empty results dict to avoid crash
+                    results_dict = {}
+
             logging.info(f"Reading results json from {full_result_path}")
 
-            with open(
-                full_result_path,
-                "r",
-            ) as json_file:
-                results_dict = json.load(json_file)
+            if "vector-db-benchmark" in benchmark_tool and not os.path.exists(full_result_path):
+                # Handle case where vector-db-benchmark didn't produce results
+                results_dict = {}
+                logging.warning("Vector-db-benchmark did not produce results file")
+            else:
+                with open(
+                    full_result_path,
+                    "r",
+                ) as json_file:
+                    results_dict = json.load(json_file)
             print_results_table_stdout(
                 benchmark_config,
                 default_metrics,
```
```diff
@@ -1661,7 +1978,32 @@ def print_results_table_stdout(
     ]
     results_matrix = extract_results_table(metrics, results_dict)
 
-    results_matrix = [[x[0], f"{x[3]:.3f}"] for x in results_matrix]
+    # Use resolved metric name for precision_summary metrics, otherwise use original path
+    def get_display_name(x):
+        # For precision_summary metrics with wildcards, construct the resolved path
+        if (len(x) > 1 and
+            isinstance(x[0], str) and
+            "precision_summary" in x[0] and
+            "*" in x[0]):
+
+            # Look for the precision level in the cleaned metrics logs
+            # We need to find the corresponding cleaned metric to get the precision level
+            # For now, let's extract it from the time series logs that we know are working
+            # The pattern is: replace "*" with the actual precision level
+
+            # Since we know from logs that the precision level is available,
+            # let's reconstruct it from the metric context path (x[1]) if available
+            if len(x) > 1 and isinstance(x[1], str) and x[1].startswith("'") and x[1].endswith("'"):
+                precision_level = x[1]  # This should be something like "'1.0000'"
+                resolved_path = x[0].replace("*", precision_level)
+                return resolved_path
+
+        return x[0]  # Use original path
+
+    results_matrix = [
+        [get_display_name(x), f"{x[3]:.3f}"]
+        for x in results_matrix
+    ]
     writer = MarkdownTableWriter(
         table_name=table_name,
         headers=results_matrix_headers,
```
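On a sample extract_results_table row (layout [jsonpath, context_path, ..., value] per the code above; values illustrative), the resolution turns the wildcard into the quoted precision level:

```python
row = ["$.precision_summary.*.avg_precision", "'1.0000'", None, 0.987]  # illustrative row
if "precision_summary" in row[0] and "*" in row[0]:
    print(row[0].replace("*", row[1]))
# $.precision_summary.'1.0000'.avg_precision
```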
```diff
@@ -1675,14 +2017,19 @@ def print_redis_info_section(redis_conns):
     if redis_conns is not None and len(redis_conns) > 0:
         try:
             redis_info = redis_conns[0].info()
+            server_name = "redis"
+            if "server_name" in redis_info:
+                server_name = redis_info['server_name']
 
             print("\n# Redis Server Information")
             redis_info_data = [
-                ["Redis Version", redis_info.get("redis_version", "unknown")],
-                ["Redis Git SHA1", redis_info.get("redis_git_sha1", "unknown")],
-                ["Redis Git Dirty", str(redis_info.get("redis_git_dirty", "unknown"))],
-                ["Redis Build ID", redis_info.get("redis_build_id", "unknown")],
-                ["Redis Mode", redis_info.get("redis_mode", "unknown")],
+                [f"{server_name} version", redis_info.get(f"{server_name}_version", "unknown")],
+                ["redis version", redis_info.get("redis_version", "unknown")],
+                ["io_threads_active", redis_info.get("io_threads_active", "unknown")],
+                [f"{server_name} Git SHA1", redis_info.get("redis_git_sha1", "unknown")],
+                [f"{server_name} Git Dirty", str(redis_info.get("redis_git_dirty", "unknown"))],
+                [f"{server_name} Build ID", redis_info.get("redis_build_id", "unknown")],
+                [f"{server_name} Mode", redis_info.get("redis_mode", "unknown")],
                 ["OS", redis_info.get("os", "unknown")],
                 ["Arch Bits", str(redis_info.get("arch_bits", "unknown"))],
                 ["GCC Version", redis_info.get("gcc_version", "unknown")],
```
```diff
@@ -1710,6 +2057,78 @@ def print_redis_info_section(redis_conns):
         logging.warning(f"Failed to collect Redis server information: {e}")
 
 
+def get_supported_redis_commands(redis_conns):
+    """Get list of supported Redis commands from the server"""
+    if redis_conns is not None and len(redis_conns) > 0:
+        try:
+            # Execute COMMAND to get all supported commands
+            commands_info = redis_conns[0].execute_command("COMMAND")
+            logging.info(f"COMMAND response type: {type(commands_info)}, length: {len(commands_info) if hasattr(commands_info, '__len__') else 'N/A'}")
+
+            # Extract command names
+            supported_commands = set()
+
+            if isinstance(commands_info, dict):
+                # COMMAND response is a dict with command names as keys
+                for cmd_name in commands_info.keys():
+                    if isinstance(cmd_name, bytes):
+                        cmd_name = cmd_name.decode('utf-8')
+                    supported_commands.add(str(cmd_name).upper())
+            elif isinstance(commands_info, (list, tuple)):
+                # Fallback for list format (first element of each command info array)
+                for cmd_info in commands_info:
+                    if isinstance(cmd_info, (list, tuple)) and len(cmd_info) > 0:
+                        cmd_name = cmd_info[0]
+                        if isinstance(cmd_name, bytes):
+                            cmd_name = cmd_name.decode('utf-8')
+                        supported_commands.add(str(cmd_name).upper())
+
+            logging.info(f"Retrieved {len(supported_commands)} supported Redis commands")
+
+            # Log some sample commands for debugging
+            if supported_commands:
+                sample_commands = sorted(list(supported_commands))[:10]
+                logging.info(f"Sample commands: {sample_commands}")
+
+                # Check specifically for vector commands
+                vector_commands = [cmd for cmd in supported_commands if cmd.startswith('V')]
+                if vector_commands:
+                    logging.info(f"Vector commands found: {sorted(vector_commands)}")
+
+            return supported_commands
+        except Exception as e:
+            logging.warning(f"Failed to get supported Redis commands: {e}")
+            logging.warning("Proceeding without command validation")
+            return None
+    return None
+
+
+def check_test_command_support(benchmark_config, supported_commands):
+    """Check if all tested-commands in the benchmark config are supported"""
+    if supported_commands is None:
+        logging.warning("No supported commands list available, skipping command check")
+        return True, []
+
+    if "tested-commands" not in benchmark_config:
+        logging.info("No tested-commands specified in benchmark config")
+        return True, []
+
+    tested_commands = benchmark_config["tested-commands"]
+    unsupported_commands = []
+
+    for cmd in tested_commands:
+        cmd_upper = cmd.upper()
+        if cmd_upper not in supported_commands:
+            unsupported_commands.append(cmd)
+
+    if unsupported_commands:
+        logging.warning(f"Unsupported commands found: {unsupported_commands}")
+        return False, unsupported_commands
+    else:
+        logging.info(f"All tested commands are supported: {tested_commands}")
+        return True, []
+
+
 def prepare_overall_total_test_results(
     benchmark_config,
     default_metrics,
```
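Wired together, the two helpers gate a test on server capabilities; a sketch against a live server (redis-py connection assumed; VADD/VSIM stand in for the vector-set commands a test might declare):

```python
import redis

from redis_benchmarks_specification.__runner__.runner import (
    check_test_command_support,
    get_supported_redis_commands,
)

conn = redis.Redis(host="localhost", port=6379)
supported = get_supported_redis_commands([conn])
ok, missing = check_test_command_support(
    {"tested-commands": ["VADD", "VSIM"]}, supported
)
if not ok:
    print(f"test would be skipped; server lacks: {missing}")
```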
```diff
@@ -1728,8 +2147,25 @@ def prepare_overall_total_test_results(
         None,
     )
     current_test_results_matrix = extract_results_table(metrics, results_dict)
+
+    # Use the same display name logic as in the individual test results
+    def get_overall_display_name(x):
+        # For precision_summary metrics with wildcards, construct the resolved path
+        if (len(x) > 1 and
+            isinstance(x[0], str) and
+            "precision_summary" in x[0] and
+            "*" in x[0]):
+
+            # Reconstruct resolved path from metric context path (x[1]) if available
+            if len(x) > 1 and isinstance(x[1], str) and x[1].startswith("'") and x[1].endswith("'"):
+                precision_level = x[1]  # This should be something like "'1.0000'"
+                resolved_path = x[0].replace("*", precision_level)
+                return resolved_path
+
+        return x[0]  # Use original path
+
     current_test_results_matrix = [
-        [test_name, x[0], f"{x[3]:.3f}"] for x in current_test_results_matrix
+        [test_name, get_overall_display_name(x), f"{x[3]:.3f}"] for x in current_test_results_matrix
     ]
     overall_results_matrix.extend(current_test_results_matrix)
 
```
```diff
--- a/redis_benchmarks_specification-0.1.273.dist-info/METADATA
+++ b/redis_benchmarks_specification-0.1.275.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: redis-benchmarks-specification
-Version: 0.1.273
+Version: 0.1.275
 Summary: The Redis benchmarks specification describes the cross-language/tools requirements and expectations to foster performance and observability standards around redis related technologies. Members from both industry and academia, including organizations and individuals are encouraged to contribute.
 Author: filipecosta90
 Author-email: filipecosta.90@gmail.com
```
```diff
--- a/redis_benchmarks_specification-0.1.273.dist-info/RECORD
+++ b/redis_benchmarks_specification-0.1.275.dist-info/RECORD
@@ -17,14 +17,15 @@ redis_benchmarks_specification/__common__/github.py,sha256=9TZtnISsSgXTSAN_VQejo
 redis_benchmarks_specification/__common__/package.py,sha256=4uVt1BAZ999LV2rZkq--Tk6otAVIf9YR3g3KGeUpiW4,834
 redis_benchmarks_specification/__common__/runner.py,sha256=2IpMl0IEHi2IZvfLc4_h0e-E3ZfnlB8EkCA_SE8VDCY,7033
 redis_benchmarks_specification/__common__/spec.py,sha256=D_SN48wg6NMthW_-OS1H5bydSDiuZpfd4WPPj7Vfwmc,5760
-redis_benchmarks_specification/__common__/timeseries.py,sha256=
+redis_benchmarks_specification/__common__/timeseries.py,sha256=Jd8kGrLiuIs_1508F0MfiM4qlN_6gZWazG8arHxz9xA,52643
 redis_benchmarks_specification/__compare__/__init__.py,sha256=DtBXRp0Q01XgCFmY-1OIePMyyYihVNAjZ1Y8zwqSDN0,101
 redis_benchmarks_specification/__compare__/args.py,sha256=FlKD1wutBoKxeahpXw1gY2H_1FOPH5y-o5QsIPfFsT0,6802
 redis_benchmarks_specification/__compare__/compare.py,sha256=O6ZuB6Ln5xkTX5jRaizpj1PTPhmoETcf-_PY-A_CGr8,57179
 redis_benchmarks_specification/__init__.py,sha256=YQIEx2sLPPA0JR9OuCuMNMNtm-f_gqDKgzvNJnkGNKY,491
 redis_benchmarks_specification/__runner__/__init__.py,sha256=l-G1z-t6twUgi8QLueqoTQLvJmv3hJoEYskGm6H7L6M,83
-redis_benchmarks_specification/__runner__/args.py,sha256=
-redis_benchmarks_specification/__runner__/runner.py,sha256=
+redis_benchmarks_specification/__runner__/args.py,sha256=HAqOfkjSU4Y6odJRvhm0q8G9v46_g4uKTj0rDTaCgQg,8991
+redis_benchmarks_specification/__runner__/remote_profiling.py,sha256=338UHq6gDH4dy6q8nj2ewipaHVj5fPmsNiHbFfaELfk,11378
+redis_benchmarks_specification/__runner__/runner.py,sha256=x3rIb5zJd0BK6hsWHgTCT6JcdFMAksZMRlWuUqdaNWg,94332
 redis_benchmarks_specification/__self_contained_coordinator__/__init__.py,sha256=l-G1z-t6twUgi8QLueqoTQLvJmv3hJoEYskGm6H7L6M,83
 redis_benchmarks_specification/__self_contained_coordinator__/args.py,sha256=uxBjdQ78klvsVi6lOfGYQVaWIxc8OI-DwYKY16SgvCY,5952
 redis_benchmarks_specification/__self_contained_coordinator__/artifacts.py,sha256=OVHqJzDgeSSRfUSiKp1ZTAVv14PvSbk-5yJsAAoUfpw,936
```
```diff
@@ -50,7 +51,7 @@ redis_benchmarks_specification/setups/builders/gcc:8.5.0-amd64-debian-buster-def
 redis_benchmarks_specification/setups/builders/gcc:8.5.0-arm64-debian-buster-default.yml,sha256=I6qEO7MZKduVx6xbBrRniE1i6NK9R8-uQXdQJT9o5G4,511
 redis_benchmarks_specification/setups/platforms/aws-ec2-1node-c5.4xlarge.yml,sha256=l7HsjccpebwZXeutnt3SHSETw4iiRwQ9dCDXLOySSRQ,622
 redis_benchmarks_specification/setups/topologies/topologies.yml,sha256=N2UOKA8tG_pLpaSFtn7WdUmDNYwxRyTv9Ln_PCOPTco,3261
-redis_benchmarks_specification/test-suites/defaults.yml,sha256=
+redis_benchmarks_specification/test-suites/defaults.yml,sha256=EJHv9INdjoNVMOgHY8qo4IVCHfvXVz5sv7Vxtr3DAIE,1392
 redis_benchmarks_specification/test-suites/generate.py,sha256=1QJBuWiouJ5OLil_r4OMG_UtZkmA8TLcyPlQAUuxCUw,4175
 redis_benchmarks_specification/test-suites/memtier_benchmark-100Kkeys-hash-hgetall-50-fields-100B-values.yml,sha256=BR5hCMZtC_rrbWtGuGs96yZXIxeJobB59MY1hqR0m0E,2009
 redis_benchmarks_specification/test-suites/memtier_benchmark-100Kkeys-load-hash-20-fields-with-1B-values-pipeline-30.yml,sha256=46e6GJWv9pDzd4YZXM62HWuhAX0kWtq5WH4F7d2Iqb0,1758
```
```diff
@@ -274,8 +275,8 @@ redis_benchmarks_specification/test-suites/memtier_benchmark-nokeys-pubsub-publi
 redis_benchmarks_specification/test-suites/memtier_benchmark-nokeys-server-time-pipeline-10.yml,sha256=rJuWWXubUeRKQ2GSfHlbPMLeOyM9Eu_MzvN2vgKcAhA,672
 redis_benchmarks_specification/test-suites/template.txt,sha256=d_edIE7Sxa5X7I2yG-Io0bPdbDIHR0oWFoCA3XUt_EU,435
 redis_benchmarks_specification/vector-search-test-suites/vector_db_benchmark_test.yml,sha256=uhaSP6YUVmPvZU-qMtPPGdvNEUgUBqOfveUbeJ9WsbI,972
-redis_benchmarks_specification-0.1.273.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
-redis_benchmarks_specification-0.1.273.dist-info/METADATA,sha256=
-redis_benchmarks_specification-0.1.273.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
-redis_benchmarks_specification-0.1.273.dist-info/entry_points.txt,sha256=x5WBXCZsnDRTZxV7SBGmC65L2k-ygdDOxV8vuKN00Nk,715
-redis_benchmarks_specification-0.1.273.dist-info/RECORD,,
+redis_benchmarks_specification-0.1.275.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+redis_benchmarks_specification-0.1.275.dist-info/METADATA,sha256=2VD-N9KbazAqJWh-P0qpG0QDliIZWFsmYogwkrk_TL8,22726
+redis_benchmarks_specification-0.1.275.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+redis_benchmarks_specification-0.1.275.dist-info/entry_points.txt,sha256=x5WBXCZsnDRTZxV7SBGmC65L2k-ygdDOxV8vuKN00Nk,715
+redis_benchmarks_specification-0.1.275.dist-info/RECORD,,
```