redis-benchmarks-specification 0.1.333__py3-none-any.whl → 0.1.334__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of redis-benchmarks-specification might be problematic.
- redis_benchmarks_specification/__runner__/runner.py +17 -1
- redis_benchmarks_specification/__self_contained_coordinator__/self_contained_coordinator.py +312 -42
- {redis_benchmarks_specification-0.1.333.dist-info → redis_benchmarks_specification-0.1.334.dist-info}/METADATA +1 -1
- {redis_benchmarks_specification-0.1.333.dist-info → redis_benchmarks_specification-0.1.334.dist-info}/RECORD +7 -7
- {redis_benchmarks_specification-0.1.333.dist-info → redis_benchmarks_specification-0.1.334.dist-info}/LICENSE +0 -0
- {redis_benchmarks_specification-0.1.333.dist-info → redis_benchmarks_specification-0.1.334.dist-info}/WHEEL +0 -0
- {redis_benchmarks_specification-0.1.333.dist-info → redis_benchmarks_specification-0.1.334.dist-info}/entry_points.txt +0 -0

redis_benchmarks_specification/__runner__/runner.py

@@ -182,7 +182,7 @@ def validate_benchmark_metrics(
         "all_stats.totals.ops/sec",
     ]

-    latency_patterns = ["
+    latency_patterns = ["p50", "p95", "p99", "p999", "percentile"]

     validation_errors = []

@@ -195,6 +195,22 @@ def validate_benchmark_metrics(
     elif isinstance(data, (int, float)):
         metric_path_lower = path.lower()

+        # Skip Waits metrics as they can legitimately be 0
+        if "waits" in metric_path_lower:
+            return
+
+        # Skip general latency metrics that can legitimately be 0
+        # Only validate specific percentile latencies (p50, p95, etc.)
+        if any(
+            pattern in metric_path_lower
+            for pattern in [
+                "average latency",
+                "totals.latency",
+                "all_stats.totals.latency",
+            ]
+        ):
+            return
+
         # Check throughput metrics
         for pattern in throughput_patterns:
             if pattern in metric_path_lower:
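In plain terms, the added runner.py lines tighten the zero-value validation: Waits counters and aggregate or average latency totals are allowed to read 0, and only throughput metrics and specific percentile latencies (p50, p95, p99, p999) are treated as suspicious when they are 0. A minimal standalone sketch of that rule follows; the helper and constant names are hypothetical and not part of runner.py.

# Hypothetical helper and constants mirroring the skip/flag rules added above.
THROUGHPUT_PATTERNS = ["ops/sec", "totals.ops/sec", "all_stats.totals.ops/sec"]
LATENCY_PATTERNS = ["p50", "p95", "p99", "p999", "percentile"]
SKIPPED_LATENCY_AGGREGATES = ["average latency", "totals.latency", "all_stats.totals.latency"]


def zero_value_error(path, value):
    """Return an error string if a zero metric looks suspicious, else None."""
    if value != 0:
        return None
    metric_path_lower = path.lower()
    # Waits metrics can legitimately be 0.
    if "waits" in metric_path_lower:
        return None
    # Aggregate/average latency totals can legitimately be 0; only the
    # specific percentile latencies are validated.
    if any(p in metric_path_lower for p in SKIPPED_LATENCY_AGGREGATES):
        return None
    if any(p in metric_path_lower for p in THROUGHPUT_PATTERNS + LATENCY_PATTERNS):
        return f"metric {path} is 0"
    return None


# Illustrative paths: a zero p50 percentile is flagged, a zero Waits counter is not.
assert zero_value_error("ALL STATS.Totals.Percentile Latencies.p50.00", 0) is not None
assert zero_value_error("ALL STATS.Waits.Count", 0) is None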
redis_benchmarks_specification/__self_contained_coordinator__/self_contained_coordinator.py

@@ -191,6 +191,22 @@ class CoordinatorHTTPHandler(BaseHTTPRequestHandler):
                 "service": "redis-benchmarks-self-contained-coordinator",
             }
             self.wfile.write(json.dumps(response).encode())
+
+        elif parsed_path.path == "/containers":
+            # Check for stuck containers
+            stuck_containers = self._check_stuck_containers()
+
+            self.send_response(200)
+            self.send_header("Content-type", "application/json")
+            self.end_headers()
+            response = {
+                "status": "success",
+                "stuck_containers": stuck_containers,
+                "total_stuck": len(stuck_containers),
+                "timestamp": datetime.datetime.utcnow().isoformat(),
+            }
+            self.wfile.write(json.dumps(response).encode())
+
         else:
             self.send_response(404)
             self.send_header("Content-type", "application/json")
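A hedged usage sketch for the new endpoint: assuming the coordinator HTTP server runs on its default port 8080 (see start_http_server(port=8080) further down) and that the requests library is available, a client could query the stuck-container report as shown below. The host name and the field handling are illustrative.

# Illustrative client for the new GET /containers endpoint; host, port, and the
# use of the requests library are assumptions.
import requests

resp = requests.get("http://localhost:8080/containers", timeout=5)
resp.raise_for_status()
report = resp.json()

# Expected shape, based on the handler above:
# {"status": "success", "stuck_containers": [...], "total_stuck": N, "timestamp": "..."}
for entry in report["stuck_containers"]:
    print(entry["name"], entry["image"], entry["uptime_hours"], entry["status"])
print("total stuck:", report["total_stuck"])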
@@ -259,7 +275,9 @@ class CoordinatorHTTPHandler(BaseHTTPRequestHandler):
             flush_time = datetime.datetime.utcnow()
             _flush_timestamp = flush_time

-            logging.info(
+            logging.info(
+                "Flush requested via HTTP endpoint - stopping all containers and processes"
+            )

             # Perform flush cleanup
             self._perform_flush_cleanup()
@@ -271,7 +289,7 @@ class CoordinatorHTTPHandler(BaseHTTPRequestHandler):
                 "status": "success",
                 "message": "Flush completed - all containers stopped and processes killed",
                 "flush_timestamp": flush_time.isoformat(),
-                "timestamp": datetime.datetime.utcnow().isoformat()
+                "timestamp": datetime.datetime.utcnow().isoformat(),
             }
             self.wfile.write(json.dumps(response).encode())

@@ -283,7 +301,7 @@ class CoordinatorHTTPHandler(BaseHTTPRequestHandler):
             response = {
                 "status": "error",
                 "message": f"Flush failed: {str(e)}",
-                "timestamp": datetime.datetime.utcnow().isoformat()
+                "timestamp": datetime.datetime.utcnow().isoformat(),
             }
             self.wfile.write(json.dumps(response).encode())

@@ -300,27 +318,114 @@ class CoordinatorHTTPHandler(BaseHTTPRequestHandler):
         # Kill all memtier processes
         try:
             logging.info("Killing all memtier_benchmark processes")
-            subprocess.run(
-
+            result = subprocess.run(
+                ["pkill", "-f", "memtier_benchmark"], capture_output=True, text=True
+            )
+            if result.returncode == 0:
+                logging.info("Successfully killed memtier_benchmark processes")
+            else:
+                logging.info("No memtier_benchmark processes found to kill")
+
+            result = subprocess.run(
+                ["pkill", "-f", "memtier"], capture_output=True, text=True
+            )
+            if result.returncode == 0:
+                logging.info("Successfully killed memtier processes")
+            else:
+                logging.info("No memtier processes found to kill")
         except Exception as e:
             logging.warning(f"Error killing memtier processes: {e}")

-        # Stop all Docker containers
+        # Stop all Docker containers with force if needed
         try:
             logging.info("Stopping all Docker containers")
             client = docker.from_env()
             containers = client.containers.list()
+
+            if not containers:
+                logging.info("No running containers found")
+                return
+
+            logging.info(f"Found {len(containers)} running containers")
+
             for container in containers:
                 try:
-
-                    container.
+                    # Get container info
+                    created_time = container.attrs["Created"]
+                    uptime = (
+                        datetime.datetime.utcnow()
+                        - datetime.datetime.fromisoformat(
+                            created_time.replace("Z", "+00:00")
+                        )
+                    )
+
+                    logging.info(
+                        f"Stopping container: {container.name} ({container.id[:12]}) - uptime: {uptime}"
+                    )
+
+                    # Try graceful stop first
+                    container.stop(timeout=10)
+                    logging.info(f"Successfully stopped container: {container.name}")
+
                 except Exception as e:
                     logging.warning(f"Error stopping container {container.name}: {e}")
+                    try:
+                        # Force kill if graceful stop failed
+                        logging.info(f"Force killing container: {container.name}")
+                        container.kill()
+                        logging.info(
+                            f"Successfully force killed container: {container.name}"
+                        )
+                    except Exception as e2:
+                        logging.error(
+                            f"Failed to force kill container {container.name}: {e2}"
+                        )
+
         except Exception as e:
             logging.warning(f"Error accessing Docker client: {e}")

         logging.info("Flush cleanup completed")

+    def _check_stuck_containers(self, max_hours=2):
+        """Check for containers running longer than max_hours and return info"""
+        try:
+            client = docker.from_env()
+            containers = client.containers.list()
+            stuck_containers = []
+
+            for container in containers:
+                try:
+                    created_time = container.attrs["Created"]
+                    uptime = (
+                        datetime.datetime.utcnow()
+                        - datetime.datetime.fromisoformat(
+                            created_time.replace("Z", "+00:00")
+                        )
+                    )
+                    uptime_hours = uptime.total_seconds() / 3600
+
+                    if uptime_hours > max_hours:
+                        stuck_containers.append(
+                            {
+                                "name": container.name,
+                                "id": container.id[:12],
+                                "image": (
+                                    container.image.tags[0]
+                                    if container.image.tags
+                                    else "unknown"
+                                ),
+                                "uptime_hours": round(uptime_hours, 2),
+                                "status": container.status,
+                            }
+                        )
+                except Exception as e:
+                    logging.warning(f"Error checking container {container.name}: {e}")
+
+            return stuck_containers
+        except Exception as e:
+            logging.warning(f"Error accessing Docker client: {e}")
+            return []
+

 def start_http_server(port=8080):
     """Start the HTTP server in a separate thread"""
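One caveat worth noting about the uptime arithmetic shared by _perform_flush_cleanup and _check_stuck_containers: Docker's Created attribute is an ISO 8601 string ending in Z (usually with nanosecond precision), so fromisoformat(...replace("Z", "+00:00")) yields a timezone-aware datetime, while datetime.utcnow() is naive, and Python will not subtract one from the other; the per-container try/except would absorb the resulting error. Below is a minimal sketch that keeps the computation timezone-aware throughout; the helper name and sample timestamp are illustrative, not code from this package.

# Hypothetical helper: container uptime in hours from a Docker "Created" string,
# using timezone-aware datetimes throughout.
import datetime


def uptime_hours(created, now=None):
    # Docker reports e.g. "2024-05-01T12:00:00.123456789Z"; strip the "Z",
    # trim the fractional seconds to microseconds so fromisoformat() accepts
    # them on all supported Python versions, and attach an explicit UTC tzinfo.
    base, _, frac = created.rstrip("Z").partition(".")
    created_dt = datetime.datetime.fromisoformat(base).replace(
        tzinfo=datetime.timezone.utc
    )
    if frac:
        created_dt = created_dt.replace(microsecond=int(frac[:6].ljust(6, "0")))
    now = now or datetime.datetime.now(datetime.timezone.utc)
    return (now - created_dt).total_seconds() / 3600


# Illustrative timestamp: a container created three hours before "now".
now = datetime.datetime(2024, 5, 1, 15, 0, tzinfo=datetime.timezone.utc)
print(round(uptime_hours("2024-05-01T12:00:00.000000000Z", now), 2))  # 3.0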
@@ -331,6 +436,7 @@ def start_http_server(port=8080):
     logging.info(f"Starting HTTP server on port {port}")
     logging.info(f"Available endpoints:")
     logging.info(f"  GET /ping - Health check")
+    logging.info(f"  GET /containers - Check for stuck containers")
     logging.info(
         f"  POST /reset-queue - Reset pending streams and skip running tests"
     )
@@ -865,7 +971,11 @@ def process_self_contained_coordinator_stream(

             # Check if this work should be ignored due to flush
             global _flush_timestamp
-            if
+            if (
+                _flush_timestamp is not None
+                and use_git_timestamp
+                and git_timestamp_ms is not None
+            ):
                 # Convert flush timestamp to milliseconds for comparison
                 flush_timestamp_ms = int(_flush_timestamp.timestamp() * 1000)
                 if git_timestamp_ms < flush_timestamp_ms:
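The flush cutoff works in epoch milliseconds: the stored flush time is converted with int(ts.timestamp() * 1000) and compared against the work item's git timestamp, which is already in milliseconds; anything older than the flush is skipped. A small worked example with illustrative timestamps follows (note that .timestamp() on a naive datetime, such as the value utcnow() stores, is interpreted in the machine's local timezone, so the example uses explicitly UTC-aware values to keep the arithmetic unambiguous).

import datetime

# Illustrative values: a flush issued at 12:00:00 UTC, a commit from 11:59:30 UTC.
flush_time = datetime.datetime(2024, 5, 1, 12, 0, 0, tzinfo=datetime.timezone.utc)
git_timestamp_ms = int(
    datetime.datetime(2024, 5, 1, 11, 59, 30, tzinfo=datetime.timezone.utc).timestamp()
    * 1000
)

flush_timestamp_ms = int(flush_time.timestamp() * 1000)  # 1714564800000
if git_timestamp_ms < flush_timestamp_ms:
    print("work predates the flush - skipping it")  # this branch is taken here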
@@ -1045,7 +1155,9 @@ def process_self_contained_coordinator_stream(
                 command_regexp,
             )

-            logging.info(
+            logging.info(
+                f"Adding {len(filtered_test_files)} tests to pending test list"
+            )

             # Use pipeline for efficient bulk operations
             pipeline = github_event_conn.pipeline()
@@ -1068,7 +1180,9 @@ def process_self_contained_coordinator_stream(
             pipeline.expire(stream_test_list_pending, REDIS_BINS_EXPIRE_SECS)
             pipeline.execute()

-            logging.info(
+            logging.info(
+                f"Successfully added {len(test_names_added)} tests to pending test list in key {stream_test_list_pending}"
+            )

             pending_tests = len(filtered_test_files)
             failed_tests = 0
@@ -1426,25 +1540,111 @@ def process_self_contained_coordinator_stream(
                    )
                    # run the benchmark
                    benchmark_start_time = datetime.datetime.now()
+
+                    # Calculate container timeout
+                    container_timeout = 300  # 5 minutes default
+                    buffer_timeout = 60  # Default buffer
+
+                    # Try to extract test time from command and add buffer
+                    import re
+
+                    test_time_match = re.search(
+                        r"--?test-time[=\s]+(\d+)", benchmark_command_str
+                    )
+                    if test_time_match:
+                        test_time = int(test_time_match.group(1))
+                        container_timeout = test_time + buffer_timeout
+                        logging.info(
+                            f"Set container timeout to {container_timeout}s (test-time: {test_time}s + {buffer_timeout}s buffer)"
+                        )
+                    else:
+                        logging.info(
+                            f"Using default container timeout: {container_timeout}s"
+                        )
+
                    try:
-
-
-
-
-
-
-
-                            },
+                        # Start container with detach=True to enable timeout handling
+                        container = docker_client.containers.run(
+                            image=client_container_image,
+                            volumes={
+                                temporary_dir_client: {
+                                    "bind": client_mnt_point,
+                                    "mode": "rw",
                                },
-
-
-
-
-
-
-
-
+                            },
+                            auto_remove=False,  # Don't auto-remove so we can get logs if timeout
+                            privileged=True,
+                            working_dir=benchmark_tool_workdir,
+                            command=benchmark_command_str,
+                            network_mode="host",
+                            detach=True,  # Detach to enable timeout
+                            cpuset_cpus=client_cpuset_cpus,
+                        )
+
+                        logging.info(
+                            f"Started container {container.name} ({container.id[:12]}) with {container_timeout}s timeout"
                        )
+
+                        # Wait for container with timeout
+                        try:
+                            result = container.wait(
+                                timeout=container_timeout
+                            )
+                            client_container_stdout = container.logs(
+                                stdout=True, stderr=False
+                            ).decode("utf-8")
+                            container_stderr = container.logs(
+                                stdout=False, stderr=True
+                            ).decode("utf-8")
+
+                            # Check exit code
+                            if result["StatusCode"] != 0:
+                                logging.error(
+                                    f"Container exited with code {result['StatusCode']}"
+                                )
+                                logging.error(
+                                    f"Container stderr: {container_stderr}"
+                                )
+                                raise docker.errors.ContainerError(
+                                    container,
+                                    result["StatusCode"],
+                                    benchmark_command_str,
+                                    client_container_stdout,
+                                    container_stderr,
+                                )
+
+                            logging.info(
+                                f"Container {container.name} completed successfully"
+                            )
+
+                        except Exception as timeout_error:
+                            if "timeout" in str(timeout_error).lower():
+                                logging.error(
+                                    f"Container {container.name} timed out after {container_timeout}s"
+                                )
+                                # Get logs before killing
+                                try:
+                                    timeout_logs = container.logs(
+                                        stdout=True, stderr=True
+                                    ).decode("utf-8")
+                                    logging.error(
+                                        f"Container logs before timeout: {timeout_logs}"
+                                    )
+                                except:
+                                    pass
+                                # Kill the container
+                                container.kill()
+                                raise Exception(
+                                    f"Container timed out after {container_timeout} seconds"
+                                )
+                            else:
+                                raise timeout_error
+                        finally:
+                            # Clean up container
+                            try:
+                                container.remove(force=True)
+                            except:
+                                pass
                    except docker.errors.ContainerError as e:
                        logging.info(
                            "stdout: {}".format(
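The timeout applied to the benchmark container is derived from the command itself: a regular expression extracts the memtier --test-time value when present and adds a 60-second buffer, falling back to a 300-second default otherwise. A short sketch of that calculation (function and constant names are illustrative):

import re

DEFAULT_TIMEOUT = 300  # seconds, used when no test time can be extracted
BUFFER = 60  # seconds added on top of the declared test time


def container_timeout(benchmark_command_str):
    # Same pattern as in the diff: matches "--test-time 120", "-test-time=120", etc.
    match = re.search(r"--?test-time[=\s]+(\d+)", benchmark_command_str)
    if match:
        return int(match.group(1)) + BUFFER
    return DEFAULT_TIMEOUT


print(container_timeout("memtier_benchmark --test-time 120 -t 4 -c 50"))  # 180
print(container_timeout("memtier_benchmark --requests=100000 -t 4 -c 50"))  # 300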
@@ -2208,22 +2408,92 @@ def data_prepopulation_step(
     # run the benchmark
     preload_start_time = datetime.datetime.now()

-
-
-
-
-
-
+    # Set preload timeout (preload can take longer than benchmarks)
+    preload_timeout = 1800  # 30 minutes default for data loading
+    logging.info(f"Starting preload container with {preload_timeout}s timeout")
+
+    try:
+        # Start container with detach=True to enable timeout handling
+        container = docker_client.containers.run(
+            image=preload_image,
+            volumes={
+                temporary_dir: {
+                    "bind": client_mnt_point,
+                    "mode": "rw",
+                },
            },
-
-
-
-
-
-
-
-
-
+        auto_remove=False,  # Don't auto-remove so we can get logs if timeout
+            privileged=True,
+            working_dir=benchmark_tool_workdir,
+            command=preload_command_str,
+            network_mode="host",
+            detach=True,  # Detach to enable timeout
+            cpuset_cpus=client_cpuset_cpus,
+        )
+
+        logging.info(
+            f"Started preload container {container.name} ({container.id[:12]}) with {preload_timeout}s timeout"
+        )
+
+        # Wait for container with timeout
+        try:
+            result = container.wait(timeout=preload_timeout)
+            client_container_stdout = container.logs(
+                stdout=True, stderr=False
+            ).decode("utf-8")
+            container_stderr = container.logs(stdout=False, stderr=True).decode(
+                "utf-8"
+            )
+
+            # Check exit code
+            if result["StatusCode"] != 0:
+                logging.error(
+                    f"Preload container exited with code {result['StatusCode']}"
+                )
+                logging.error(f"Preload container stderr: {container_stderr}")
+                raise docker.errors.ContainerError(
+                    container,
+                    result["StatusCode"],
+                    preload_command_str,
+                    client_container_stdout,
+                    container_stderr,
+                )
+
+            logging.info(
+                f"Preload container {container.name} completed successfully"
+            )
+
+        except Exception as timeout_error:
+            if "timeout" in str(timeout_error).lower():
+                logging.error(
+                    f"Preload container {container.name} timed out after {preload_timeout}s"
+                )
+                # Get logs before killing
+                try:
+                    timeout_logs = container.logs(stdout=True, stderr=True).decode(
+                        "utf-8"
+                    )
+                    logging.error(
+                        f"Preload container logs before timeout: {timeout_logs}"
+                    )
+                except:
+                    pass
+                # Kill the container
+                container.kill()
+                raise Exception(
+                    f"Preload container timed out after {preload_timeout} seconds"
+                )
+            else:
+                raise timeout_error
+        finally:
+            # Clean up container
+            try:
+                container.remove(force=True)
+            except:
+                pass
+    except Exception as e:
+        logging.error(f"Preload container failed: {e}")
+        raise e

     preload_end_time = datetime.datetime.now()
     preload_duration_seconds = calculate_client_tool_duration_and_check(
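Both the benchmark and the preload paths now follow the same detach, wait-with-timeout, kill, remove sequence through the Docker SDK. The compact sketch below restates that pattern as a standalone helper; the image and command in the commented example are placeholders, and catching requests' timeout and connection errors is an assumption based on docker-py's documented behaviour for Container.wait(timeout=...) rather than code from this package (the diff itself catches a broad Exception and checks for "timeout" in the message).

# Sketch of the detach/wait/kill/remove pattern used above. The image and
# command in the example are placeholders, and the exception types caught are
# an assumption based on docker-py's documented behaviour for wait(timeout=...).
import docker
import requests


def run_with_timeout(client, image, command, timeout_s):
    container = client.containers.run(
        image=image,
        command=command,
        detach=True,        # detach so we can enforce our own timeout
        auto_remove=False,  # keep the container around to collect logs
    )
    try:
        try:
            result = container.wait(timeout=timeout_s)
        except (requests.exceptions.ReadTimeout, requests.exceptions.ConnectionError):
            logs = container.logs(stdout=True, stderr=True).decode("utf-8")
            container.kill()
            raise RuntimeError(
                f"container timed out after {timeout_s}s; logs:\n{logs}"
            )
        stdout = container.logs(stdout=True, stderr=False).decode("utf-8")
        if result["StatusCode"] != 0:
            stderr = container.logs(stdout=False, stderr=True).decode("utf-8")
            raise RuntimeError(
                f"container exited with {result['StatusCode']}: {stderr}"
            )
        return stdout
    finally:
        container.remove(force=True)


# Illustrative call (image and command are placeholders):
# client = docker.from_env()
# print(run_with_timeout(client, "redis:alpine", "redis-cli --version", 30))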
{redis_benchmarks_specification-0.1.333.dist-info → redis_benchmarks_specification-0.1.334.dist-info}/METADATA

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: redis-benchmarks-specification
-Version: 0.1.333
+Version: 0.1.334
 Summary: The Redis benchmarks specification describes the cross-language/tools requirements and expectations to foster performance and observability standards around redis related technologies. Members from both industry and academia, including organizations and individuals are encouraged to contribute.
 Author: filipecosta90
 Author-email: filipecosta.90@gmail.com
{redis_benchmarks_specification-0.1.333.dist-info → redis_benchmarks_specification-0.1.334.dist-info}/RECORD

@@ -26,7 +26,7 @@ redis_benchmarks_specification/__init__.py,sha256=YQIEx2sLPPA0JR9OuCuMNMNtm-f_gq
 redis_benchmarks_specification/__runner__/__init__.py,sha256=l-G1z-t6twUgi8QLueqoTQLvJmv3hJoEYskGm6H7L6M,83
 redis_benchmarks_specification/__runner__/args.py,sha256=K3VGmBC0-9lSv9H6VDp0N-6FGMWvc_4H0pG_TOXN5u8,11312
 redis_benchmarks_specification/__runner__/remote_profiling.py,sha256=R7obNQju8mmY9oKkcndjI4aAuxi84OCLhDSqqaYu1SU,18610
-redis_benchmarks_specification/__runner__/runner.py,sha256
+redis_benchmarks_specification/__runner__/runner.py,sha256=V6PnV5Tt2qupLcfJg0yhj_AqI5pIzFj-0EXc_JtYo84,156096
 redis_benchmarks_specification/__self_contained_coordinator__/__init__.py,sha256=l-G1z-t6twUgi8QLueqoTQLvJmv3hJoEYskGm6H7L6M,83
 redis_benchmarks_specification/__self_contained_coordinator__/args.py,sha256=1LePhRkDsoMPFclM_DoXBIoMBN8zcVoQMnm9wTK5Uqw,6961
 redis_benchmarks_specification/__self_contained_coordinator__/artifacts.py,sha256=OVHqJzDgeSSRfUSiKp1ZTAVv14PvSbk-5yJsAAoUfpw,936
@@ -37,7 +37,7 @@ redis_benchmarks_specification/__self_contained_coordinator__/docker.py,sha256=0
 redis_benchmarks_specification/__self_contained_coordinator__/post_processing.py,sha256=sVLKNnWdAqYY9DjVdqRC5tDaIrVSaI3Ca7w8-DQ-LRM,776
 redis_benchmarks_specification/__self_contained_coordinator__/prepopulation.py,sha256=1UeFr2T1ZQBcHCSd4W1ZtaWgXyFPfjLyDi_DgDc1eTA,2957
 redis_benchmarks_specification/__self_contained_coordinator__/runners.py,sha256=F11zO_ILnpmiVwTeCQnP5nDHQk3kNnajPftwKsbhlXE,30209
-redis_benchmarks_specification/__self_contained_coordinator__/self_contained_coordinator.py,sha256=
+redis_benchmarks_specification/__self_contained_coordinator__/self_contained_coordinator.py,sha256=2QdjjUyGB-GCyyJtSe984GY634i3qE019qWCnDBuI0U,111825
 redis_benchmarks_specification/__setups__/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 redis_benchmarks_specification/__setups__/topologies.py,sha256=xQ1IJkcTji_ZjLiJd3vOxZpvbNtBLZw9cPkw5hGJKHU,481
 redis_benchmarks_specification/__spec__/__init__.py,sha256=l-G1z-t6twUgi8QLueqoTQLvJmv3hJoEYskGm6H7L6M,83
@@ -282,8 +282,8 @@ redis_benchmarks_specification/test-suites/memtier_benchmark-playbook-session-st
 redis_benchmarks_specification/test-suites/memtier_benchmark-playbook-session-storage-1k-sessions.yml,sha256=2egtIxPxCze2jlbAfgsk4v9JSQHNMoPLbDWFEW8olDg,7006
 redis_benchmarks_specification/test-suites/template.txt,sha256=ezqGiRPOvuSDO0iG7GEf-AGXNfHbgXI89_G0RUEzL88,481
 redis_benchmarks_specification/vector-search-test-suites/vector_db_benchmark_test.yml,sha256=PD7ow-k4Ll2BkhEC3aIqiaCZt8Hc4aJIp96Lw3J3mcI,791
-redis_benchmarks_specification-0.1.
-redis_benchmarks_specification-0.1.
-redis_benchmarks_specification-0.1.
-redis_benchmarks_specification-0.1.
-redis_benchmarks_specification-0.1.
+redis_benchmarks_specification-0.1.334.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+redis_benchmarks_specification-0.1.334.dist-info/METADATA,sha256=dE2ReU_PQYen1kcQ2YRh5AfPmkb1Hrx-j15Qj2vt508,22768
+redis_benchmarks_specification-0.1.334.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+redis_benchmarks_specification-0.1.334.dist-info/entry_points.txt,sha256=x5WBXCZsnDRTZxV7SBGmC65L2k-ygdDOxV8vuKN00Nk,715
+redis_benchmarks_specification-0.1.334.dist-info/RECORD,,