redis-benchmarks-specification 0.1.333__py3-none-any.whl → 0.1.335__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only and reflects the package contents exactly as they appear in the public registry.

Potentially problematic release: this version of redis-benchmarks-specification has been flagged as possibly problematic.

@@ -1067,6 +1067,9 @@ def add_standardized_metric_bybranch(
  labels["deployment_name+branch"] = "{} {}".format(
  deployment_name, tf_github_branch
  )
+ labels["running_platform+branch"] = "{} {}".format(
+ running_platform, tf_github_branch
+ )
  labels["test_name"] = str(test_name)
  labels["metric"] = str(metric_name)
  logging.info(
@@ -1137,6 +1140,9 @@ def add_standardized_metric_byversion(
  labels["deployment_name+version"] = "{} {}".format(
  deployment_name, artifact_version
  )
+ labels["running_platform+version"] = "{} {}".format(
+ running_platform, artifact_version
+ )
  labels["test_name"] = str(test_name)
  labels["metric"] = str(metric_name)
  logging.info(
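
The two hunks above add combined `running_platform+branch` and `running_platform+version` labels alongside the existing `deployment_name` variants. A minimal sketch of the resulting label set; the concrete values for `deployment_name`, `running_platform`, `tf_github_branch`, and `artifact_version` below are invented for illustration, not taken from the diff:

    # Illustration of the by-branch / by-version labels after this change (example values only).
    labels = {}
    deployment_name = "oss-standalone"        # example value
    running_platform = "intel64-ubuntu22.04"  # example value
    tf_github_branch = "unstable"             # example value
    artifact_version = "7.4.0"                # example value

    labels["deployment_name+branch"] = "{} {}".format(deployment_name, tf_github_branch)
    labels["running_platform+branch"] = "{} {}".format(running_platform, tf_github_branch)    # new in 0.1.335
    labels["deployment_name+version"] = "{} {}".format(deployment_name, artifact_version)
    labels["running_platform+version"] = "{} {}".format(running_platform, artifact_version)   # new in 0.1.335
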
@@ -182,7 +182,7 @@ def validate_benchmark_metrics(
  "all_stats.totals.ops/sec",
  ]

- latency_patterns = ["latency", "p50", "p95", "p99", "p999", "usec", "msec"]
+ latency_patterns = ["p50", "p95", "p99", "p999", "percentile"]

  validation_errors = []

@@ -195,6 +195,22 @@ def validate_benchmark_metrics(
  elif isinstance(data, (int, float)):
  metric_path_lower = path.lower()

+ # Skip Waits metrics as they can legitimately be 0
+ if "waits" in metric_path_lower:
+ return
+
+ # Skip general latency metrics that can legitimately be 0
+ # Only validate specific percentile latencies (p50, p95, etc.)
+ if any(
+ pattern in metric_path_lower
+ for pattern in [
+ "average latency",
+ "totals.latency",
+ "all_stats.totals.latency",
+ ]
+ ):
+ return
+
  # Check throughput metrics
  for pattern in throughput_patterns:
  if pattern in metric_path_lower:
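
After this change, `validate_benchmark_metrics` only treats specific percentile latencies as required to be non-zero and skips Waits counters and aggregate latency paths. A minimal standalone sketch of that filtering logic; the function and variable names here are illustrative, not the package's API:

    # Illustrative sketch of the skip-then-validate pattern above; not the package's exact code.
    latency_patterns = ["p50", "p95", "p99", "p999", "percentile"]
    skip_patterns = ["waits", "average latency", "totals.latency", "all_stats.totals.latency"]

    def should_validate_latency(metric_path: str) -> bool:
        path_lower = metric_path.lower()
        if any(p in path_lower for p in skip_patterns):
            return False  # these can legitimately be 0
        return any(p in path_lower for p in latency_patterns)

    assert should_validate_latency("ALL_STATS.Totals.Percentile Latencies.p99.00") is True
    assert should_validate_latency("ALL_STATS.Totals.Average Latency") is False
    assert should_validate_latency("ALL_STATS.Waits.ops/sec") is False
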
@@ -1460,14 +1476,13 @@ def process_self_contained_coordinator_stream(
  dry_run_include_preload = args.dry_run_include_preload
  defaults_filename = args.defaults_filename
  override_test_runs = args.override_test_runs
- (
- _,
- _,
- default_metrics,
- _,
- _,
- _,
- ) = get_defaults(defaults_filename)
+ get_defaults_result = get_defaults(defaults_filename)
+ # Handle variable number of return values from get_defaults
+ if len(get_defaults_result) >= 3:
+ default_metrics = get_defaults_result[2]
+ else:
+ default_metrics = []
+ logging.warning("get_defaults returned fewer values than expected, using empty default_metrics")

  # For memory comparison mode, analyze datasets before starting
  if memory_comparison_only:
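
Both this hunk and the matching change in `main()` (later in this diff) replace the fixed six-element unpacking of `get_defaults()` with an index-based lookup, so the caller tolerates a changed return arity. A minimal sketch of the same defensive pattern; the `get_defaults` stub below is a stand-in, not the package's implementation:

    # Sketch of tolerating a variable-arity return value; get_defaults here is a stand-in.
    import logging

    def get_defaults(filename):
        # pretend this version of the helper returns only two values
        return ({}, {})

    result = get_defaults("defaults.yml")
    if len(result) >= 3:
        default_metrics = result[2]  # third element carries the default metrics
    else:
        default_metrics = []
        logging.warning("get_defaults returned fewer values than expected, using empty default_metrics")
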
@@ -191,6 +191,22 @@ class CoordinatorHTTPHandler(BaseHTTPRequestHandler):
  "service": "redis-benchmarks-self-contained-coordinator",
  }
  self.wfile.write(json.dumps(response).encode())
+
+ elif parsed_path.path == "/containers":
+ # Check for stuck containers
+ stuck_containers = self._check_stuck_containers()
+
+ self.send_response(200)
+ self.send_header("Content-type", "application/json")
+ self.end_headers()
+ response = {
+ "status": "success",
+ "stuck_containers": stuck_containers,
+ "total_stuck": len(stuck_containers),
+ "timestamp": datetime.datetime.utcnow().isoformat(),
+ }
+ self.wfile.write(json.dumps(response).encode())
+
  else:
  self.send_response(404)
  self.send_header("Content-type", "application/json")
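
The new GET /containers endpoint returns a JSON summary of containers the coordinator considers stuck. Assuming the coordinator's HTTP server is running on its default port 8080 on the local host (an assumption about the deployment, not something this diff states), a query could look like this:

    # Hypothetical client for the new /containers endpoint; host and port are assumptions.
    import json
    import urllib.request

    with urllib.request.urlopen("http://localhost:8080/containers") as resp:
        payload = json.load(resp)

    print(payload["total_stuck"])          # number of containers flagged as stuck
    for c in payload["stuck_containers"]:  # each entry: name, id, image, uptime_hours, status
        print(c["name"], c["uptime_hours"])
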
@@ -259,7 +275,9 @@ class CoordinatorHTTPHandler(BaseHTTPRequestHandler):
  flush_time = datetime.datetime.utcnow()
  _flush_timestamp = flush_time

- logging.info("Flush requested via HTTP endpoint - stopping all containers and processes")
+ logging.info(
+ "Flush requested via HTTP endpoint - stopping all containers and processes"
+ )

  # Perform flush cleanup
  self._perform_flush_cleanup()
@@ -271,7 +289,7 @@ class CoordinatorHTTPHandler(BaseHTTPRequestHandler):
  "status": "success",
  "message": "Flush completed - all containers stopped and processes killed",
  "flush_timestamp": flush_time.isoformat(),
- "timestamp": datetime.datetime.utcnow().isoformat()
+ "timestamp": datetime.datetime.utcnow().isoformat(),
  }
  self.wfile.write(json.dumps(response).encode())

@@ -283,7 +301,7 @@ class CoordinatorHTTPHandler(BaseHTTPRequestHandler):
  response = {
  "status": "error",
  "message": f"Flush failed: {str(e)}",
- "timestamp": datetime.datetime.utcnow().isoformat()
+ "timestamp": datetime.datetime.utcnow().isoformat(),
  }
  self.wfile.write(json.dumps(response).encode())

@@ -300,27 +318,114 @@ class CoordinatorHTTPHandler(BaseHTTPRequestHandler):
  # Kill all memtier processes
  try:
  logging.info("Killing all memtier_benchmark processes")
- subprocess.run(["pkill", "-f", "memtier_benchmark"], check=False)
- subprocess.run(["pkill", "-f", "memtier"], check=False)
+ result = subprocess.run(
+ ["pkill", "-f", "memtier_benchmark"], capture_output=True, text=True
+ )
+ if result.returncode == 0:
+ logging.info("Successfully killed memtier_benchmark processes")
+ else:
+ logging.info("No memtier_benchmark processes found to kill")
+
+ result = subprocess.run(
+ ["pkill", "-f", "memtier"], capture_output=True, text=True
+ )
+ if result.returncode == 0:
+ logging.info("Successfully killed memtier processes")
+ else:
+ logging.info("No memtier processes found to kill")
  except Exception as e:
  logging.warning(f"Error killing memtier processes: {e}")

- # Stop all Docker containers
+ # Stop all Docker containers with force if needed
  try:
  logging.info("Stopping all Docker containers")
  client = docker.from_env()
  containers = client.containers.list()
+
+ if not containers:
+ logging.info("No running containers found")
+ return
+
+ logging.info(f"Found {len(containers)} running containers")
+
  for container in containers:
  try:
- logging.info(f"Stopping container: {container.name} ({container.id[:12]})")
- container.stop(timeout=5)
+ # Get container info
+ created_time = container.attrs["Created"]
+ uptime = (
+ datetime.datetime.utcnow()
+ - datetime.datetime.fromisoformat(
+ created_time.replace("Z", "+00:00")
+ )
+ )
+
+ logging.info(
+ f"Stopping container: {container.name} ({container.id[:12]}) - uptime: {uptime}"
+ )
+
+ # Try graceful stop first
+ container.stop(timeout=10)
+ logging.info(f"Successfully stopped container: {container.name}")
+
  except Exception as e:
  logging.warning(f"Error stopping container {container.name}: {e}")
+ try:
+ # Force kill if graceful stop failed
+ logging.info(f"Force killing container: {container.name}")
+ container.kill()
+ logging.info(
+ f"Successfully force killed container: {container.name}"
+ )
+ except Exception as e2:
+ logging.error(
+ f"Failed to force kill container {container.name}: {e2}"
+ )
+
  except Exception as e:
  logging.warning(f"Error accessing Docker client: {e}")

  logging.info("Flush cleanup completed")

+ def _check_stuck_containers(self, max_hours=2):
+ """Check for containers running longer than max_hours and return info"""
+ try:
+ client = docker.from_env()
+ containers = client.containers.list()
+ stuck_containers = []
+
+ for container in containers:
+ try:
+ created_time = container.attrs["Created"]
+ uptime = (
+ datetime.datetime.utcnow()
+ - datetime.datetime.fromisoformat(
+ created_time.replace("Z", "+00:00")
+ )
+ )
+ uptime_hours = uptime.total_seconds() / 3600
+
+ if uptime_hours > max_hours:
+ stuck_containers.append(
+ {
+ "name": container.name,
+ "id": container.id[:12],
+ "image": (
+ container.image.tags[0]
+ if container.image.tags
+ else "unknown"
+ ),
+ "uptime_hours": round(uptime_hours, 2),
+ "status": container.status,
+ }
+ )
+ except Exception as e:
+ logging.warning(f"Error checking container {container.name}: {e}")
+
+ return stuck_containers
+ except Exception as e:
+ logging.warning(f"Error accessing Docker client: {e}")
+ return []
+

  def start_http_server(port=8080):
  """Start the HTTP server in a separate thread"""
@@ -331,6 +436,7 @@ def start_http_server(port=8080):
  logging.info(f"Starting HTTP server on port {port}")
  logging.info(f"Available endpoints:")
  logging.info(f" GET /ping - Health check")
+ logging.info(f" GET /containers - Check for stuck containers")
  logging.info(
  f" POST /reset-queue - Reset pending streams and skip running tests"
  )
@@ -568,14 +674,13 @@ def main():
  grafana_profile_dashboard = args.grafana_profile_dashboard

  defaults_filename = args.defaults_filename
- (
- _,
- _,
- default_metrics,
- _,
- _,
- _,
- ) = get_defaults(defaults_filename)
+ get_defaults_result = get_defaults(defaults_filename)
+ # Handle variable number of return values from get_defaults
+ if len(get_defaults_result) >= 3:
+ default_metrics = get_defaults_result[2]
+ else:
+ default_metrics = []
+ logging.warning("get_defaults returned fewer values than expected, using empty default_metrics")

  # Consumer id
  consumer_pos = args.consumer_pos
@@ -865,7 +970,11 @@ def process_self_contained_coordinator_stream(

  # Check if this work should be ignored due to flush
  global _flush_timestamp
- if _flush_timestamp is not None and use_git_timestamp and git_timestamp_ms is not None:
+ if (
+ _flush_timestamp is not None
+ and use_git_timestamp
+ and git_timestamp_ms is not None
+ ):
  # Convert flush timestamp to milliseconds for comparison
  flush_timestamp_ms = int(_flush_timestamp.timestamp() * 1000)
  if git_timestamp_ms < flush_timestamp_ms:
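
The flush gate above compares the stored flush time against the work item's git timestamp, both expressed as milliseconds since the epoch. A quick worked sketch of that comparison; the concrete timestamps are invented, and a timezone-aware datetime is used here only to keep the example deterministic:

    # Worked example of the millisecond comparison; the timestamps are invented.
    import datetime

    _flush_timestamp = datetime.datetime(2024, 6, 1, 12, 0, 0, tzinfo=datetime.timezone.utc)
    git_timestamp_ms = 1717236000000  # commit timestamp carried by the work item (2024-06-01 10:00 UTC)

    flush_timestamp_ms = int(_flush_timestamp.timestamp() * 1000)  # 1717243200000
    if git_timestamp_ms < flush_timestamp_ms:
        print("work predates the flush - skip it")   # this branch is taken here
    else:
        print("work is newer than the flush - run it")
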
@@ -1045,7 +1154,9 @@ def process_self_contained_coordinator_stream(
  command_regexp,
  )

- logging.info(f"Adding {len(filtered_test_files)} tests to pending test list")
+ logging.info(
+ f"Adding {len(filtered_test_files)} tests to pending test list"
+ )

  # Use pipeline for efficient bulk operations
  pipeline = github_event_conn.pipeline()
@@ -1068,7 +1179,9 @@ def process_self_contained_coordinator_stream(
  pipeline.expire(stream_test_list_pending, REDIS_BINS_EXPIRE_SECS)
  pipeline.execute()

- logging.info(f"Successfully added {len(test_names_added)} tests to pending test list in key {stream_test_list_pending}")
+ logging.info(
+ f"Successfully added {len(test_names_added)} tests to pending test list in key {stream_test_list_pending}"
+ )

  pending_tests = len(filtered_test_files)
  failed_tests = 0
@@ -1426,25 +1539,111 @@ def process_self_contained_coordinator_stream(
  )
  # run the benchmark
  benchmark_start_time = datetime.datetime.now()
+
+ # Calculate container timeout
+ container_timeout = 300 # 5 minutes default
+ buffer_timeout = 60 # Default buffer
+
+ # Try to extract test time from command and add buffer
+ import re
+
+ test_time_match = re.search(
+ r"--?test-time[=\s]+(\d+)", benchmark_command_str
+ )
+ if test_time_match:
+ test_time = int(test_time_match.group(1))
+ container_timeout = test_time + buffer_timeout
+ logging.info(
+ f"Set container timeout to {container_timeout}s (test-time: {test_time}s + {buffer_timeout}s buffer)"
+ )
+ else:
+ logging.info(
+ f"Using default container timeout: {container_timeout}s"
+ )
+
  try:
- client_container_stdout = (
- docker_client.containers.run(
- image=client_container_image,
- volumes={
- temporary_dir_client: {
- "bind": client_mnt_point,
- "mode": "rw",
- },
+ # Start container with detach=True to enable timeout handling
+ container = docker_client.containers.run(
+ image=client_container_image,
+ volumes={
+ temporary_dir_client: {
+ "bind": client_mnt_point,
+ "mode": "rw",
  },
- auto_remove=True,
- privileged=True,
- working_dir=benchmark_tool_workdir,
- command=benchmark_command_str,
- network_mode="host",
- detach=False,
- cpuset_cpus=client_cpuset_cpus,
- )
+ },
+ auto_remove=False, # Don't auto-remove so we can get logs if timeout
+ privileged=True,
+ working_dir=benchmark_tool_workdir,
+ command=benchmark_command_str,
+ network_mode="host",
+ detach=True, # Detach to enable timeout
+ cpuset_cpus=client_cpuset_cpus,
  )
+
+ logging.info(
+ f"Started container {container.name} ({container.id[:12]}) with {container_timeout}s timeout"
+ )
+
+ # Wait for container with timeout
+ try:
+ result = container.wait(
+ timeout=container_timeout
+ )
+ client_container_stdout = container.logs(
+ stdout=True, stderr=False
+ ).decode("utf-8")
+ container_stderr = container.logs(
+ stdout=False, stderr=True
+ ).decode("utf-8")
+
+ # Check exit code
+ if result["StatusCode"] != 0:
+ logging.error(
+ f"Container exited with code {result['StatusCode']}"
+ )
+ logging.error(
+ f"Container stderr: {container_stderr}"
+ )
+ raise docker.errors.ContainerError(
+ container,
+ result["StatusCode"],
+ benchmark_command_str,
+ client_container_stdout,
+ container_stderr,
+ )
+
+ logging.info(
+ f"Container {container.name} completed successfully"
+ )
+
+ except Exception as timeout_error:
+ if "timeout" in str(timeout_error).lower():
+ logging.error(
+ f"Container {container.name} timed out after {container_timeout}s"
+ )
+ # Get logs before killing
+ try:
+ timeout_logs = container.logs(
+ stdout=True, stderr=True
+ ).decode("utf-8")
+ logging.error(
+ f"Container logs before timeout: {timeout_logs}"
+ )
+ except:
+ pass
+ # Kill the container
+ container.kill()
+ raise Exception(
+ f"Container timed out after {container_timeout} seconds"
+ )
+ else:
+ raise timeout_error
+ finally:
+ # Clean up container
+ try:
+ container.remove(force=True)
+ except:
+ pass
  except docker.errors.ContainerError as e:
  logging.info(
  "stdout: {}".format(
@@ -2208,22 +2407,92 @@ def data_prepopulation_step(
  # run the benchmark
  preload_start_time = datetime.datetime.now()

- client_container_stdout = docker_client.containers.run(
- image=preload_image,
- volumes={
- temporary_dir: {
- "bind": client_mnt_point,
- "mode": "rw",
+ # Set preload timeout (preload can take longer than benchmarks)
+ preload_timeout = 1800 # 30 minutes default for data loading
+ logging.info(f"Starting preload container with {preload_timeout}s timeout")
+
+ try:
+ # Start container with detach=True to enable timeout handling
+ container = docker_client.containers.run(
+ image=preload_image,
+ volumes={
+ temporary_dir: {
+ "bind": client_mnt_point,
+ "mode": "rw",
+ },
  },
- },
- auto_remove=True,
- privileged=True,
- working_dir=benchmark_tool_workdir,
- command=preload_command_str,
- network_mode="host",
- detach=False,
- cpuset_cpus=client_cpuset_cpus,
- )
+ auto_remove=False, # Don't auto-remove so we can get logs if timeout
+ privileged=True,
+ working_dir=benchmark_tool_workdir,
+ command=preload_command_str,
+ network_mode="host",
+ detach=True, # Detach to enable timeout
+ cpuset_cpus=client_cpuset_cpus,
+ )
+
+ logging.info(
+ f"Started preload container {container.name} ({container.id[:12]}) with {preload_timeout}s timeout"
+ )
+
+ # Wait for container with timeout
+ try:
+ result = container.wait(timeout=preload_timeout)
+ client_container_stdout = container.logs(
+ stdout=True, stderr=False
+ ).decode("utf-8")
+ container_stderr = container.logs(stdout=False, stderr=True).decode(
+ "utf-8"
+ )
+
+ # Check exit code
+ if result["StatusCode"] != 0:
+ logging.error(
+ f"Preload container exited with code {result['StatusCode']}"
+ )
+ logging.error(f"Preload container stderr: {container_stderr}")
+ raise docker.errors.ContainerError(
+ container,
+ result["StatusCode"],
+ preload_command_str,
+ client_container_stdout,
+ container_stderr,
+ )
+
+ logging.info(
+ f"Preload container {container.name} completed successfully"
+ )
+
+ except Exception as timeout_error:
+ if "timeout" in str(timeout_error).lower():
+ logging.error(
+ f"Preload container {container.name} timed out after {preload_timeout}s"
+ )
+ # Get logs before killing
+ try:
+ timeout_logs = container.logs(stdout=True, stderr=True).decode(
+ "utf-8"
+ )
+ logging.error(
+ f"Preload container logs before timeout: {timeout_logs}"
+ )
+ except:
+ pass
+ # Kill the container
+ container.kill()
+ raise Exception(
+ f"Preload container timed out after {preload_timeout} seconds"
+ )
+ else:
+ raise timeout_error
+ finally:
+ # Clean up container
+ try:
+ container.remove(force=True)
+ except:
+ pass
+ except Exception as e:
+ logging.error(f"Preload container failed: {e}")
+ raise e

  preload_end_time = datetime.datetime.now()
  preload_duration_seconds = calculate_client_tool_duration_and_check(
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: redis-benchmarks-specification
- Version: 0.1.333
+ Version: 0.1.335
  Summary: The Redis benchmarks specification describes the cross-language/tools requirements and expectations to foster performance and observability standards around redis related technologies. Members from both industry and academia, including organizations and individuals are encouraged to contribute.
  Author: filipecosta90
  Author-email: filipecosta.90@gmail.com
@@ -18,7 +18,7 @@ redis_benchmarks_specification/__common__/package.py,sha256=4uVt1BAZ999LV2rZkq--
  redis_benchmarks_specification/__common__/runner.py,sha256=TKMUFJ3nLSfmSU7P_ok9oM5-pI4L4tFxsWLUWaUHhbI,16733
  redis_benchmarks_specification/__common__/spec.py,sha256=D_SN48wg6NMthW_-OS1H5bydSDiuZpfd4WPPj7Vfwmc,5760
  redis_benchmarks_specification/__common__/suppress_warnings.py,sha256=xpOjJ_piGYWlGq9ITr-ZwSCl2GpreA9juZIBao4fDRs,691
- redis_benchmarks_specification/__common__/timeseries.py,sha256=P0tbmH7leEMQwvqlr4lYZgr_I6EY3chh1Kf7XWe5fDQ,54048
+ redis_benchmarks_specification/__common__/timeseries.py,sha256=kHpkpNwZgWpjCh_Fg0wFcxNRMTb5SoSNwd_UHUCNVhc,54283
  redis_benchmarks_specification/__compare__/__init__.py,sha256=DtBXRp0Q01XgCFmY-1OIePMyyYihVNAjZ1Y8zwqSDN0,101
  redis_benchmarks_specification/__compare__/args.py,sha256=CNtA7pI9CJDTBJPGL2pNVfis7VDdxLautwRyka7oUCI,8911
  redis_benchmarks_specification/__compare__/compare.py,sha256=_AbuV3FZxtUZIdq4qq24LNzPNIdtQQaqrk8bUjn9blk,84327
@@ -26,7 +26,7 @@ redis_benchmarks_specification/__init__.py,sha256=YQIEx2sLPPA0JR9OuCuMNMNtm-f_gq
  redis_benchmarks_specification/__runner__/__init__.py,sha256=l-G1z-t6twUgi8QLueqoTQLvJmv3hJoEYskGm6H7L6M,83
  redis_benchmarks_specification/__runner__/args.py,sha256=K3VGmBC0-9lSv9H6VDp0N-6FGMWvc_4H0pG_TOXN5u8,11312
  redis_benchmarks_specification/__runner__/remote_profiling.py,sha256=R7obNQju8mmY9oKkcndjI4aAuxi84OCLhDSqqaYu1SU,18610
- redis_benchmarks_specification/__runner__/runner.py,sha256=-BDFxOLgkFe4LvVX1FnqmuszuyRMR8AJZW0SvPX0utw,155496
+ redis_benchmarks_specification/__runner__/runner.py,sha256=bbd-7vwsu8YNhQ02J5T-Hxgt5E8n03Ty6rif9gUYi24,156323
  redis_benchmarks_specification/__self_contained_coordinator__/__init__.py,sha256=l-G1z-t6twUgi8QLueqoTQLvJmv3hJoEYskGm6H7L6M,83
  redis_benchmarks_specification/__self_contained_coordinator__/args.py,sha256=1LePhRkDsoMPFclM_DoXBIoMBN8zcVoQMnm9wTK5Uqw,6961
  redis_benchmarks_specification/__self_contained_coordinator__/artifacts.py,sha256=OVHqJzDgeSSRfUSiKp1ZTAVv14PvSbk-5yJsAAoUfpw,936
@@ -37,7 +37,7 @@ redis_benchmarks_specification/__self_contained_coordinator__/docker.py,sha256=0
  redis_benchmarks_specification/__self_contained_coordinator__/post_processing.py,sha256=sVLKNnWdAqYY9DjVdqRC5tDaIrVSaI3Ca7w8-DQ-LRM,776
  redis_benchmarks_specification/__self_contained_coordinator__/prepopulation.py,sha256=1UeFr2T1ZQBcHCSd4W1ZtaWgXyFPfjLyDi_DgDc1eTA,2957
  redis_benchmarks_specification/__self_contained_coordinator__/runners.py,sha256=F11zO_ILnpmiVwTeCQnP5nDHQk3kNnajPftwKsbhlXE,30209
- redis_benchmarks_specification/__self_contained_coordinator__/self_contained_coordinator.py,sha256=7MSys1oasTMTmOm_6EuGtoWft19L9LR2OkQLrlnB2w0,99326
+ redis_benchmarks_specification/__self_contained_coordinator__/self_contained_coordinator.py,sha256=gJPzG-L0QBJtwImf0rTMvmXua-4jfhznCrBl84XF-Fk,112052
  redis_benchmarks_specification/__setups__/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  redis_benchmarks_specification/__setups__/topologies.py,sha256=xQ1IJkcTji_ZjLiJd3vOxZpvbNtBLZw9cPkw5hGJKHU,481
  redis_benchmarks_specification/__spec__/__init__.py,sha256=l-G1z-t6twUgi8QLueqoTQLvJmv3hJoEYskGm6H7L6M,83
@@ -282,8 +282,8 @@ redis_benchmarks_specification/test-suites/memtier_benchmark-playbook-session-st
  redis_benchmarks_specification/test-suites/memtier_benchmark-playbook-session-storage-1k-sessions.yml,sha256=2egtIxPxCze2jlbAfgsk4v9JSQHNMoPLbDWFEW8olDg,7006
  redis_benchmarks_specification/test-suites/template.txt,sha256=ezqGiRPOvuSDO0iG7GEf-AGXNfHbgXI89_G0RUEzL88,481
  redis_benchmarks_specification/vector-search-test-suites/vector_db_benchmark_test.yml,sha256=PD7ow-k4Ll2BkhEC3aIqiaCZt8Hc4aJIp96Lw3J3mcI,791
- redis_benchmarks_specification-0.1.333.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
- redis_benchmarks_specification-0.1.333.dist-info/METADATA,sha256=r3xqLRQZADSrPCQNjXzbeBh1bEmx56jXzEo0KFgGV2c,22768
- redis_benchmarks_specification-0.1.333.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
- redis_benchmarks_specification-0.1.333.dist-info/entry_points.txt,sha256=x5WBXCZsnDRTZxV7SBGmC65L2k-ygdDOxV8vuKN00Nk,715
- redis_benchmarks_specification-0.1.333.dist-info/RECORD,,
+ redis_benchmarks_specification-0.1.335.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+ redis_benchmarks_specification-0.1.335.dist-info/METADATA,sha256=3NbfRGc61aaz5PFZY84H6E7hY6Qh_845lo0k4dX6vYU,22768
+ redis_benchmarks_specification-0.1.335.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+ redis_benchmarks_specification-0.1.335.dist-info/entry_points.txt,sha256=x5WBXCZsnDRTZxV7SBGmC65L2k-ygdDOxV8vuKN00Nk,715
+ redis_benchmarks_specification-0.1.335.dist-info/RECORD,,