redis-benchmarks-specification 0.1.332 (py3-none-any.whl) → 0.1.334 (py3-none-any.whl)

This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.

@@ -182,7 +182,7 @@ def validate_benchmark_metrics(
         "all_stats.totals.ops/sec",
     ]
 
-    latency_patterns = ["latency", "p50", "p95", "p99", "p999", "usec", "msec"]
+    latency_patterns = ["p50", "p95", "p99", "p999", "percentile"]
 
     validation_errors = []
 
@@ -195,6 +195,22 @@ def validate_benchmark_metrics(
         elif isinstance(data, (int, float)):
             metric_path_lower = path.lower()
 
+            # Skip Waits metrics as they can legitimately be 0
+            if "waits" in metric_path_lower:
+                return
+
+            # Skip general latency metrics that can legitimately be 0
+            # Only validate specific percentile latencies (p50, p95, etc.)
+            if any(
+                pattern in metric_path_lower
+                for pattern in [
+                    "average latency",
+                    "totals.latency",
+                    "all_stats.totals.latency",
+                ]
+            ):
+                return
+
             # Check throughput metrics
             for pattern in throughput_patterns:
                 if pattern in metric_path_lower:
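In practice, the narrowed pattern list means only percentile-style latency metrics are still required to be non-zero, while aggregate latency fields and Waits counters may legitimately report 0. A minimal, hypothetical sketch of that matching behaviour (check_metric and skip_patterns are illustrative names, not the package's actual helpers):

    latency_patterns = ["p50", "p95", "p99", "p999", "percentile"]
    skip_patterns = ["waits", "average latency", "totals.latency", "all_stats.totals.latency"]

    def check_metric(path: str, value: float) -> list:
        """Return validation errors for a single numeric metric value."""
        errors = []
        path_lower = path.lower()
        if any(p in path_lower for p in skip_patterns):
            return errors  # these metrics may legitimately be 0
        if any(p in path_lower for p in latency_patterns) and value <= 0:
            errors.append(f"{path}: percentile latency should be > 0, got {value}")
        return errors

    # An aggregate latency of 0 is now accepted, while a zero p99 is still flagged.
    print(check_metric("ALL STATS.Totals.Average Latency", 0.0))           # []
    print(check_metric("all_stats.totals.percentile_latencies.p99", 0.0))  # one error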
@@ -122,6 +122,7 @@ _reset_queue_requested = False
 _exclusive_hardware = False
 _http_auth_username = None
 _http_auth_password = None
+_flush_timestamp = None
 
 
 class CoordinatorHTTPHandler(BaseHTTPRequestHandler):
@@ -190,6 +191,22 @@ class CoordinatorHTTPHandler(BaseHTTPRequestHandler):
                 "service": "redis-benchmarks-self-contained-coordinator",
             }
             self.wfile.write(json.dumps(response).encode())
+
+        elif parsed_path.path == "/containers":
+            # Check for stuck containers
+            stuck_containers = self._check_stuck_containers()
+
+            self.send_response(200)
+            self.send_header("Content-type", "application/json")
+            self.end_headers()
+            response = {
+                "status": "success",
+                "stuck_containers": stuck_containers,
+                "total_stuck": len(stuck_containers),
+                "timestamp": datetime.datetime.utcnow().isoformat(),
+            }
+            self.wfile.write(json.dumps(response).encode())
+
         else:
             self.send_response(404)
             self.send_header("Content-type", "application/json")
@@ -203,7 +220,7 @@ class CoordinatorHTTPHandler(BaseHTTPRequestHandler):
             self._send_auth_required()
             return
 
-        global _reset_queue_requested
+        global _reset_queue_requested, _flush_timestamp
 
         parsed_path = urlparse(self.path)
 
@@ -240,12 +257,175 @@ class CoordinatorHTTPHandler(BaseHTTPRequestHandler):
                 self.send_header("Content-type", "application/json")
                 self.end_headers()
                 self.wfile.write(json.dumps({"error": str(e)}).encode())
+
+        elif parsed_path.path == "/flush":
+            try:
+                # Read request body (optional)
+                content_length = int(self.headers.get("Content-Length", 0))
+                if content_length > 0:
+                    post_data = self.rfile.read(content_length)
+                    try:
+                        request_data = json.loads(post_data.decode())
+                    except json.JSONDecodeError:
+                        request_data = {}
+                else:
+                    request_data = {}
+
+                # Record flush timestamp
+                flush_time = datetime.datetime.utcnow()
+                _flush_timestamp = flush_time
+
+                logging.info(
+                    "Flush requested via HTTP endpoint - stopping all containers and processes"
+                )
+
+                # Perform flush cleanup
+                self._perform_flush_cleanup()
+
+                self.send_response(200)
+                self.send_header("Content-type", "application/json")
+                self.end_headers()
+                response = {
+                    "status": "success",
+                    "message": "Flush completed - all containers stopped and processes killed",
+                    "flush_timestamp": flush_time.isoformat(),
+                    "timestamp": datetime.datetime.utcnow().isoformat(),
+                }
+                self.wfile.write(json.dumps(response).encode())
+
+            except Exception as e:
+                logging.error(f"Error during flush operation: {e}")
+                self.send_response(500)
+                self.send_header("Content-type", "application/json")
+                self.end_headers()
+                response = {
+                    "status": "error",
+                    "message": f"Flush failed: {str(e)}",
+                    "timestamp": datetime.datetime.utcnow().isoformat(),
+                }
+                self.wfile.write(json.dumps(response).encode())
+
         else:
             self.send_response(404)
             self.send_header("Content-type", "application/json")
             self.end_headers()
             self.wfile.write(json.dumps({"error": "Not found"}).encode())
 
+    def _perform_flush_cleanup(self):
+        """Perform flush cleanup: stop all containers and kill memtier processes"""
+        import subprocess
+
+        # Kill all memtier processes
+        try:
+            logging.info("Killing all memtier_benchmark processes")
+            result = subprocess.run(
+                ["pkill", "-f", "memtier_benchmark"], capture_output=True, text=True
+            )
+            if result.returncode == 0:
+                logging.info("Successfully killed memtier_benchmark processes")
+            else:
+                logging.info("No memtier_benchmark processes found to kill")
+
+            result = subprocess.run(
+                ["pkill", "-f", "memtier"], capture_output=True, text=True
+            )
+            if result.returncode == 0:
+                logging.info("Successfully killed memtier processes")
+            else:
+                logging.info("No memtier processes found to kill")
+        except Exception as e:
+            logging.warning(f"Error killing memtier processes: {e}")
+
+        # Stop all Docker containers with force if needed
+        try:
+            logging.info("Stopping all Docker containers")
+            client = docker.from_env()
+            containers = client.containers.list()
+
+            if not containers:
+                logging.info("No running containers found")
+                return
+
+            logging.info(f"Found {len(containers)} running containers")
+
+            for container in containers:
+                try:
+                    # Get container info
+                    created_time = container.attrs["Created"]
+                    uptime = (
+                        datetime.datetime.utcnow()
+                        - datetime.datetime.fromisoformat(
+                            created_time.replace("Z", "+00:00")
+                        )
+                    )
+
+                    logging.info(
+                        f"Stopping container: {container.name} ({container.id[:12]}) - uptime: {uptime}"
+                    )
+
+                    # Try graceful stop first
+                    container.stop(timeout=10)
+                    logging.info(f"Successfully stopped container: {container.name}")
+
+                except Exception as e:
+                    logging.warning(f"Error stopping container {container.name}: {e}")
+                    try:
+                        # Force kill if graceful stop failed
+                        logging.info(f"Force killing container: {container.name}")
+                        container.kill()
+                        logging.info(
+                            f"Successfully force killed container: {container.name}"
+                        )
+                    except Exception as e2:
+                        logging.error(
+                            f"Failed to force kill container {container.name}: {e2}"
+                        )
+
+        except Exception as e:
+            logging.warning(f"Error accessing Docker client: {e}")
+
+        logging.info("Flush cleanup completed")
+
+    def _check_stuck_containers(self, max_hours=2):
+        """Check for containers running longer than max_hours and return info"""
+        try:
+            client = docker.from_env()
+            containers = client.containers.list()
+            stuck_containers = []
+
+            for container in containers:
+                try:
+                    created_time = container.attrs["Created"]
+                    uptime = (
+                        datetime.datetime.utcnow()
+                        - datetime.datetime.fromisoformat(
+                            created_time.replace("Z", "+00:00")
+                        )
+                    )
+                    uptime_hours = uptime.total_seconds() / 3600
+
+                    if uptime_hours > max_hours:
+                        stuck_containers.append(
+                            {
+                                "name": container.name,
+                                "id": container.id[:12],
+                                "image": (
+                                    container.image.tags[0]
+                                    if container.image.tags
+                                    else "unknown"
+                                ),
+                                "uptime_hours": round(uptime_hours, 2),
+                                "status": container.status,
+                            }
+                        )
+                except Exception as e:
+                    logging.warning(f"Error checking container {container.name}: {e}")
+
+            return stuck_containers
+        except Exception as e:
+            logging.warning(f"Error accessing Docker client: {e}")
+            return []
+
 
 def start_http_server(port=8080):
     """Start the HTTP server in a separate thread"""
@@ -256,9 +436,13 @@ def start_http_server(port=8080):
         logging.info(f"Starting HTTP server on port {port}")
         logging.info(f"Available endpoints:")
         logging.info(f" GET /ping - Health check")
+        logging.info(f" GET /containers - Check for stuck containers")
         logging.info(
             f" POST /reset-queue - Reset pending streams and skip running tests"
         )
+        logging.info(
+            f" POST /flush - Stop all containers and processes, ignore work before flush time"
+        )
         server.serve_forever()
     except Exception as e:
         logging.error(f"HTTP server error: {e}")
@@ -784,6 +968,23 @@ def process_self_contained_coordinator_stream(
                     git_timestamp_ms,
                     run_arch,
                 ) = extract_build_info_from_streamdata(testDetails)
+
+                # Check if this work should be ignored due to flush
+                global _flush_timestamp
+                if (
+                    _flush_timestamp is not None
+                    and use_git_timestamp
+                    and git_timestamp_ms is not None
+                ):
+                    # Convert flush timestamp to milliseconds for comparison
+                    flush_timestamp_ms = int(_flush_timestamp.timestamp() * 1000)
+                    if git_timestamp_ms < flush_timestamp_ms:
+                        logging.info(
+                            f"Ignoring work with git_timestamp_ms {git_timestamp_ms} "
+                            f"(before flush timestamp {flush_timestamp_ms}). Stream id: {stream_id}"
+                        )
+                        return stream_id, False, 0
+
                 tf_github_org = default_github_org
                 if b"github_org" in testDetails:
                     tf_github_org = testDetails[b"github_org"].decode()
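The guard above simply compares two epoch-millisecond values: the commit timestamp carried by the stream entry and the recorded flush time. A standalone sketch of the same comparison, with made-up values:

    import datetime

    # A flush requested "now", and a stream entry whose commit predates it by a minute.
    flush_timestamp = datetime.datetime.utcnow()
    flush_timestamp_ms = int(flush_timestamp.timestamp() * 1000)
    git_timestamp_ms = flush_timestamp_ms - 60_000

    if git_timestamp_ms < flush_timestamp_ms:
        print("work predates the flush -> skipped")   # this branch runs here
    else:
        print("work is newer than the flush -> processed")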
@@ -954,7 +1155,9 @@ def process_self_contained_coordinator_stream(
                     command_regexp,
                 )
 
-                logging.info(f"Adding {len(filtered_test_files)} tests to pending test list")
+                logging.info(
+                    f"Adding {len(filtered_test_files)} tests to pending test list"
+                )
 
                 # Use pipeline for efficient bulk operations
                 pipeline = github_event_conn.pipeline()
@@ -977,7 +1180,9 @@ def process_self_contained_coordinator_stream(
                 pipeline.expire(stream_test_list_pending, REDIS_BINS_EXPIRE_SECS)
                 pipeline.execute()
 
-                logging.info(f"Successfully added {len(test_names_added)} tests to pending test list in key {stream_test_list_pending}")
+                logging.info(
+                    f"Successfully added {len(test_names_added)} tests to pending test list in key {stream_test_list_pending}"
+                )
 
                 pending_tests = len(filtered_test_files)
                 failed_tests = 0
@@ -1335,25 +1540,111 @@ def process_self_contained_coordinator_stream(
             )
             # run the benchmark
             benchmark_start_time = datetime.datetime.now()
+
+            # Calculate container timeout
+            container_timeout = 300  # 5 minutes default
+            buffer_timeout = 60  # Default buffer
+
+            # Try to extract test time from command and add buffer
+            import re
+
+            test_time_match = re.search(
+                r"--?test-time[=\s]+(\d+)", benchmark_command_str
+            )
+            if test_time_match:
+                test_time = int(test_time_match.group(1))
+                container_timeout = test_time + buffer_timeout
+                logging.info(
+                    f"Set container timeout to {container_timeout}s (test-time: {test_time}s + {buffer_timeout}s buffer)"
+                )
+            else:
+                logging.info(
+                    f"Using default container timeout: {container_timeout}s"
+                )
+
             try:
-                client_container_stdout = (
-                    docker_client.containers.run(
-                        image=client_container_image,
-                        volumes={
-                            temporary_dir_client: {
-                                "bind": client_mnt_point,
-                                "mode": "rw",
-                            },
+                # Start container with detach=True to enable timeout handling
+                container = docker_client.containers.run(
+                    image=client_container_image,
+                    volumes={
+                        temporary_dir_client: {
+                            "bind": client_mnt_point,
+                            "mode": "rw",
                         },
-                        auto_remove=True,
-                        privileged=True,
-                        working_dir=benchmark_tool_workdir,
-                        command=benchmark_command_str,
-                        network_mode="host",
-                        detach=False,
-                        cpuset_cpus=client_cpuset_cpus,
-                    )
+                    },
+                    auto_remove=False,  # Don't auto-remove so we can get logs if timeout
+                    privileged=True,
+                    working_dir=benchmark_tool_workdir,
+                    command=benchmark_command_str,
+                    network_mode="host",
+                    detach=True,  # Detach to enable timeout
+                    cpuset_cpus=client_cpuset_cpus,
                 )
+
+                logging.info(
+                    f"Started container {container.name} ({container.id[:12]}) with {container_timeout}s timeout"
+                )
+
+                # Wait for container with timeout
+                try:
+                    result = container.wait(
+                        timeout=container_timeout
+                    )
+                    client_container_stdout = container.logs(
+                        stdout=True, stderr=False
+                    ).decode("utf-8")
+                    container_stderr = container.logs(
+                        stdout=False, stderr=True
+                    ).decode("utf-8")
+
+                    # Check exit code
+                    if result["StatusCode"] != 0:
+                        logging.error(
+                            f"Container exited with code {result['StatusCode']}"
+                        )
+                        logging.error(
+                            f"Container stderr: {container_stderr}"
+                        )
+                        raise docker.errors.ContainerError(
+                            container,
+                            result["StatusCode"],
+                            benchmark_command_str,
+                            client_container_stdout,
+                            container_stderr,
+                        )
+
+                    logging.info(
+                        f"Container {container.name} completed successfully"
+                    )
+
+                except Exception as timeout_error:
+                    if "timeout" in str(timeout_error).lower():
+                        logging.error(
+                            f"Container {container.name} timed out after {container_timeout}s"
+                        )
+                        # Get logs before killing
+                        try:
+                            timeout_logs = container.logs(
+                                stdout=True, stderr=True
+                            ).decode("utf-8")
+                            logging.error(
+                                f"Container logs before timeout: {timeout_logs}"
+                            )
+                        except:
+                            pass
+                        # Kill the container
+                        container.kill()
+                        raise Exception(
+                            f"Container timed out after {container_timeout} seconds"
+                        )
+                    else:
+                        raise timeout_error
+                finally:
+                    # Clean up container
+                    try:
+                        container.remove(force=True)
+                    except:
+                        pass
             except docker.errors.ContainerError as e:
                 logging.info(
                     "stdout: {}".format(
@@ -2117,22 +2408,92 @@ def data_prepopulation_step(
         # run the benchmark
         preload_start_time = datetime.datetime.now()
 
-        client_container_stdout = docker_client.containers.run(
-            image=preload_image,
-            volumes={
-                temporary_dir: {
-                    "bind": client_mnt_point,
-                    "mode": "rw",
+        # Set preload timeout (preload can take longer than benchmarks)
+        preload_timeout = 1800  # 30 minutes default for data loading
+        logging.info(f"Starting preload container with {preload_timeout}s timeout")
+
+        try:
+            # Start container with detach=True to enable timeout handling
+            container = docker_client.containers.run(
+                image=preload_image,
+                volumes={
+                    temporary_dir: {
+                        "bind": client_mnt_point,
+                        "mode": "rw",
+                    },
                 },
-            },
-            auto_remove=True,
-            privileged=True,
-            working_dir=benchmark_tool_workdir,
-            command=preload_command_str,
-            network_mode="host",
-            detach=False,
-            cpuset_cpus=client_cpuset_cpus,
-        )
+                auto_remove=False,  # Don't auto-remove so we can get logs if timeout
+                privileged=True,
+                working_dir=benchmark_tool_workdir,
+                command=preload_command_str,
+                network_mode="host",
+                detach=True,  # Detach to enable timeout
+                cpuset_cpus=client_cpuset_cpus,
+            )
+
+            logging.info(
+                f"Started preload container {container.name} ({container.id[:12]}) with {preload_timeout}s timeout"
+            )
+
+            # Wait for container with timeout
+            try:
+                result = container.wait(timeout=preload_timeout)
+                client_container_stdout = container.logs(
+                    stdout=True, stderr=False
+                ).decode("utf-8")
+                container_stderr = container.logs(stdout=False, stderr=True).decode(
+                    "utf-8"
+                )
+
+                # Check exit code
+                if result["StatusCode"] != 0:
+                    logging.error(
+                        f"Preload container exited with code {result['StatusCode']}"
+                    )
+                    logging.error(f"Preload container stderr: {container_stderr}")
+                    raise docker.errors.ContainerError(
+                        container,
+                        result["StatusCode"],
+                        preload_command_str,
+                        client_container_stdout,
+                        container_stderr,
+                    )
+
+                logging.info(
+                    f"Preload container {container.name} completed successfully"
+                )
+
+            except Exception as timeout_error:
+                if "timeout" in str(timeout_error).lower():
+                    logging.error(
+                        f"Preload container {container.name} timed out after {preload_timeout}s"
+                    )
+                    # Get logs before killing
+                    try:
+                        timeout_logs = container.logs(stdout=True, stderr=True).decode(
+                            "utf-8"
+                        )
+                        logging.error(
+                            f"Preload container logs before timeout: {timeout_logs}"
+                        )
+                    except:
+                        pass
+                    # Kill the container
+                    container.kill()
+                    raise Exception(
+                        f"Preload container timed out after {preload_timeout} seconds"
+                    )
+                else:
+                    raise timeout_error
+            finally:
+                # Clean up container
+                try:
+                    container.remove(force=True)
+                except:
+                    pass
+        except Exception as e:
+            logging.error(f"Preload container failed: {e}")
+            raise e
 
         preload_end_time = datetime.datetime.now()
         preload_duration_seconds = calculate_client_tool_duration_and_check(
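Both the benchmark and the preload step now follow the same pattern from the Docker SDK for Python: run with detach=True, wait with a timeout, read the logs, check the exit status, and force-remove the container in a finally block. A stripped-down sketch of that pattern, with placeholder image and command:

    import docker

    def run_with_timeout(image, command, timeout_s):
        """Run a container and fail if it does not finish within timeout_s seconds."""
        client = docker.from_env()
        container = client.containers.run(
            image=image,
            command=command,
            detach=True,        # detach so the timeout can be enforced by the caller
            auto_remove=False,  # keep the container around to read its logs
        )
        try:
            result = container.wait(timeout=timeout_s)  # raises on timeout
            stdout = container.logs(stdout=True, stderr=False).decode("utf-8")
            if result["StatusCode"] != 0:
                raise RuntimeError(f"container exited with {result['StatusCode']}")
            return stdout
        except Exception as exc:
            if "timeout" in str(exc).lower():
                container.kill()
                raise RuntimeError(f"container timed out after {timeout_s}s") from exc
            raise
        finally:
            container.remove(force=True)

    # Example call (placeholder image/command):
    # print(run_with_timeout("redis:7", "redis-cli --version", timeout_s=30))

With detach=False the SDK blocks until the container exits and offers no timeout option, which is why the coordinator switches to detached mode and enforces the deadline itself.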
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: redis-benchmarks-specification
-Version: 0.1.332
+Version: 0.1.334
 Summary: The Redis benchmarks specification describes the cross-language/tools requirements and expectations to foster performance and observability standards around redis related technologies. Members from both industry and academia, including organizations and individuals are encouraged to contribute.
 Author: filipecosta90
 Author-email: filipecosta.90@gmail.com
@@ -26,7 +26,7 @@ redis_benchmarks_specification/__init__.py,sha256=YQIEx2sLPPA0JR9OuCuMNMNtm-f_gq
 redis_benchmarks_specification/__runner__/__init__.py,sha256=l-G1z-t6twUgi8QLueqoTQLvJmv3hJoEYskGm6H7L6M,83
 redis_benchmarks_specification/__runner__/args.py,sha256=K3VGmBC0-9lSv9H6VDp0N-6FGMWvc_4H0pG_TOXN5u8,11312
 redis_benchmarks_specification/__runner__/remote_profiling.py,sha256=R7obNQju8mmY9oKkcndjI4aAuxi84OCLhDSqqaYu1SU,18610
-redis_benchmarks_specification/__runner__/runner.py,sha256=-BDFxOLgkFe4LvVX1FnqmuszuyRMR8AJZW0SvPX0utw,155496
+redis_benchmarks_specification/__runner__/runner.py,sha256=V6PnV5Tt2qupLcfJg0yhj_AqI5pIzFj-0EXc_JtYo84,156096
 redis_benchmarks_specification/__self_contained_coordinator__/__init__.py,sha256=l-G1z-t6twUgi8QLueqoTQLvJmv3hJoEYskGm6H7L6M,83
 redis_benchmarks_specification/__self_contained_coordinator__/args.py,sha256=1LePhRkDsoMPFclM_DoXBIoMBN8zcVoQMnm9wTK5Uqw,6961
 redis_benchmarks_specification/__self_contained_coordinator__/artifacts.py,sha256=OVHqJzDgeSSRfUSiKp1ZTAVv14PvSbk-5yJsAAoUfpw,936
@@ -37,7 +37,7 @@ redis_benchmarks_specification/__self_contained_coordinator__/docker.py,sha256=0
 redis_benchmarks_specification/__self_contained_coordinator__/post_processing.py,sha256=sVLKNnWdAqYY9DjVdqRC5tDaIrVSaI3Ca7w8-DQ-LRM,776
 redis_benchmarks_specification/__self_contained_coordinator__/prepopulation.py,sha256=1UeFr2T1ZQBcHCSd4W1ZtaWgXyFPfjLyDi_DgDc1eTA,2957
 redis_benchmarks_specification/__self_contained_coordinator__/runners.py,sha256=F11zO_ILnpmiVwTeCQnP5nDHQk3kNnajPftwKsbhlXE,30209
-redis_benchmarks_specification/__self_contained_coordinator__/self_contained_coordinator.py,sha256=3QOuaaNJ1HQl4Ld4ZtgweTu2P0Hu-l6cgp3GhGpU8rw,95332
+redis_benchmarks_specification/__self_contained_coordinator__/self_contained_coordinator.py,sha256=2QdjjUyGB-GCyyJtSe984GY634i3qE019qWCnDBuI0U,111825
 redis_benchmarks_specification/__setups__/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 redis_benchmarks_specification/__setups__/topologies.py,sha256=xQ1IJkcTji_ZjLiJd3vOxZpvbNtBLZw9cPkw5hGJKHU,481
 redis_benchmarks_specification/__spec__/__init__.py,sha256=l-G1z-t6twUgi8QLueqoTQLvJmv3hJoEYskGm6H7L6M,83
@@ -282,8 +282,8 @@ redis_benchmarks_specification/test-suites/memtier_benchmark-playbook-session-st
 redis_benchmarks_specification/test-suites/memtier_benchmark-playbook-session-storage-1k-sessions.yml,sha256=2egtIxPxCze2jlbAfgsk4v9JSQHNMoPLbDWFEW8olDg,7006
 redis_benchmarks_specification/test-suites/template.txt,sha256=ezqGiRPOvuSDO0iG7GEf-AGXNfHbgXI89_G0RUEzL88,481
 redis_benchmarks_specification/vector-search-test-suites/vector_db_benchmark_test.yml,sha256=PD7ow-k4Ll2BkhEC3aIqiaCZt8Hc4aJIp96Lw3J3mcI,791
-redis_benchmarks_specification-0.1.332.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
-redis_benchmarks_specification-0.1.332.dist-info/METADATA,sha256=Y4eicJqinSR2jRYOPGMTUcn2sMXqW6XHhLJRDdDz4zc,22768
-redis_benchmarks_specification-0.1.332.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
-redis_benchmarks_specification-0.1.332.dist-info/entry_points.txt,sha256=x5WBXCZsnDRTZxV7SBGmC65L2k-ygdDOxV8vuKN00Nk,715
-redis_benchmarks_specification-0.1.332.dist-info/RECORD,,
+redis_benchmarks_specification-0.1.334.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+redis_benchmarks_specification-0.1.334.dist-info/METADATA,sha256=dE2ReU_PQYen1kcQ2YRh5AfPmkb1Hrx-j15Qj2vt508,22768
+redis_benchmarks_specification-0.1.334.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+redis_benchmarks_specification-0.1.334.dist-info/entry_points.txt,sha256=x5WBXCZsnDRTZxV7SBGmC65L2k-ygdDOxV8vuKN00Nk,715
+redis_benchmarks_specification-0.1.334.dist-info/RECORD,,