redis-benchmarks-specification 0.2.19__py3-none-any.whl → 0.2.21__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of redis-benchmarks-specification might be problematic. See the advisory details in the package registry for more information.

@@ -139,9 +139,13 @@ def extract_results_table(
139
139
 
140
140
  # Debug logging for JSON path resolution
141
141
  if "ALL STATS" in metric_jsonpath or "ALL_STATS" in metric_jsonpath:
142
- logging.info(f"DEBUG: Found {len(find_res)} results for JSONPath '{metric_jsonpath}'")
142
+ logging.info(
143
+ f"DEBUG: Found {len(find_res)} results for JSONPath '{metric_jsonpath}'"
144
+ )
143
145
  for i, result in enumerate(find_res):
144
- logging.info(f" Result {i}: path='{result.path}', value={result.value}, context='{result.context.path}'")
146
+ logging.info(
147
+ f" Result {i}: path='{result.path}', value={result.value}, context='{result.context.path}'"
148
+ )
145
149
 
146
150
  for metric in find_res:
147
151
  metric_name = str(metric.path)
@@ -209,11 +213,17 @@ def extract_results_table(
209
213
  )
210
214
  # Debug logging for missing metrics - show available keys
211
215
  if "ALL STATS" in str(jsonpath) or "ALL_STATS" in str(jsonpath):
212
- logging.info(f"DEBUG: Available top-level keys in results_dict: {list(results_dict.keys())}")
216
+ logging.info(
217
+ f"DEBUG: Available top-level keys in results_dict: {list(results_dict.keys())}"
218
+ )
213
219
  if "ALL STATS" in results_dict:
214
- logging.info(f"DEBUG: Keys in 'ALL STATS': {list(results_dict['ALL STATS'].keys())}")
220
+ logging.info(
221
+ f"DEBUG: Keys in 'ALL STATS': {list(results_dict['ALL STATS'].keys())}"
222
+ )
215
223
  if "ALL_STATS" in results_dict:
216
- logging.info(f"DEBUG: Keys in 'ALL_STATS': {list(results_dict['ALL_STATS'].keys())}")
224
+ logging.info(
225
+ f"DEBUG: Keys in 'ALL_STATS': {list(results_dict['ALL_STATS'].keys())}"
226
+ )
217
227
  return results_matrix
218
228
 
219
229
 
@@ -422,9 +432,13 @@ def common_timeseries_extraction(
422
432
  use_metric_context_path = cleaned_metric[5]
423
433
 
424
434
  # Debug logging for metric extraction
425
- logging.info(f"Extracted metric - JSONPath: '{metric_jsonpath}', Name: '{metric_name}', Value: {metric_value}")
435
+ logging.info(
436
+ f"Extracted metric - JSONPath: '{metric_jsonpath}', Name: '{metric_name}', Value: {metric_value}"
437
+ )
426
438
  if "ALL_STATS.Totals.Ops/sec" in metric_name or "ALL STATS" in metric_jsonpath:
427
- logging.warning(f"DEBUG ALL_STATS metric - JSONPath: '{metric_jsonpath}', Name: '{metric_name}', Value: {metric_value}, Context: '{metric_context_path}')")
439
+ logging.warning(
440
+ f"DEBUG ALL_STATS metric - JSONPath: '{metric_jsonpath}', Name: '{metric_name}', Value: {metric_value}, Context: '{metric_context_path}')"
441
+ )
428
442
 
429
443
  target_table_keyname, target_table_dict = from_metric_kv_to_timeserie(
430
444
  break_by_key,
@@ -86,7 +86,7 @@ def spin_docker_standalone_redis(
86
86
  ceil_db_cpu_limit, current_cpu_pos
87
87
  )
88
88
  # Calculate nano_cpus for better CPU distribution
89
- redis_cpu_count = len(db_cpuset_cpus.split(','))
89
+ redis_cpu_count = len(db_cpuset_cpus.split(","))
90
90
  redis_nano_cpus = int(redis_cpu_count * 1e9) # 1 CPU = 1e9 nano_cpus
91
91
 
92
92
  logging.info(
@@ -94,7 +94,9 @@ def spin_docker_standalone_redis(
94
94
  run_image, db_cpuset_cpus, command_str
95
95
  )
96
96
  )
97
- logging.info(f"Redis container will use {redis_cpu_count} CPUs (nano_cpus={redis_nano_cpus}) on cores {db_cpuset_cpus}")
97
+ logging.info(
98
+ f"Redis container will use {redis_cpu_count} CPUs (nano_cpus={redis_nano_cpus}) on cores {db_cpuset_cpus}"
99
+ )
98
100
 
99
101
  container = docker_client.containers.run(
100
102
  image=run_image,
@@ -109,7 +111,7 @@ def spin_docker_standalone_redis(
109
111
  detach=True,
110
112
  cpuset_cpus=db_cpuset_cpus,
111
113
  nano_cpus=redis_nano_cpus, # Force CPU distribution
112
- # pid_mode="host",
114
+ # pid_mode="host",
113
115
  )
114
116
  redis_containers.append(container)
115
117
  return current_cpu_pos
@@ -68,10 +68,12 @@ def data_prepopulation_step(
68
68
  preload_start_time = datetime.datetime.now()
69
69
 
70
70
  # Calculate nano_cpus for better CPU distribution
71
- preload_cpu_count = len(client_cpuset_cpus.split(','))
71
+ preload_cpu_count = len(client_cpuset_cpus.split(","))
72
72
  preload_nano_cpus = int(preload_cpu_count * 1e9) # 1 CPU = 1e9 nano_cpus
73
73
 
74
- logging.info(f"Preload container will use {preload_cpu_count} CPUs (nano_cpus={preload_nano_cpus}) on cores {client_cpuset_cpus}")
74
+ logging.info(
75
+ f"Preload container will use {preload_cpu_count} CPUs (nano_cpus={preload_nano_cpus}) on cores {client_cpuset_cpus}"
76
+ )
75
77
 
76
78
  client_container_stdout = docker_client.containers.run(
77
79
  image=preload_image,
@@ -541,10 +541,14 @@ def process_self_contained_coordinator_stream(
541
541
  benchmark_start_time = datetime.datetime.now()
542
542
 
543
543
  # Calculate nano_cpus for better CPU distribution
544
- client_cpu_count = len(client_cpuset_cpus.split(','))
545
- client_nano_cpus = int(client_cpu_count * 1e9) # 1 CPU = 1e9 nano_cpus
544
+ client_cpu_count = len(client_cpuset_cpus.split(","))
545
+ client_nano_cpus = int(
546
+ client_cpu_count * 1e9
547
+ ) # 1 CPU = 1e9 nano_cpus
546
548
 
547
- logging.info(f"Client container will use {client_cpu_count} CPUs (nano_cpus={client_nano_cpus}) on cores {client_cpuset_cpus}")
549
+ logging.info(
550
+ f"Client container will use {client_cpu_count} CPUs (nano_cpus={client_nano_cpus}) on cores {client_cpuset_cpus}"
551
+ )
548
552
 
549
553
  client_container_stdout = docker_client.containers.run(
550
554
  image=client_container_image,
@@ -1408,7 +1408,9 @@ def process_self_contained_coordinator_stream(
1408
1408
  run_image,
1409
1409
  temporary_dir,
1410
1410
  auto_remove=False,
1411
- enable_cpu_distribution=args.enable_cpu_distribution if args else False,
1411
+ enable_cpu_distribution=(
1412
+ getattr(args, 'enable_cpu_distribution', False) if args else False
1413
+ ),
1412
1414
  )
1413
1415
 
1414
1416
  r = redis.StrictRedis(
@@ -1622,21 +1624,37 @@ def process_self_contained_coordinator_stream(
1622
1624
  }
1623
1625
 
1624
1626
  # Add CPU distribution settings if enabled
1625
- enable_cpu_distribution = args.enable_cpu_distribution if args else False
1627
+ enable_cpu_distribution = (
1628
+ getattr(args, 'enable_cpu_distribution', False) if args else False
1629
+ )
1626
1630
  if enable_cpu_distribution:
1627
- client_cpu_count = len(client_cpuset_cpus.split(','))
1628
- client_nano_cpus = int(client_cpu_count * 1e9) # 1 CPU = 1e9 nano_cpus
1631
+ client_cpu_count = len(
1632
+ client_cpuset_cpus.split(",")
1633
+ )
1634
+ client_nano_cpus = int(
1635
+ client_cpu_count * 1e9
1636
+ ) # 1 CPU = 1e9 nano_cpus
1629
1637
 
1630
- client_container_args["cpuset_cpus"] = client_cpuset_cpus
1631
- client_container_args["nano_cpus"] = client_nano_cpus
1638
+ client_container_args["cpuset_cpus"] = (
1639
+ client_cpuset_cpus
1640
+ )
1641
+ client_container_args["nano_cpus"] = (
1642
+ client_nano_cpus
1643
+ )
1632
1644
 
1633
- logging.info(f"Client container will use {client_cpu_count} CPUs (nano_cpus={client_nano_cpus}) on cores {client_cpuset_cpus} [CPU distribution enabled]")
1645
+ logging.info(
1646
+ f"Client container will use {client_cpu_count} CPUs (nano_cpus={client_nano_cpus}) on cores {client_cpuset_cpus} [CPU distribution enabled]"
1647
+ )
1634
1648
  else:
1635
- logging.info(f"Client container will use default CPU allocation [CPU distribution disabled]")
1649
+ logging.info(
1650
+ f"Client container will use default CPU allocation [CPU distribution disabled]"
1651
+ )
1636
1652
 
1637
1653
  try:
1638
1654
  # Start container with detach=True to enable timeout handling
1639
- container = docker_client.containers.run(**client_container_args)
1655
+ container = docker_client.containers.run(
1656
+ **client_container_args
1657
+ )
1640
1658
 
1641
1659
  logging.info(
1642
1660
  f"Started container {container.name} ({container.id[:12]}) with {container_timeout}s timeout"
@@ -1645,31 +1663,62 @@ def process_self_contained_coordinator_stream(
1645
1663
  # Apply CPU affinity using taskset if CPU distribution is enabled
1646
1664
  if enable_cpu_distribution:
1647
1665
  try:
1648
- container_info = docker_client.api.inspect_container(container.id)
1649
- container_pid = container_info['State']['Pid']
1666
+ container_info = (
1667
+ docker_client.api.inspect_container(
1668
+ container.id
1669
+ )
1670
+ )
1671
+ container_pid = container_info["State"][
1672
+ "Pid"
1673
+ ]
1650
1674
 
1651
- logging.info(f"Setting CPU affinity for client container PID {container_pid} to cores {client_cpuset_cpus}")
1675
+ logging.info(
1676
+ f"Setting CPU affinity for client container PID {container_pid} to cores {client_cpuset_cpus}"
1677
+ )
1652
1678
 
1653
1679
  # Set CPU affinity for the main process and all its threads
1654
- subprocess.run(f"taskset -cp {client_cpuset_cpus} {container_pid}", shell=True, check=True)
1680
+ subprocess.run(
1681
+ f"taskset -cp {client_cpuset_cpus} {container_pid}",
1682
+ shell=True,
1683
+ check=True,
1684
+ )
1655
1685
 
1656
1686
  # Wait a moment for client to start its threads, then set affinity for all child processes
1657
1687
  time.sleep(1)
1658
- result = subprocess.run(f"pgrep -P {container_pid}", shell=True, capture_output=True, text=True)
1688
+ result = subprocess.run(
1689
+ f"pgrep -P {container_pid}",
1690
+ shell=True,
1691
+ capture_output=True,
1692
+ text=True,
1693
+ )
1659
1694
  if result.returncode == 0:
1660
- child_pids = result.stdout.strip().split('\n')
1695
+ child_pids = (
1696
+ result.stdout.strip().split("\n")
1697
+ )
1661
1698
  for child_pid in child_pids:
1662
1699
  if child_pid.strip():
1663
1700
  try:
1664
- subprocess.run(f"taskset -cp {client_cpuset_cpus} {child_pid.strip()}", shell=True, check=True)
1665
- logging.info(f"Set CPU affinity for client child process {child_pid.strip()}")
1666
- except subprocess.CalledProcessError:
1701
+ subprocess.run(
1702
+ f"taskset -cp {client_cpuset_cpus} {child_pid.strip()}",
1703
+ shell=True,
1704
+ check=True,
1705
+ )
1706
+ logging.info(
1707
+ f"Set CPU affinity for client child process {child_pid.strip()}"
1708
+ )
1709
+ except (
1710
+ subprocess.CalledProcessError
1711
+ ):
1667
1712
  pass # Child process may have exited
1668
1713
 
1669
- logging.info(f"✅ Applied CPU affinity to client container and all child processes")
1714
+ logging.info(
1715
+ f"✅ Applied CPU affinity to client container and all child processes"
1716
+ )
1670
1717
 
1671
1718
  except Exception as e:
1672
- logging.warning(f"Failed to set CPU affinity for client container: {e}")
1719
+ logging.warning(
1720
+ f"Failed to set CPU affinity for client container: {e}"
1721
+ )
1673
1722
 
1674
1723
  # Wait for container with timeout
1675
1724
  try:
@@ -2313,15 +2362,19 @@ def start_redis_container(
2313
2362
 
2314
2363
  # Add CPU distribution settings if enabled
2315
2364
  if enable_cpu_distribution:
2316
- redis_cpu_count = len(db_cpuset_cpus.split(','))
2365
+ redis_cpu_count = len(db_cpuset_cpus.split(","))
2317
2366
  redis_nano_cpus = int(redis_cpu_count * 1e9) # 1 CPU = 1e9 nano_cpus
2318
2367
 
2319
2368
  container_args["cpuset_cpus"] = db_cpuset_cpus
2320
2369
  container_args["nano_cpus"] = redis_nano_cpus
2321
2370
 
2322
- logging.info(f"Redis container will use {redis_cpu_count} CPUs (nano_cpus={redis_nano_cpus}) on cores {db_cpuset_cpus} [CPU distribution enabled]")
2371
+ logging.info(
2372
+ f"Redis container will use {redis_cpu_count} CPUs (nano_cpus={redis_nano_cpus}) on cores {db_cpuset_cpus} [CPU distribution enabled]"
2373
+ )
2323
2374
  else:
2324
- logging.info(f"Redis container will use default CPU allocation [CPU distribution disabled]")
2375
+ logging.info(
2376
+ f"Redis container will use default CPU allocation [CPU distribution disabled]"
2377
+ )
2325
2378
 
2326
2379
  redis_container = docker_client.containers.run(**container_args)
2327
2380
  time.sleep(5)
@@ -2330,27 +2383,41 @@ def start_redis_container(
2330
2383
  if enable_cpu_distribution:
2331
2384
  try:
2332
2385
  container_info = docker_client.api.inspect_container(redis_container.id)
2333
- container_pid = container_info['State']['Pid']
2386
+ container_pid = container_info["State"]["Pid"]
2334
2387
 
2335
- logging.info(f"Setting CPU affinity for Redis container PID {container_pid} to cores {db_cpuset_cpus}")
2388
+ logging.info(
2389
+ f"Setting CPU affinity for Redis container PID {container_pid} to cores {db_cpuset_cpus}"
2390
+ )
2336
2391
 
2337
2392
  # Set CPU affinity for the main Redis process and all its threads
2338
- subprocess.run(f"taskset -cp {db_cpuset_cpus} {container_pid}", shell=True, check=True)
2393
+ subprocess.run(
2394
+ f"taskset -cp {db_cpuset_cpus} {container_pid}", shell=True, check=True
2395
+ )
2339
2396
 
2340
2397
  # Wait a moment for Redis to start its IO threads, then set affinity for all Redis processes
2341
2398
  time.sleep(2)
2342
- result = subprocess.run(f"pgrep -P {container_pid}", shell=True, capture_output=True, text=True)
2399
+ result = subprocess.run(
2400
+ f"pgrep -P {container_pid}", shell=True, capture_output=True, text=True
2401
+ )
2343
2402
  if result.returncode == 0:
2344
- child_pids = result.stdout.strip().split('\n')
2403
+ child_pids = result.stdout.strip().split("\n")
2345
2404
  for child_pid in child_pids:
2346
2405
  if child_pid.strip():
2347
2406
  try:
2348
- subprocess.run(f"taskset -cp {db_cpuset_cpus} {child_pid.strip()}", shell=True, check=True)
2349
- logging.info(f"Set CPU affinity for Redis child process {child_pid.strip()}")
2407
+ subprocess.run(
2408
+ f"taskset -cp {db_cpuset_cpus} {child_pid.strip()}",
2409
+ shell=True,
2410
+ check=True,
2411
+ )
2412
+ logging.info(
2413
+ f"Set CPU affinity for Redis child process {child_pid.strip()}"
2414
+ )
2350
2415
  except subprocess.CalledProcessError:
2351
2416
  pass # Child process may have exited
2352
2417
 
2353
- logging.info(f"✅ Applied CPU affinity to Redis container and all child processes")
2418
+ logging.info(
2419
+ f"✅ Applied CPU affinity to Redis container and all child processes"
2420
+ )
2354
2421
 
2355
2422
  except Exception as e:
2356
2423
  logging.warning(f"Failed to set CPU affinity for Redis container: {e}")
@@ -2561,17 +2628,21 @@ def data_prepopulation_step(
2561
2628
  }
2562
2629
 
2563
2630
  # Add CPU distribution settings if enabled
2564
- enable_cpu_distribution = args.enable_cpu_distribution if args else False
2631
+ enable_cpu_distribution = getattr(args, 'enable_cpu_distribution', False) if args else False
2565
2632
  if enable_cpu_distribution:
2566
- preload_cpu_count = len(client_cpuset_cpus.split(','))
2633
+ preload_cpu_count = len(client_cpuset_cpus.split(","))
2567
2634
  preload_nano_cpus = int(preload_cpu_count * 1e9) # 1 CPU = 1e9 nano_cpus
2568
2635
 
2569
2636
  preload_container_args["cpuset_cpus"] = client_cpuset_cpus
2570
2637
  preload_container_args["nano_cpus"] = preload_nano_cpus
2571
2638
 
2572
- logging.info(f"Preload container will use {preload_cpu_count} CPUs (nano_cpus={preload_nano_cpus}) on cores {client_cpuset_cpus} [CPU distribution enabled]")
2639
+ logging.info(
2640
+ f"Preload container will use {preload_cpu_count} CPUs (nano_cpus={preload_nano_cpus}) on cores {client_cpuset_cpus} [CPU distribution enabled]"
2641
+ )
2573
2642
  else:
2574
- logging.info(f"Preload container will use default CPU allocation [CPU distribution disabled]")
2643
+ logging.info(
2644
+ f"Preload container will use default CPU allocation [CPU distribution disabled]"
2645
+ )
2575
2646
 
2576
2647
  try:
2577
2648
  # Start container with detach=True to enable timeout handling
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: redis-benchmarks-specification
3
- Version: 0.2.19
3
+ Version: 0.2.21
4
4
  Summary: The Redis benchmarks specification describes the cross-language/tools requirements and expectations to foster performance and observability standards around redis related technologies. Members from both industry and academia, including organizations and individuals are encouraged to contribute.
5
5
  Author: filipecosta90
6
6
  Author-email: filipecosta.90@gmail.com
@@ -18,7 +18,7 @@ redis_benchmarks_specification/__common__/package.py,sha256=4uVt1BAZ999LV2rZkq--
18
18
  redis_benchmarks_specification/__common__/runner.py,sha256=TKMUFJ3nLSfmSU7P_ok9oM5-pI4L4tFxsWLUWaUHhbI,16733
19
19
  redis_benchmarks_specification/__common__/spec.py,sha256=D_SN48wg6NMthW_-OS1H5bydSDiuZpfd4WPPj7Vfwmc,5760
20
20
  redis_benchmarks_specification/__common__/suppress_warnings.py,sha256=xpOjJ_piGYWlGq9ITr-ZwSCl2GpreA9juZIBao4fDRs,691
21
- redis_benchmarks_specification/__common__/timeseries.py,sha256=EzNTts2aUS_GoEGaMcl1wpKkFWQ1txseHUfuNGSaSqU,55739
21
+ redis_benchmarks_specification/__common__/timeseries.py,sha256=HxCLQmS1JCaxympxMxMk92HHnmfKehIigZ6GZZWHoLw,56045
22
22
  redis_benchmarks_specification/__compare__/__init__.py,sha256=DtBXRp0Q01XgCFmY-1OIePMyyYihVNAjZ1Y8zwqSDN0,101
23
23
  redis_benchmarks_specification/__compare__/args.py,sha256=CNtA7pI9CJDTBJPGL2pNVfis7VDdxLautwRyka7oUCI,8911
24
24
  redis_benchmarks_specification/__compare__/compare.py,sha256=_AbuV3FZxtUZIdq4qq24LNzPNIdtQQaqrk8bUjn9blk,84327
@@ -33,11 +33,11 @@ redis_benchmarks_specification/__self_contained_coordinator__/artifacts.py,sha25
33
33
  redis_benchmarks_specification/__self_contained_coordinator__/build_info.py,sha256=vlg8H8Rxu2falW8xp1GvL1SV1fyBguSbz6Apxc7A2yM,2282
34
34
  redis_benchmarks_specification/__self_contained_coordinator__/clients.py,sha256=EL1V4-i-tTav1mcF_CUosqPF3Q1qi9BZL0zFajEk70c,1878
35
35
  redis_benchmarks_specification/__self_contained_coordinator__/cpuset.py,sha256=sRvtoJIitppcOpm3R5LbVmSfPEAqPumOqVATnF5Wbek,594
36
- redis_benchmarks_specification/__self_contained_coordinator__/docker.py,sha256=eeRv7GzDVfRDijc6ucjMIO-SNGThE7rTKEBTm_dtN9s,3526
36
+ redis_benchmarks_specification/__self_contained_coordinator__/docker.py,sha256=TuJrnijaK_5Sw9GVL6vi9H1m7f9p3g9c1f7kpU_RHuM,3542
37
37
  redis_benchmarks_specification/__self_contained_coordinator__/post_processing.py,sha256=sVLKNnWdAqYY9DjVdqRC5tDaIrVSaI3Ca7w8-DQ-LRM,776
38
- redis_benchmarks_specification/__self_contained_coordinator__/prepopulation.py,sha256=VYGwUiIV382AjXTeYTOImPENzw2zf2VPQM2HvaZgE_M,3368
39
- redis_benchmarks_specification/__self_contained_coordinator__/runners.py,sha256=kpSNvliGOEBUa8aXXk9M1rb5jKmBI6QqC0znf2lz0kw,34110
40
- redis_benchmarks_specification/__self_contained_coordinator__/self_contained_coordinator.py,sha256=vi2hOOGpHBbIbePMVYCsEan08NbvpSknKw0rM3421l8,120185
38
+ redis_benchmarks_specification/__self_contained_coordinator__/prepopulation.py,sha256=30SU8dAKmRpznjAKbs1n0iHO-CjG5goCuR5TYx7XIMc,3390
39
+ redis_benchmarks_specification/__self_contained_coordinator__/runners.py,sha256=yHgRpNeuBcft6sEeiT12qQ012xXypXsVM8Ov91CE2k0,34234
40
+ redis_benchmarks_specification/__self_contained_coordinator__/self_contained_coordinator.py,sha256=4BoyeizeDnESrYmWt12loP5Z-KbSRm9F56hT7ILlg2A,122966
41
41
  redis_benchmarks_specification/__setups__/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
42
42
  redis_benchmarks_specification/__setups__/topologies.py,sha256=xQ1IJkcTji_ZjLiJd3vOxZpvbNtBLZw9cPkw5hGJKHU,481
43
43
  redis_benchmarks_specification/__spec__/__init__.py,sha256=l-G1z-t6twUgi8QLueqoTQLvJmv3hJoEYskGm6H7L6M,83
@@ -287,8 +287,8 @@ redis_benchmarks_specification/test-suites/memtier_benchmark-playbook-session-st
287
287
  redis_benchmarks_specification/test-suites/memtier_benchmark-playbook-session-storage-1k-sessions.yml,sha256=2egtIxPxCze2jlbAfgsk4v9JSQHNMoPLbDWFEW8olDg,7006
288
288
  redis_benchmarks_specification/test-suites/template.txt,sha256=ezqGiRPOvuSDO0iG7GEf-AGXNfHbgXI89_G0RUEzL88,481
289
289
  redis_benchmarks_specification/vector-search-test-suites/vector_db_benchmark_test.yml,sha256=PD7ow-k4Ll2BkhEC3aIqiaCZt8Hc4aJIp96Lw3J3mcI,791
290
- redis_benchmarks_specification-0.2.19.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
291
- redis_benchmarks_specification-0.2.19.dist-info/METADATA,sha256=FlUSC9vHASXRp0K8PmOJfZK0zr_hXwmtFG7kYBCk630,22767
292
- redis_benchmarks_specification-0.2.19.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
293
- redis_benchmarks_specification-0.2.19.dist-info/entry_points.txt,sha256=x5WBXCZsnDRTZxV7SBGmC65L2k-ygdDOxV8vuKN00Nk,715
294
- redis_benchmarks_specification-0.2.19.dist-info/RECORD,,
290
+ redis_benchmarks_specification-0.2.21.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
291
+ redis_benchmarks_specification-0.2.21.dist-info/METADATA,sha256=J3gaSNDYZZF8N-WqtCg3SsVnfavQHaBvepJAVvMypyY,22767
292
+ redis_benchmarks_specification-0.2.21.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
293
+ redis_benchmarks_specification-0.2.21.dist-info/entry_points.txt,sha256=x5WBXCZsnDRTZxV7SBGmC65L2k-ygdDOxV8vuKN00Nk,715
294
+ redis_benchmarks_specification-0.2.21.dist-info/RECORD,,