redis-benchmarks-specification 0.2.18-py3-none-any.whl → 0.2.19-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release. This version of redis-benchmarks-specification might be problematic.
- redis_benchmarks_specification/__self_contained_coordinator__/args.py +6 -0
- redis_benchmarks_specification/__self_contained_coordinator__/self_contained_coordinator.py +136 -107
- redis_benchmarks_specification/setups/topologies/topologies.yml +9 -9
- {redis_benchmarks_specification-0.2.18.dist-info → redis_benchmarks_specification-0.2.19.dist-info}/METADATA +1 -1
- {redis_benchmarks_specification-0.2.18.dist-info → redis_benchmarks_specification-0.2.19.dist-info}/RECORD +8 -8
- {redis_benchmarks_specification-0.2.18.dist-info → redis_benchmarks_specification-0.2.19.dist-info}/LICENSE +0 -0
- {redis_benchmarks_specification-0.2.18.dist-info → redis_benchmarks_specification-0.2.19.dist-info}/WHEEL +0 -0
- {redis_benchmarks_specification-0.2.18.dist-info → redis_benchmarks_specification-0.2.19.dist-info}/entry_points.txt +0 -0
redis_benchmarks_specification/__self_contained_coordinator__/args.py

@@ -207,4 +207,10 @@ def create_self_contained_coordinator_args(project_name):
         action="store_true",
         help="Skip automatically clearing pending messages and resetting consumer group position on startup. By default, pending messages are cleared and consumer group is reset to latest position to skip old work and recover from crashes.",
     )
+    parser.add_argument(
+        "--enable-cpu-distribution",
+        default=False,
+        action="store_true",
+        help="Enable CPU distribution optimization using Docker cpuset_cpus and nano_cpus parameters. This forces better CPU core distribution for Redis and client containers. Disabled by default.",
+    )
     return parser
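For reference, a minimal standalone sketch (not the project's actual parser construction) of how a store_true option like the one added above behaves: it defaults to False and only flips to True when the flag is passed explicitly.

# Minimal sketch, assuming only the option shown in the hunk above.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--enable-cpu-distribution", default=False, action="store_true")

print(parser.parse_args([]).enable_cpu_distribution)                              # False (default)
print(parser.parse_args(["--enable-cpu-distribution"]).enable_cpu_distribution)   # True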
redis_benchmarks_specification/__self_contained_coordinator__/self_contained_coordinator.py

@@ -772,6 +772,7 @@ def main():
         priority_upper_limit,
         default_baseline_branch,
         default_metrics_str,
+        args=args,
     )


@@ -808,6 +809,7 @@ def self_contained_coordinator_blocking_read(
     default_metrics_str="ALL_STATS.Totals.Ops/sec",
     docker_keep_env=False,
     restore_build_artifacts_default=True,
+    args=None,
 ):
     num_process_streams = 0
     num_process_test_suites = 0

@@ -1405,6 +1407,8 @@ def process_self_contained_coordinator_stream(
             redis_containers,
             run_image,
             temporary_dir,
+            auto_remove=False,
+            enable_cpu_distribution=args.enable_cpu_distribution if args else False,
         )

         r = redis.StrictRedis(
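The args=None default and the "args.enable_cpu_distribution if args else False" guard in the hunks above keep existing callers that never pass args working, while letting the new flag flow down to the container-start helpers. A minimal sketch of that pattern, with illustrative names (start_worker, _Args) that are not from the package:

# Sketch of the backwards-compatible threading pattern (illustrative names).
def start_worker(args=None):
    # Legacy call sites that never pass args still work; the feature stays off.
    enable_cpu_distribution = args.enable_cpu_distribution if args else False
    return enable_cpu_distribution

class _Args:
    enable_cpu_distribution = True

print(start_worker())         # False -> legacy behavior preserved
print(start_worker(_Args()))  # True  -> new flag honored when threaded through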
@@ -1600,63 +1604,72 @@ def process_self_contained_coordinator_stream(
                 f"Using default container timeout: {container_timeout}s"
             )

-            #
-
-
-
-
+            # Prepare client container arguments
+            client_container_args = {
+                "image": client_container_image,
+                "volumes": {
+                    temporary_dir_client: {
+                        "bind": client_mnt_point,
+                        "mode": "rw",
+                    },
+                },
+                "auto_remove": False, # Don't auto-remove so we can get logs if timeout
+                "privileged": True,
+                "working_dir": benchmark_tool_workdir,
+                "command": benchmark_command_str,
+                "network_mode": "host",
+                "detach": True, # Detach to enable timeout
+            }
+
+            # Add CPU distribution settings if enabled
+            enable_cpu_distribution = args.enable_cpu_distribution if args else False
+            if enable_cpu_distribution:
+                client_cpu_count = len(client_cpuset_cpus.split(','))
+                client_nano_cpus = int(client_cpu_count * 1e9) # 1 CPU = 1e9 nano_cpus
+
+                client_container_args["cpuset_cpus"] = client_cpuset_cpus
+                client_container_args["nano_cpus"] = client_nano_cpus
+
+                logging.info(f"Client container will use {client_cpu_count} CPUs (nano_cpus={client_nano_cpus}) on cores {client_cpuset_cpus} [CPU distribution enabled]")
+            else:
+                logging.info(f"Client container will use default CPU allocation [CPU distribution disabled]")

             try:
                 # Start container with detach=True to enable timeout handling
-                container = docker_client.containers.run(
-                    image=client_container_image,
-                    volumes={
-                        temporary_dir_client: {
-                            "bind": client_mnt_point,
-                            "mode": "rw",
-                        },
-                    },
-                    auto_remove=False, # Don't auto-remove so we can get logs if timeout
-                    privileged=True,
-                    working_dir=benchmark_tool_workdir,
-                    command=benchmark_command_str,
-                    network_mode="host",
-                    detach=True, # Detach to enable timeout
-                    cpuset_cpus=client_cpuset_cpus,
-                    nano_cpus=client_nano_cpus, # Force CPU distribution
-                )
+                container = docker_client.containers.run(**client_container_args)

                 logging.info(
                     f"Started container {container.name} ({container.id[:12]}) with {container_timeout}s timeout"
                 )

-                # Apply CPU affinity using taskset
-
-
-
+                # Apply CPU affinity using taskset if CPU distribution is enabled
+                if enable_cpu_distribution:
+                    try:
+                        container_info = docker_client.api.inspect_container(container.id)
+                        container_pid = container_info['State']['Pid']

-
+                        logging.info(f"Setting CPU affinity for client container PID {container_pid} to cores {client_cpuset_cpus}")

-
-
+                        # Set CPU affinity for the main process and all its threads
+                        subprocess.run(f"taskset -cp {client_cpuset_cpus} {container_pid}", shell=True, check=True)

-
-
-
-
-
-
-
-
-
-
-
-
+                        # Wait a moment for client to start its threads, then set affinity for all child processes
+                        time.sleep(1)
+                        result = subprocess.run(f"pgrep -P {container_pid}", shell=True, capture_output=True, text=True)
+                        if result.returncode == 0:
+                            child_pids = result.stdout.strip().split('\n')
+                            for child_pid in child_pids:
+                                if child_pid.strip():
+                                    try:
+                                        subprocess.run(f"taskset -cp {client_cpuset_cpus} {child_pid.strip()}", shell=True, check=True)
+                                        logging.info(f"Set CPU affinity for client child process {child_pid.strip()}")
+                                    except subprocess.CalledProcessError:
+                                        pass # Child process may have exited

-
+                        logging.info(f"✅ Applied CPU affinity to client container and all child processes")

-
-
+                    except Exception as e:
+                        logging.warning(f"Failed to set CPU affinity for client container: {e}")

                 # Wait for container with timeout
                 try:
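A small, self-contained sketch of the cpuset/nano_cpus arithmetic used above: Docker's nano_cpus is billionths of a CPU, so one full core is 1e9. The helper name cpu_distribution_kwargs is illustrative, and the cpuset string format ("0,1,2,3", a comma-separated list rather than a range) is assumed from the split(',') in the diff. The same calculation is applied to the Redis, client, and preload containers, and the resulting keys are merged into the base kwargs dict before the single docker_client.containers.run(**...) call, which is why the refactor builds the run arguments as a dict instead of literal keyword arguments.

# Illustrative sketch of the CPU-distribution kwargs built above (no Docker needed).
def cpu_distribution_kwargs(cpuset_cpus, enabled):
    kwargs = {}
    if enabled:
        cpu_count = len(cpuset_cpus.split(","))     # e.g. "0,1,2,3" -> 4 cores
        kwargs["cpuset_cpus"] = cpuset_cpus         # pin the container to these cores
        kwargs["nano_cpus"] = int(cpu_count * 1e9)  # 1e9 nano_cpus == 1 full CPU
    return kwargs

print(cpu_distribution_kwargs("0,1,2,3", enabled=True))
# {'cpuset_cpus': '0,1,2,3', 'nano_cpus': 4000000000}
print(cpu_distribution_kwargs("0,1,2,3", enabled=False))
# {}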
@@ -2266,6 +2279,7 @@ def start_redis_container(
     run_image,
     temporary_dir,
     auto_remove=False,
+    enable_cpu_distribution=False,
 ):
     logging.info(
         "Running redis-server on docker image {} (cpuset={}) with the following args: {}".format(
@@ -2284,55 +2298,62 @@ def start_redis_container(
     logging.info(f"setting volume as follow: {volumes}. working_dir={mnt_point}")
     working_dir = mnt_point

-    #
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    # Prepare container arguments
+    container_args = {
+        "image": run_image,
+        "volumes": volumes,
+        "auto_remove": auto_remove,
+        "privileged": True,
+        "working_dir": mnt_point,
+        "command": command_str,
+        "network_mode": "host",
+        "detach": True,
+        "publish_all_ports": True,
+    }
+
+    # Add CPU distribution settings if enabled
+    if enable_cpu_distribution:
+        redis_cpu_count = len(db_cpuset_cpus.split(','))
+        redis_nano_cpus = int(redis_cpu_count * 1e9) # 1 CPU = 1e9 nano_cpus
+
+        container_args["cpuset_cpus"] = db_cpuset_cpus
+        container_args["nano_cpus"] = redis_nano_cpus
+
+        logging.info(f"Redis container will use {redis_cpu_count} CPUs (nano_cpus={redis_nano_cpus}) on cores {db_cpuset_cpus} [CPU distribution enabled]")
+    else:
+        logging.info(f"Redis container will use default CPU allocation [CPU distribution disabled]")
+
+    redis_container = docker_client.containers.run(**container_args)
     time.sleep(5)

-    # Apply CPU affinity using taskset
-
-
-
+    # Apply CPU affinity using taskset if CPU distribution is enabled
+    if enable_cpu_distribution:
+        try:
+            container_info = docker_client.api.inspect_container(redis_container.id)
+            container_pid = container_info['State']['Pid']

-
+            logging.info(f"Setting CPU affinity for Redis container PID {container_pid} to cores {db_cpuset_cpus}")

-
-
+            # Set CPU affinity for the main Redis process and all its threads
+            subprocess.run(f"taskset -cp {db_cpuset_cpus} {container_pid}", shell=True, check=True)

-
-
-
-
-
-
-
-
-
-
-
-
+            # Wait a moment for Redis to start its IO threads, then set affinity for all Redis processes
+            time.sleep(2)
+            result = subprocess.run(f"pgrep -P {container_pid}", shell=True, capture_output=True, text=True)
+            if result.returncode == 0:
+                child_pids = result.stdout.strip().split('\n')
+                for child_pid in child_pids:
+                    if child_pid.strip():
+                        try:
+                            subprocess.run(f"taskset -cp {db_cpuset_cpus} {child_pid.strip()}", shell=True, check=True)
+                            logging.info(f"Set CPU affinity for Redis child process {child_pid.strip()}")
+                        except subprocess.CalledProcessError:
+                            pass # Child process may have exited

-
+            logging.info(f"✅ Applied CPU affinity to Redis container and all child processes")

-
-
+        except Exception as e:
+            logging.warning(f"Failed to set CPU affinity for Redis container: {e}")

     redis_containers.append(redis_container)
     return redis_container
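The taskset/pgrep sequence above works from the host side, using the container's process ID as reported by docker inspect (State.Pid). A hedged sketch of that affinity step follows; the helper name pin_tree_to_cores is hypothetical, list-form subprocess calls are used instead of the shell=True strings in the diff, and it assumes a Linux host with taskset and pgrep installed plus sufficient privileges to retarget the container's processes.

# Host-side affinity sketch, assuming taskset/pgrep are available on the host.
import subprocess

def pin_tree_to_cores(pid, cpuset):
    # Pin the container's main process (PID as seen from the host).
    subprocess.run(["taskset", "-cp", cpuset, str(pid)], check=True)
    # Then pin any direct child processes that exist at this moment.
    probe = subprocess.run(["pgrep", "-P", str(pid)], capture_output=True, text=True)
    for child in probe.stdout.split():
        subprocess.run(["taskset", "-cp", cpuset, child], check=False)

# Example (hypothetical PID): pin_tree_to_cores(12345, "0,1")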
@@ -2522,31 +2543,39 @@ def data_prepopulation_step(
         preload_timeout = 1800 # 30 minutes default for data loading
         logging.info(f"Starting preload container with {preload_timeout}s timeout")

-        #
-
-
+        # Prepare preload container arguments
+        preload_container_args = {
+            "image": preload_image,
+            "volumes": {
+                temporary_dir: {
+                    "bind": client_mnt_point,
+                    "mode": "rw",
+                },
+            },
+            "auto_remove": False, # Don't auto-remove so we can get logs if timeout
+            "privileged": True,
+            "working_dir": benchmark_tool_workdir,
+            "command": preload_command_str,
+            "network_mode": "host",
+            "detach": True, # Detach to enable timeout
+        }
+
+        # Add CPU distribution settings if enabled
+        enable_cpu_distribution = args.enable_cpu_distribution if args else False
+        if enable_cpu_distribution:
+            preload_cpu_count = len(client_cpuset_cpus.split(','))
+            preload_nano_cpus = int(preload_cpu_count * 1e9) # 1 CPU = 1e9 nano_cpus
+
+            preload_container_args["cpuset_cpus"] = client_cpuset_cpus
+            preload_container_args["nano_cpus"] = preload_nano_cpus

-
+            logging.info(f"Preload container will use {preload_cpu_count} CPUs (nano_cpus={preload_nano_cpus}) on cores {client_cpuset_cpus} [CPU distribution enabled]")
+        else:
+            logging.info(f"Preload container will use default CPU allocation [CPU distribution disabled]")

         try:
             # Start container with detach=True to enable timeout handling
-            container = docker_client.containers.run(
-                image=preload_image,
-                volumes={
-                    temporary_dir: {
-                        "bind": client_mnt_point,
-                        "mode": "rw",
-                    },
-                },
-                auto_remove=False, # Don't auto-remove so we can get logs if timeout
-                privileged=True,
-                working_dir=benchmark_tool_workdir,
-                command=preload_command_str,
-                network_mode="host",
-                detach=True, # Detach to enable timeout
-                cpuset_cpus=client_cpuset_cpus,
-                nano_cpus=preload_nano_cpus, # Force CPU distribution
-            )
+            container = docker_client.containers.run(**preload_container_args)

             logging.info(
                 f"Started preload container {container.name} ({container.id[:12]}) with {preload_timeout}s timeout"
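The inline comments above ("Detach to enable timeout", "Don't auto-remove so we can get logs if timeout") describe a common docker SDK pattern. A hedged sketch of it follows; the image, command, timeout value, and the wait/kill/logs flow are assumptions for illustration, not code copied from the package.

# Sketch of the detach + timeout + logs pattern, assuming the Docker SDK for Python.
import docker

client = docker.from_env()
container = client.containers.run(
    "redis:7", "redis-server --version",
    detach=True,        # detach so the caller controls how long to wait
    auto_remove=False,  # keep the container around so logs survive a timeout
)
try:
    container.wait(timeout=30)  # raises a requests-level error on timeout
except Exception:
    try:
        container.kill()        # stop a runaway benchmark/preload run
    except docker.errors.APIError:
        pass                    # it may already have exited
finally:
    print(container.logs().decode())  # logs are available either way
    container.remove(force=True)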
redis_benchmarks_specification/setups/topologies/topologies.yml

@@ -26,10 +26,10 @@ spec:
     redis_topology:
       primaries: 1
       replicas: 0
-    redis_arguments: --io-threads 2 --io-threads-do-reads yes
+    redis_arguments: --io-threads 2 --io-threads-do-reads yes
     resources:
       requests:
-        cpus: "
+        cpus: "2"
         memory: "10g"

   - name: oss-standalone-04-io-threads

@@ -37,10 +37,10 @@ spec:
     redis_topology:
       primaries: 1
       replicas: 0
-    redis_arguments: --io-threads 4 --io-threads-do-reads yes
+    redis_arguments: --io-threads 4 --io-threads-do-reads yes
     resources:
       requests:
-        cpus: "
+        cpus: "4"
         memory: "10g"

   - name: oss-standalone-08-io-threads

@@ -48,10 +48,10 @@ spec:
     redis_topology:
       primaries: 1
       replicas: 0
-    redis_arguments: --io-threads 8 --io-threads-do-reads yes
+    redis_arguments: --io-threads 8 --io-threads-do-reads yes
     resources:
       requests:
-        cpus: "
+        cpus: "8"
         memory: "10g"

   - name: oss-standalone-16-io-threads

@@ -62,7 +62,7 @@ spec:
     redis_arguments: --io-threads 16 --io-threads-do-reads yes
     resources:
       requests:
-        cpus: "
+        cpus: "16"
         memory: "10g"

   - name: oss-standalone-32-io-threads

@@ -73,7 +73,7 @@ spec:
     redis_arguments: --io-threads 32 --io-threads-do-reads yes
     resources:
       requests:
-        cpus: "
+        cpus: "32"
         memory: "10g"

   - name: oss-standalone-64-io-threads

@@ -84,7 +84,7 @@ spec:
     redis_arguments: --io-threads 64 --io-threads-do-reads yes
     resources:
       requests:
-        cpus: "
+        cpus: "64"
         memory: "10g"
   - name: oss-standalone-1replica
     type: oss-standalone
{redis_benchmarks_specification-0.2.18.dist-info → redis_benchmarks_specification-0.2.19.dist-info}/METADATA

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: redis-benchmarks-specification
-Version: 0.2.18
+Version: 0.2.19
 Summary: The Redis benchmarks specification describes the cross-language/tools requirements and expectations to foster performance and observability standards around redis related technologies. Members from both industry and academia, including organizations and individuals are encouraged to contribute.
 Author: filipecosta90
 Author-email: filipecosta.90@gmail.com
{redis_benchmarks_specification-0.2.18.dist-info → redis_benchmarks_specification-0.2.19.dist-info}/RECORD

@@ -28,7 +28,7 @@ redis_benchmarks_specification/__runner__/args.py,sha256=K3VGmBC0-9lSv9H6VDp0N-6
 redis_benchmarks_specification/__runner__/remote_profiling.py,sha256=R7obNQju8mmY9oKkcndjI4aAuxi84OCLhDSqqaYu1SU,18610
 redis_benchmarks_specification/__runner__/runner.py,sha256=lWIpjThVeYW1hxR2dl2OSIeAwUXrWVHKHfQqsD9mrF8,155505
 redis_benchmarks_specification/__self_contained_coordinator__/__init__.py,sha256=l-G1z-t6twUgi8QLueqoTQLvJmv3hJoEYskGm6H7L6M,83
-redis_benchmarks_specification/__self_contained_coordinator__/args.py,sha256=
+redis_benchmarks_specification/__self_contained_coordinator__/args.py,sha256=hZo-zfVzj_LUamMgFva2QZQv5PwhUf1dHCMxdu9voMc,7648
 redis_benchmarks_specification/__self_contained_coordinator__/artifacts.py,sha256=OVHqJzDgeSSRfUSiKp1ZTAVv14PvSbk-5yJsAAoUfpw,936
 redis_benchmarks_specification/__self_contained_coordinator__/build_info.py,sha256=vlg8H8Rxu2falW8xp1GvL1SV1fyBguSbz6Apxc7A2yM,2282
 redis_benchmarks_specification/__self_contained_coordinator__/clients.py,sha256=EL1V4-i-tTav1mcF_CUosqPF3Q1qi9BZL0zFajEk70c,1878

@@ -37,7 +37,7 @@ redis_benchmarks_specification/__self_contained_coordinator__/docker.py,sha256=e
 redis_benchmarks_specification/__self_contained_coordinator__/post_processing.py,sha256=sVLKNnWdAqYY9DjVdqRC5tDaIrVSaI3Ca7w8-DQ-LRM,776
 redis_benchmarks_specification/__self_contained_coordinator__/prepopulation.py,sha256=VYGwUiIV382AjXTeYTOImPENzw2zf2VPQM2HvaZgE_M,3368
 redis_benchmarks_specification/__self_contained_coordinator__/runners.py,sha256=kpSNvliGOEBUa8aXXk9M1rb5jKmBI6QqC0znf2lz0kw,34110
-redis_benchmarks_specification/__self_contained_coordinator__/self_contained_coordinator.py,sha256=
+redis_benchmarks_specification/__self_contained_coordinator__/self_contained_coordinator.py,sha256=vi2hOOGpHBbIbePMVYCsEan08NbvpSknKw0rM3421l8,120185
 redis_benchmarks_specification/__setups__/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 redis_benchmarks_specification/__setups__/topologies.py,sha256=xQ1IJkcTji_ZjLiJd3vOxZpvbNtBLZw9cPkw5hGJKHU,481
 redis_benchmarks_specification/__spec__/__init__.py,sha256=l-G1z-t6twUgi8QLueqoTQLvJmv3hJoEYskGm6H7L6M,83

@@ -51,7 +51,7 @@ redis_benchmarks_specification/commands/commands.py,sha256=hJbKkGzAFt_l40fJyQLfB
 redis_benchmarks_specification/setups/builders/gcc:15.2.0-amd64-debian-bookworm-default.yml,sha256=UobsjPRRQALKNvAkOqvYJJs8HLrlG9AbfJwuIkLpwHU,528
 redis_benchmarks_specification/setups/builders/gcc:15.2.0-arm64-debian-bookworm-default.yml,sha256=zexg-qwlrdjNEsJDigcwQgm-CluwtrWHPygvXzv0wwo,528
 redis_benchmarks_specification/setups/platforms/aws-ec2-1node-c5.4xlarge.yml,sha256=l7HsjccpebwZXeutnt3SHSETw4iiRwQ9dCDXLOySSRQ,622
-redis_benchmarks_specification/setups/topologies/topologies.yml,sha256=
+redis_benchmarks_specification/setups/topologies/topologies.yml,sha256=37NQPN0H5WGk6oOnlxzkOVbj3-bzVedRHEEYAULFF9g,3264
 redis_benchmarks_specification/test-suites/defaults.yml,sha256=EJHv9INdjoNVMOgHY8qo4IVCHfvXVz5sv7Vxtr3DAIE,1392
 redis_benchmarks_specification/test-suites/generate.py,sha256=1QJBuWiouJ5OLil_r4OMG_UtZkmA8TLcyPlQAUuxCUw,4175
 redis_benchmarks_specification/test-suites/memtier_benchmark-100Kkeys-hash-hgetall-50-fields-100B-values.yml,sha256=BYmI-u_QEoSVXczuWPIaoCU_IQdhPrNCdXj8SO4s60g,2317

@@ -287,8 +287,8 @@ redis_benchmarks_specification/test-suites/memtier_benchmark-playbook-session-st
 redis_benchmarks_specification/test-suites/memtier_benchmark-playbook-session-storage-1k-sessions.yml,sha256=2egtIxPxCze2jlbAfgsk4v9JSQHNMoPLbDWFEW8olDg,7006
 redis_benchmarks_specification/test-suites/template.txt,sha256=ezqGiRPOvuSDO0iG7GEf-AGXNfHbgXI89_G0RUEzL88,481
 redis_benchmarks_specification/vector-search-test-suites/vector_db_benchmark_test.yml,sha256=PD7ow-k4Ll2BkhEC3aIqiaCZt8Hc4aJIp96Lw3J3mcI,791
-redis_benchmarks_specification-0.2.
-redis_benchmarks_specification-0.2.
-redis_benchmarks_specification-0.2.
-redis_benchmarks_specification-0.2.
-redis_benchmarks_specification-0.2.
+redis_benchmarks_specification-0.2.19.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+redis_benchmarks_specification-0.2.19.dist-info/METADATA,sha256=FlUSC9vHASXRp0K8PmOJfZK0zr_hXwmtFG7kYBCk630,22767
+redis_benchmarks_specification-0.2.19.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+redis_benchmarks_specification-0.2.19.dist-info/entry_points.txt,sha256=x5WBXCZsnDRTZxV7SBGmC65L2k-ygdDOxV8vuKN00Nk,715
+redis_benchmarks_specification-0.2.19.dist-info/RECORD,,

{redis_benchmarks_specification-0.2.18.dist-info → redis_benchmarks_specification-0.2.19.dist-info}/LICENSE, WHEEL and entry_points.txt: files without changes.