redis-benchmarks-specification 0.2.14__py3-none-any.whl → 0.2.16__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of redis-benchmarks-specification might be problematic. Click here for more details.
- redis_benchmarks_specification/__self_contained_coordinator__/docker.py +7 -0
- redis_benchmarks_specification/__self_contained_coordinator__/prepopulation.py +7 -0
- redis_benchmarks_specification/__self_contained_coordinator__/runners.py +7 -0
- redis_benchmarks_specification/__self_contained_coordinator__/self_contained_coordinator.py +22 -0
- {redis_benchmarks_specification-0.2.14.dist-info → redis_benchmarks_specification-0.2.16.dist-info}/METADATA +1 -1
- {redis_benchmarks_specification-0.2.14.dist-info → redis_benchmarks_specification-0.2.16.dist-info}/RECORD +9 -9
- {redis_benchmarks_specification-0.2.14.dist-info → redis_benchmarks_specification-0.2.16.dist-info}/LICENSE +0 -0
- {redis_benchmarks_specification-0.2.14.dist-info → redis_benchmarks_specification-0.2.16.dist-info}/WHEEL +0 -0
- {redis_benchmarks_specification-0.2.14.dist-info → redis_benchmarks_specification-0.2.16.dist-info}/entry_points.txt +0 -0
|
@@ -85,11 +85,17 @@ def spin_docker_standalone_redis(
|
|
|
85
85
|
db_cpuset_cpus, current_cpu_pos = generate_cpuset_cpus(
|
|
86
86
|
ceil_db_cpu_limit, current_cpu_pos
|
|
87
87
|
)
|
|
88
|
+
# Calculate nano_cpus for better CPU distribution
|
|
89
|
+
redis_cpu_count = len(db_cpuset_cpus.split(','))
|
|
90
|
+
redis_nano_cpus = int(redis_cpu_count * 1e9) # 1 CPU = 1e9 nano_cpus
|
|
91
|
+
|
|
88
92
|
logging.info(
|
|
89
93
|
"Running redis-server on docker image {} (cpuset={}) with the following args: {}".format(
|
|
90
94
|
run_image, db_cpuset_cpus, command_str
|
|
91
95
|
)
|
|
92
96
|
)
|
|
97
|
+
logging.info(f"Redis container will use {redis_cpu_count} CPUs (nano_cpus={redis_nano_cpus}) on cores {db_cpuset_cpus}")
|
|
98
|
+
|
|
93
99
|
container = docker_client.containers.run(
|
|
94
100
|
image=run_image,
|
|
95
101
|
volumes={
|
|
@@ -102,6 +108,7 @@ def spin_docker_standalone_redis(
|
|
|
102
108
|
network_mode="host",
|
|
103
109
|
detach=True,
|
|
104
110
|
cpuset_cpus=db_cpuset_cpus,
|
|
111
|
+
nano_cpus=redis_nano_cpus, # Force CPU distribution
|
|
105
112
|
pid_mode="host",
|
|
106
113
|
)
|
|
107
114
|
redis_containers.append(container)
|
|
@@ -67,6 +67,12 @@ def data_prepopulation_step(
|
|
|
67
67
|
# run the benchmark
|
|
68
68
|
preload_start_time = datetime.datetime.now()
|
|
69
69
|
|
|
70
|
+
# Calculate nano_cpus for better CPU distribution
|
|
71
|
+
preload_cpu_count = len(client_cpuset_cpus.split(','))
|
|
72
|
+
preload_nano_cpus = int(preload_cpu_count * 1e9) # 1 CPU = 1e9 nano_cpus
|
|
73
|
+
|
|
74
|
+
logging.info(f"Preload container will use {preload_cpu_count} CPUs (nano_cpus={preload_nano_cpus}) on cores {client_cpuset_cpus}")
|
|
75
|
+
|
|
70
76
|
client_container_stdout = docker_client.containers.run(
|
|
71
77
|
image=preload_image,
|
|
72
78
|
volumes={
|
|
@@ -82,6 +88,7 @@ def data_prepopulation_step(
|
|
|
82
88
|
network_mode="host",
|
|
83
89
|
detach=False,
|
|
84
90
|
cpuset_cpus=client_cpuset_cpus,
|
|
91
|
+
nano_cpus=preload_nano_cpus, # Force CPU distribution
|
|
85
92
|
)
|
|
86
93
|
|
|
87
94
|
preload_end_time = datetime.datetime.now()
|
|
@@ -540,6 +540,12 @@ def process_self_contained_coordinator_stream(
|
|
|
540
540
|
# run the benchmark
|
|
541
541
|
benchmark_start_time = datetime.datetime.now()
|
|
542
542
|
|
|
543
|
+
# Calculate nano_cpus for better CPU distribution
|
|
544
|
+
client_cpu_count = len(client_cpuset_cpus.split(','))
|
|
545
|
+
client_nano_cpus = int(client_cpu_count * 1e9) # 1 CPU = 1e9 nano_cpus
|
|
546
|
+
|
|
547
|
+
logging.info(f"Client container will use {client_cpu_count} CPUs (nano_cpus={client_nano_cpus}) on cores {client_cpuset_cpus}")
|
|
548
|
+
|
|
543
549
|
client_container_stdout = docker_client.containers.run(
|
|
544
550
|
image=client_container_image,
|
|
545
551
|
volumes={
|
|
@@ -555,6 +561,7 @@ def process_self_contained_coordinator_stream(
|
|
|
555
561
|
network_mode="host",
|
|
556
562
|
detach=False,
|
|
557
563
|
cpuset_cpus=client_cpuset_cpus,
|
|
564
|
+
nano_cpus=client_nano_cpus, # Force CPU distribution
|
|
558
565
|
)
|
|
559
566
|
|
|
560
567
|
benchmark_end_time = datetime.datetime.now()
|
|
@@ -1600,6 +1600,12 @@ def process_self_contained_coordinator_stream(
|
|
|
1600
1600
|
f"Using default container timeout: {container_timeout}s"
|
|
1601
1601
|
)
|
|
1602
1602
|
|
|
1603
|
+
# Calculate nano_cpus for better CPU distribution
|
|
1604
|
+
client_cpu_count = len(client_cpuset_cpus.split(','))
|
|
1605
|
+
client_nano_cpus = int(client_cpu_count * 1e9) # 1 CPU = 1e9 nano_cpus
|
|
1606
|
+
|
|
1607
|
+
logging.info(f"Client container will use {client_cpu_count} CPUs (nano_cpus={client_nano_cpus}) on cores {client_cpuset_cpus}")
|
|
1608
|
+
|
|
1603
1609
|
try:
|
|
1604
1610
|
# Start container with detach=True to enable timeout handling
|
|
1605
1611
|
container = docker_client.containers.run(
|
|
@@ -1617,6 +1623,7 @@ def process_self_contained_coordinator_stream(
|
|
|
1617
1623
|
network_mode="host",
|
|
1618
1624
|
detach=True, # Detach to enable timeout
|
|
1619
1625
|
cpuset_cpus=client_cpuset_cpus,
|
|
1626
|
+
nano_cpus=client_nano_cpus, # Force CPU distribution
|
|
1620
1627
|
)
|
|
1621
1628
|
|
|
1622
1629
|
logging.info(
|
|
@@ -2248,6 +2255,13 @@ def start_redis_container(
|
|
|
2248
2255
|
}
|
|
2249
2256
|
logging.info(f"setting volume as follow: {volumes}. working_dir={mnt_point}")
|
|
2250
2257
|
working_dir = mnt_point
|
|
2258
|
+
|
|
2259
|
+
# Calculate nano_cpus for better CPU distribution
|
|
2260
|
+
redis_cpu_count = len(db_cpuset_cpus.split(','))
|
|
2261
|
+
redis_nano_cpus = int(redis_cpu_count * 1e9) # 1 CPU = 1e9 nano_cpus
|
|
2262
|
+
|
|
2263
|
+
logging.info(f"Redis container will use {redis_cpu_count} CPUs (nano_cpus={redis_nano_cpus}) on cores {db_cpuset_cpus}")
|
|
2264
|
+
|
|
2251
2265
|
redis_container = docker_client.containers.run(
|
|
2252
2266
|
image=run_image,
|
|
2253
2267
|
volumes=volumes,
|
|
@@ -2258,6 +2272,7 @@ def start_redis_container(
|
|
|
2258
2272
|
network_mode="host",
|
|
2259
2273
|
detach=True,
|
|
2260
2274
|
cpuset_cpus=db_cpuset_cpus,
|
|
2275
|
+
nano_cpus=redis_nano_cpus, # Force CPU distribution
|
|
2261
2276
|
pid_mode="host",
|
|
2262
2277
|
publish_all_ports=True,
|
|
2263
2278
|
)
|
|
@@ -2450,6 +2465,12 @@ def data_prepopulation_step(
|
|
|
2450
2465
|
preload_timeout = 1800 # 30 minutes default for data loading
|
|
2451
2466
|
logging.info(f"Starting preload container with {preload_timeout}s timeout")
|
|
2452
2467
|
|
|
2468
|
+
# Calculate nano_cpus for better CPU distribution
|
|
2469
|
+
preload_cpu_count = len(client_cpuset_cpus.split(','))
|
|
2470
|
+
preload_nano_cpus = int(preload_cpu_count * 1e9) # 1 CPU = 1e9 nano_cpus
|
|
2471
|
+
|
|
2472
|
+
logging.info(f"Preload container will use {preload_cpu_count} CPUs (nano_cpus={preload_nano_cpus}) on cores {client_cpuset_cpus}")
|
|
2473
|
+
|
|
2453
2474
|
try:
|
|
2454
2475
|
# Start container with detach=True to enable timeout handling
|
|
2455
2476
|
container = docker_client.containers.run(
|
|
@@ -2467,6 +2488,7 @@ def data_prepopulation_step(
|
|
|
2467
2488
|
network_mode="host",
|
|
2468
2489
|
detach=True, # Detach to enable timeout
|
|
2469
2490
|
cpuset_cpus=client_cpuset_cpus,
|
|
2491
|
+
nano_cpus=preload_nano_cpus, # Force CPU distribution
|
|
2470
2492
|
)
|
|
2471
2493
|
|
|
2472
2494
|
logging.info(
|
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.1
|
|
2
2
|
Name: redis-benchmarks-specification
|
|
3
|
-
Version: 0.2.14
|
|
3
|
+
Version: 0.2.16
|
|
4
4
|
Summary: The Redis benchmarks specification describes the cross-language/tools requirements and expectations to foster performance and observability standards around redis related technologies. Members from both industry and academia, including organizations and individuals are encouraged to contribute.
|
|
5
5
|
Author: filipecosta90
|
|
6
6
|
Author-email: filipecosta.90@gmail.com
|
|
@@ -33,11 +33,11 @@ redis_benchmarks_specification/__self_contained_coordinator__/artifacts.py,sha25
|
|
|
33
33
|
redis_benchmarks_specification/__self_contained_coordinator__/build_info.py,sha256=vlg8H8Rxu2falW8xp1GvL1SV1fyBguSbz6Apxc7A2yM,2282
|
|
34
34
|
redis_benchmarks_specification/__self_contained_coordinator__/clients.py,sha256=EL1V4-i-tTav1mcF_CUosqPF3Q1qi9BZL0zFajEk70c,1878
|
|
35
35
|
redis_benchmarks_specification/__self_contained_coordinator__/cpuset.py,sha256=sRvtoJIitppcOpm3R5LbVmSfPEAqPumOqVATnF5Wbek,594
|
|
36
|
-
redis_benchmarks_specification/__self_contained_coordinator__/docker.py,sha256=
|
|
36
|
+
redis_benchmarks_specification/__self_contained_coordinator__/docker.py,sha256=eEk1UxmKGzhVwiWjlgX6lJU2-JJri3sDNOhD-vFCx3o,3525
|
|
37
37
|
redis_benchmarks_specification/__self_contained_coordinator__/post_processing.py,sha256=sVLKNnWdAqYY9DjVdqRC5tDaIrVSaI3Ca7w8-DQ-LRM,776
|
|
38
|
-
redis_benchmarks_specification/__self_contained_coordinator__/prepopulation.py,sha256=
|
|
39
|
-
redis_benchmarks_specification/__self_contained_coordinator__/runners.py,sha256=
|
|
40
|
-
redis_benchmarks_specification/__self_contained_coordinator__/self_contained_coordinator.py,sha256=
|
|
38
|
+
redis_benchmarks_specification/__self_contained_coordinator__/prepopulation.py,sha256=VYGwUiIV382AjXTeYTOImPENzw2zf2VPQM2HvaZgE_M,3368
|
|
39
|
+
redis_benchmarks_specification/__self_contained_coordinator__/runners.py,sha256=kpSNvliGOEBUa8aXXk9M1rb5jKmBI6QqC0znf2lz0kw,34110
|
|
40
|
+
redis_benchmarks_specification/__self_contained_coordinator__/self_contained_coordinator.py,sha256=Kjb_BQ8SLZDgItAPE1PDhTT2AC6f1yW3gfAXOi_Yirw,114906
|
|
41
41
|
redis_benchmarks_specification/__setups__/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
42
42
|
redis_benchmarks_specification/__setups__/topologies.py,sha256=xQ1IJkcTji_ZjLiJd3vOxZpvbNtBLZw9cPkw5hGJKHU,481
|
|
43
43
|
redis_benchmarks_specification/__spec__/__init__.py,sha256=l-G1z-t6twUgi8QLueqoTQLvJmv3hJoEYskGm6H7L6M,83
|
|
@@ -287,8 +287,8 @@ redis_benchmarks_specification/test-suites/memtier_benchmark-playbook-session-st
|
|
|
287
287
|
redis_benchmarks_specification/test-suites/memtier_benchmark-playbook-session-storage-1k-sessions.yml,sha256=2egtIxPxCze2jlbAfgsk4v9JSQHNMoPLbDWFEW8olDg,7006
|
|
288
288
|
redis_benchmarks_specification/test-suites/template.txt,sha256=ezqGiRPOvuSDO0iG7GEf-AGXNfHbgXI89_G0RUEzL88,481
|
|
289
289
|
redis_benchmarks_specification/vector-search-test-suites/vector_db_benchmark_test.yml,sha256=PD7ow-k4Ll2BkhEC3aIqiaCZt8Hc4aJIp96Lw3J3mcI,791
|
|
290
|
-
redis_benchmarks_specification-0.2.
|
|
291
|
-
redis_benchmarks_specification-0.2.
|
|
292
|
-
redis_benchmarks_specification-0.2.
|
|
293
|
-
redis_benchmarks_specification-0.2.
|
|
294
|
-
redis_benchmarks_specification-0.2.
|
|
290
|
+
redis_benchmarks_specification-0.2.16.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
|
|
291
|
+
redis_benchmarks_specification-0.2.16.dist-info/METADATA,sha256=u4dNxQ_Wq9C4Pi6mzkb8Tav4OgnDbvdiWpaFqY5YE6Q,22767
|
|
292
|
+
redis_benchmarks_specification-0.2.16.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
|
|
293
|
+
redis_benchmarks_specification-0.2.16.dist-info/entry_points.txt,sha256=x5WBXCZsnDRTZxV7SBGmC65L2k-ygdDOxV8vuKN00Nk,715
|
|
294
|
+
redis_benchmarks_specification-0.2.16.dist-info/RECORD,,
|
|
File without changes
|
|
File without changes
|
|
File without changes
|