redis-benchmarks-specification 0.1.300__py3-none-any.whl → 0.1.303__py3-none-any.whl

This diff shows the changes between publicly released versions of this package as published to a supported registry. The information is provided for informational purposes only and reflects the package contents exactly as they appear in their respective public registries.

Potentially problematic release.


This version of redis-benchmarks-specification has been flagged as potentially problematic; consult the package registry's advisory page for details.

@@ -3,23 +3,26 @@ import logging
3
3
  import os
4
4
  import pathlib
5
5
  import re
6
-
6
+ import datetime as dt
7
7
  import redis
8
- from redisbench_admin.run.metrics import collect_redis_metrics
9
- from redisbench_admin.run_remote.run_remote import export_redis_metrics
10
8
 
11
9
  from redis_benchmarks_specification.__common__.timeseries import (
12
10
  timeseries_test_sucess_flow,
11
+ push_data_to_redistimeseries,
12
+ get_project_ts_tags,
13
13
  )
14
14
 
15
15
 
16
16
  def execute_init_commands(benchmark_config, r, dbconfig_keyname="dbconfig"):
17
17
  cmds = None
18
+ lua_scripts = None
18
19
  res = 0
19
20
  if dbconfig_keyname in benchmark_config:
20
21
  for k, v in benchmark_config[dbconfig_keyname].items():
21
22
  if "init_commands" in k:
22
23
  cmds = v
24
+ elif "init_lua" in k:
25
+ lua_scripts = v
23
26
 
24
27
  if type(cmds) == str:
25
28
  cmds = [cmds]
@@ -37,7 +40,7 @@ def execute_init_commands(benchmark_config, r, dbconfig_keyname="dbconfig"):
37
40
  quoting=csv.QUOTE_ALL,
38
41
  skipinitialspace=True,
39
42
  ):
40
- if lines[0] != " " and len(lines[0]) > 0:
43
+ if len(lines) > 0 and lines[0] != " " and len(lines[0]) > 0:
41
44
  cols.append(lines[0])
42
45
  cmd = cols
43
46
  is_array = True
@@ -57,6 +60,21 @@ def execute_init_commands(benchmark_config, r, dbconfig_keyname="dbconfig"):
57
60
  )
58
61
  )
59
62
 
63
+ # Process init_lua scripts
64
+ if lua_scripts is not None:
65
+ if type(lua_scripts) == str:
66
+ lua_scripts = [lua_scripts]
67
+
68
+ for lua_script in lua_scripts:
69
+ try:
70
+ logging.info("Executing Lua script (length: {} chars)".format(len(lua_script)))
71
+ # Execute the Lua script using EVAL command with 0 keys
72
+ stdout = r.execute_command("EVAL", lua_script, 0)
73
+ logging.info("Lua script result: {}".format(stdout))
74
+ res = res + 1
75
+ except Exception as e:
76
+ logging.error("Lua script execution failed: {}".format(e))
77
+
60
78
  return res
61
79
 
62
80
 
@@ -119,6 +137,210 @@ def extract_testsuites(args):
119
137
  return testsuite_spec_files
120
138
 
121
139
 
140
+
141
+ def commandstats_latencystats_process_name(
142
+ metric_name, prefix, setup_name, variant_labels_dict
143
+ ):
144
+ if prefix in metric_name:
145
+ command_and_metric_and_shard = metric_name[len(prefix) :]
146
+ command = (
147
+ command_and_metric_and_shard[0]
148
+ + command_and_metric_and_shard[1:].split("_", 1)[0]
149
+ )
150
+ metric_and_shard = command_and_metric_and_shard[1:].split("_", 1)[1]
151
+ metric = metric_and_shard
152
+ shard = "1"
153
+ if "_shard_" in metric_and_shard:
154
+ metric = metric_and_shard.split("_shard_")[0]
155
+ shard = metric_and_shard.split("_shard_")[1]
156
+ variant_labels_dict["metric"] = metric
157
+ variant_labels_dict["command"] = command
158
+ variant_labels_dict["command_and_metric"] = "{} - {}".format(command, metric)
159
+ variant_labels_dict["command_and_metric_and_setup"] = "{} - {} - {}".format(
160
+ command, metric, setup_name
161
+ )
162
+ variant_labels_dict["command_and_setup"] = "{} - {}".format(command, setup_name)
163
+ variant_labels_dict["shard"] = shard
164
+ variant_labels_dict["metric_and_shard"] = metric_and_shard
165
+
166
+ version = None
167
+ branch = None
168
+ if "version" in variant_labels_dict:
169
+ version = variant_labels_dict["version"]
170
+ if "branch" in variant_labels_dict:
171
+ branch = variant_labels_dict["branch"]
172
+
173
+ if version is not None:
174
+ variant_labels_dict["command_and_metric_and_version"] = (
175
+ "{} - {} - {}".format(command, metric, version)
176
+ )
177
+ variant_labels_dict["command_and_metric_and_setup_and_version"] = (
178
+ "{} - {} - {} - {}".format(command, metric, setup_name, version)
179
+ )
180
+
181
+ if branch is not None:
182
+ variant_labels_dict["command_and_metric_and_branch"] = (
183
+ "{} - {} - {}".format(command, metric, branch)
184
+ )
185
+ variant_labels_dict["command_and_metric_and_setup_and_branch"] = (
186
+ "{} - {} - {} - {}".format(command, metric, setup_name, branch)
187
+ )
188
+
189
+
190
+ def collect_redis_metrics(
191
+ redis_conns,
192
+ sections=["memory", "cpu", "commandstats", "latencystats"],
193
+ section_filter=None,
194
+ ):
195
+ start_time = dt.datetime.utcnow()
196
+ start_time_ms = int((start_time - dt.datetime(1970, 1, 1)).total_seconds() * 1000)
197
+ res = []
198
+ overall = {}
199
+ multi_shard = False
200
+ if len(redis_conns) > 1:
201
+ multi_shard = True
202
+ for conn_n, conn in enumerate(redis_conns):
203
+ conn_res = {}
204
+ for section in sections:
205
+ info = conn.info(section)
206
+ conn_res[section] = info
207
+ if section not in overall:
208
+ overall[section] = {}
209
+ for k, v in info.items():
210
+ collect = True
211
+ if section_filter is not None:
212
+ if section in section_filter:
213
+ if k not in section_filter[section]:
214
+ collect = False
215
+ if collect and type(v) is float or type(v) is int:
216
+ if k not in overall[section]:
217
+ overall[section][k] = 0
218
+ overall[section][k] += v
219
+ if collect and type(v) is dict:
220
+ for inner_k, inner_v in v.items():
221
+ if type(inner_v) is float or type(inner_v) is int:
222
+ final_str_k = "{}_{}".format(k, inner_k)
223
+ if multi_shard:
224
+ final_str_k += "_shard_{}".format(conn_n + 1)
225
+ if final_str_k not in overall[section]:
226
+ overall[section][final_str_k] = inner_v
227
+
228
+ res.append(conn_res)
229
+
230
+ kv_overall = {}
231
+ for sec, kv_detail in overall.items():
232
+ for k, metric_value in kv_detail.items():
233
+ metric_name = "{}_{}".format(sec, k)
234
+ kv_overall[metric_name] = metric_value
235
+
236
+ return start_time_ms, res, kv_overall
237
+
238
+ def export_redis_metrics(
239
+ artifact_version,
240
+ end_time_ms,
241
+ overall_end_time_metrics,
242
+ rts,
243
+ setup_name,
244
+ setup_type,
245
+ test_name,
246
+ tf_github_branch,
247
+ tf_github_org,
248
+ tf_github_repo,
249
+ tf_triggering_env,
250
+ metadata_dict=None,
251
+ expire_ms=0,
252
+ git_hash=None,
253
+ running_platform=None,
254
+ ):
255
+ datapoint_errors = 0
256
+ datapoint_inserts = 0
257
+ sprefix = (
258
+ "ci.benchmarks.redis/"
259
+ + "{triggering_env}/{github_org}/{github_repo}".format(
260
+ triggering_env=tf_triggering_env,
261
+ github_org=tf_github_org,
262
+ github_repo=tf_github_repo,
263
+ )
264
+ )
265
+ logging.info(
266
+ "Adding a total of {} server side metrics collected at the end of benchmark".format(
267
+ len(list(overall_end_time_metrics.items()))
268
+ )
269
+ )
270
+ timeseries_dict = {}
271
+ by_variants = {}
272
+ if tf_github_branch is not None and tf_github_branch != "":
273
+ by_variants["by.branch/{}".format(tf_github_branch)] = {
274
+ "branch": tf_github_branch
275
+ }
276
+ if git_hash is not None and git_hash != "":
277
+ by_variants["by.hash/{}".format(git_hash)] = {
278
+ "hash": git_hash
279
+ }
280
+ if artifact_version is not None and artifact_version != "":
281
+ by_variants["by.version/{}".format(artifact_version)] = {
282
+ "version": artifact_version
283
+ }
284
+ for (
285
+ by_variant,
286
+ variant_labels_dict,
287
+ ) in by_variants.items():
288
+ for (
289
+ metric_name,
290
+ metric_value,
291
+ ) in overall_end_time_metrics.items():
292
+ tsname_metric = "{}/{}/{}/benchmark_end/{}/{}".format(
293
+ sprefix,
294
+ test_name,
295
+ by_variant,
296
+ setup_name,
297
+ metric_name,
298
+ )
299
+
300
+ logging.debug(
301
+ "Adding a redis server side metric collected at the end of benchmark."
302
+ + " metric_name={} metric_value={} time-series name: {}".format(
303
+ metric_name,
304
+ metric_value,
305
+ tsname_metric,
306
+ )
307
+ )
308
+ variant_labels_dict["metric"] = metric_name
309
+ commandstats_latencystats_process_name(
310
+ metric_name, "commandstats_cmdstat_", setup_name, variant_labels_dict
311
+ )
312
+ commandstats_latencystats_process_name(
313
+ metric_name,
314
+ "latencystats_latency_percentiles_usec_",
315
+ setup_name,
316
+ variant_labels_dict,
317
+ )
318
+
319
+ variant_labels_dict["test_name"] = test_name
320
+ if metadata_dict is not None:
321
+ variant_labels_dict.update(metadata_dict)
322
+
323
+ timeseries_dict[tsname_metric] = {
324
+ "labels": get_project_ts_tags(
325
+ tf_github_org,
326
+ tf_github_repo,
327
+ setup_name,
328
+ setup_type,
329
+ tf_triggering_env,
330
+ variant_labels_dict,
331
+ None,
332
+ running_platform,
333
+ ),
334
+ "data": {end_time_ms: metric_value},
335
+ }
336
+ i_errors, i_inserts = push_data_to_redistimeseries(rts, timeseries_dict, expire_ms)
337
+ datapoint_errors = datapoint_errors + i_errors
338
+ datapoint_inserts = datapoint_inserts + i_inserts
339
+ return datapoint_errors, datapoint_inserts
340
+
341
+
342
+
343
+
122
344
  def reset_commandstats(redis_conns):
123
345
  for pos, redis_conn in enumerate(redis_conns):
124
346
  logging.info("Resetting commmandstats for shard {}".format(pos))
@@ -248,13 +470,16 @@ def exporter_datasink_common(
248
470
  deployment_type_and_name = f"{setup_type}_AND_{setup_name}"
249
471
  deployment_type_and_name_and_version = f"{setup_type}_AND_{setup_name}_AND_{git_version}"
250
472
 
251
- # Add to deployment-specific set
252
- deployment_set_key = f"ci.benchmarks.redis/{tf_triggering_env}/{deployment_type_and_name_and_version}:set"
253
- datasink_conn.sadd(deployment_set_key, test_name)
473
+ # Add to deployment-specific set (only if datasink connection is available)
474
+ if datasink_conn is not None:
475
+ deployment_set_key = f"ci.benchmarks.redis/{tf_triggering_env}/{deployment_type_and_name_and_version}:set"
476
+ datasink_conn.sadd(deployment_set_key, test_name)
254
477
 
255
- # Add to testcases set
256
- testcases_set_key = f"ci.benchmarks.redis/{tf_triggering_env}/testcases:set"
257
- datasink_conn.sadd(testcases_set_key, test_name)
478
+ # Add to testcases set
479
+ testcases_set_key = f"ci.benchmarks.redis/{tf_triggering_env}/testcases:set"
480
+ datasink_conn.sadd(testcases_set_key, test_name)
481
+ else:
482
+ logging.debug("Datasink connection not available, skipping set operations")
258
483
 
259
484
  # Add metadata fields to timeseries metadata
260
485
  metadata["deployment_type_AND_deployment_name"] = deployment_type_and_name
@@ -28,7 +28,10 @@ from redisbench_admin.run.common import (
28
28
  prepare_benchmark_parameters,
29
29
  dbconfig_keyspacelen_check,
30
30
  )
31
- from redisbench_admin.run_remote.run_remote import export_redis_metrics
31
+
32
+ from redis_benchmarks_specification.__common__.runner import (
33
+ export_redis_metrics,
34
+ )
32
35
 
33
36
  from redisbench_admin.run.metrics import extract_results_table
34
37
  from redisbench_admin.run.run import calculate_client_tool_duration_and_check
@@ -56,6 +59,7 @@ from redis_benchmarks_specification.__common__.runner import (
56
59
  exporter_datasink_common,
57
60
  reset_commandstats,
58
61
  execute_init_commands,
62
+ export_redis_metrics,
59
63
  )
60
64
  from redis_benchmarks_specification.__common__.spec import (
61
65
  extract_client_container_image,
@@ -1935,8 +1939,10 @@ def process_self_contained_coordinator_stream(
1935
1939
  tf_github_org,
1936
1940
  tf_github_repo,
1937
1941
  tf_triggering_env,
1938
- {"metric-type": "memory-stats"},
1942
+ metadata,
1939
1943
  expire_redis_metrics_ms,
1944
+ git_hash,
1945
+ running_platform,
1940
1946
  )
1941
1947
 
1942
1948
  exporter_datasink_common(
@@ -0,0 +1,203 @@
1
+ version: 0.4
2
+ name: memtier_benchmark-playbook-session-storage
3
+ description: |
4
+ Runs memtier_benchmark to simulate a session-based SaaS application.
5
+ The workload mimics user session CRUD, session tracking, organization-level analytics,
6
+ and rate limiting. It includes realistic key sizes and command mixes representative
7
+ of production traffic (e.g., 400–600B session hashes, ZSETs for analytics, and SETs for tracking).
8
+
9
+ Rate limiting is modeled using an atomic Lua script, inspired by the Upstash Redis example:
10
+ https://github.com/upstash/examples/tree/main/examples/ratelimit-with-redis
11
+
12
+ Each user has a dedicated key in the form of `ratelimit:user-<id>:/api/resource`, which is
13
+ used to track usage under a fixed window. The logic is evaluated atomically with the following script:
14
+
15
+ local key = KEYS[1]
16
+ local limit = 100
17
+ local window = 60
18
+ local current = redis.call("INCR", key)
19
+ if current == 1 then
20
+ redis.call("EXPIRE", key, window)
21
+ end
22
+ if current > limit then
23
+ return 0
24
+ else
25
+ return 1
26
+ end
27
+
28
+ This ensures that rate enforcement and usage tracking are done without race conditions, and
29
+ mirrors a real-world API quota model.
30
+
31
+ The workload emphasizes read-heavy patterns to reflect common SaaS access behavior. The overall
32
+ **read:write ratio is approximately 85:15**, with read operations covering session access, user-session
33
+ lookups, org analytics, and rate limit enforcement, while writes handle session updates, activity tracking,
34
+ and quota increments.
35
+
36
+ Command groups by use-case (approximate ratio of total operations):
37
+ - Session CRUD (HGETALL, HSET): ~55%
38
+ - User session tracking (SMEMBERS, SADD): ~21%
39
+ - Organization analytics (ZRANGE, ZADD): ~12%
40
+ - Rate limiting (EVAL-based quota check): ~12%
41
+
42
+ exporter:
43
+ redistimeseries:
44
+ break_by:
45
+ - version
46
+ - commit
47
+ timemetric: $."ALL STATS".Runtime."Start time"
48
+ metrics:
49
+ - $."BEST RUN RESULTS".Hgetalls."Ops/sec"
50
+ - $."BEST RUN RESULTS".Hsets."Ops/sec"
51
+ - $."BEST RUN RESULTS".Smemberss."Ops/sec"
52
+ - $."BEST RUN RESULTS".Sadds."Ops/sec"
53
+ - $."BEST RUN RESULTS".Zranges."Ops/sec"
54
+ - $."BEST RUN RESULTS".Zadds."Ops/sec"
55
+ - $."BEST RUN RESULTS".Evals."Ops/sec"
56
+ - $."BEST RUN RESULTS".Totals."Ops/sec"
57
+ - $."BEST RUN RESULTS".Totals."Latency"
58
+ - $."BEST RUN RESULTS".Totals."Misses/sec"
59
+ - $."BEST RUN RESULTS".Totals."Percentile Latencies"."p50.00"
60
+ - $."BEST RUN RESULTS".Totals."Percentile Latencies"."p99.00"
61
+ - $."ALL STATS".Hgetalls."Ops/sec"
62
+ - $."ALL STATS".Hsets."Ops/sec"
63
+ - $."ALL STATS".Smemberss."Ops/sec"
64
+ - $."ALL STATS".Sadds."Ops/sec"
65
+ - $."ALL STATS".Zranges."Ops/sec"
66
+ - $."ALL STATS".Zadds."Ops/sec"
67
+ - $."ALL STATS".Evals."Ops/sec"
68
+ - $."ALL STATS".Totals."Ops/sec"
69
+ - $."ALL STATS".Totals."Latency"
70
+ - $."ALL STATS".Totals."Misses/sec"
71
+ - $."ALL STATS".Hgetalls."Percentile Latencies"."p50.00"
72
+ - $."ALL STATS".Hsets."Percentile Latencies"."p50.00"
73
+ - $."ALL STATS".Smemberss."Percentile Latencies"."p50.00"
74
+ - $."ALL STATS".Sadds."Percentile Latencies"."p50.00"
75
+ - $."ALL STATS".Zranges."Percentile Latencies"."p50.00"
76
+ - $."ALL STATS".Zadds."Percentile Latencies"."p50.00"
77
+ - $."ALL STATS".Evals."Percentile Latencies"."p50.00"
78
+ - $."ALL STATS".Totals."Percentile Latencies"."p50.00"
79
+ - $."ALL STATS".Totals."Percentile Latencies"."p99.00"
80
+
81
+
82
+ dbconfig:
83
+ configuration-parameters:
84
+ save: '""'
85
+ resources:
86
+ requests:
87
+ memory: 1g
88
+ init_lua: |
89
+ -- Use a fixed seed for reproducibility
90
+ local seed = 12345
91
+ math.randomseed(seed)
92
+
93
+ local now = tonumber(redis.call('TIME')[1])
94
+
95
+ local function rand_str(len)
96
+ local chars = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'
97
+ local res = ''
98
+ for i = 1, len do
99
+ local idx = math.random(#chars)
100
+ res = res .. chars:sub(idx, idx)
101
+ end
102
+ return res
103
+ end
104
+
105
+ for i = 1, 100000 do
106
+ local session_id = 'session:' .. i
107
+ local user_id = 'user-' .. i
108
+ local org_id = 'org-' .. i
109
+
110
+ redis.call('HSET', session_id,
111
+ 'userId', user_id,
112
+ 'organizationId', org_id,
113
+ 'role', 'member',
114
+ 'createdAt', tostring(now - math.random(3600)),
115
+ 'lastAccessed', tostring(now),
116
+ 'ipAddress', '192.168.1.' .. (i % 255),
117
+ 'device', 'device-' .. rand_str(8),
118
+ 'authMethod', 'password',
119
+ 'status', 'active',
120
+ 'metadata', rand_str(200 + (i % 100))
121
+ )
122
+
123
+ redis.call('SADD', 'user:' .. i .. ':sessions', session_id)
124
+
125
+ local org_key = 'org:' .. i .. ':sessions'
126
+ for j = 1, 10 do
127
+ local uid = 'user-' .. ((i + j) % 1000 + 1)
128
+ local sid = 'session:' .. ((i + j) % 1000 + 1)
129
+ local zmember = uid .. ':' .. sid
130
+ local zscore = now - math.random(86400)
131
+ redis.call('ZADD', org_key, zscore, zmember)
132
+ end
133
+
134
+ local endpoint = '/api/resource'
135
+ local rate_key = 'ratelimit:' .. user_id .. ':' .. endpoint
136
+ redis.call('INCR', rate_key)
137
+ redis.call('EXPIRE', rate_key, 60)
138
+ end
139
+
140
+ return 'OK'
141
+ tested-groups:
142
+ - hash
143
+ - zset
144
+ - set
145
+ - string
146
+
147
+ tested-commands:
148
+ - hgetall
149
+ - hset
150
+ - smembers
151
+ - sadd
152
+ - zrange
153
+ - zadd
154
+ - incr
155
+ - expire
156
+ - get
157
+
158
+ redis-topologies:
159
+ - oss-standalone
160
+ build-variants:
161
+ - gcc:8.5.0-amd64-debian-buster-default
162
+ - dockerhub
163
+
164
+ clientconfig:
165
+ run_image: redislabs/memtier_benchmark:edge
166
+ tool: memtier_benchmark
167
+ arguments: >
168
+ --key-prefix ""
169
+ --key-minimum 1
170
+ --key-maximum 100000
171
+ --data-size-range=400-600
172
+ --pipeline=1
173
+ --print-percentiles=50,90,95,99
174
+ --run-count=1
175
+ --test-time=120
176
+ --command="HGETALL session:__key__"
177
+ --command-key-pattern=R
178
+ --command-ratio=50
179
+ --command="HSET session:__key__ userId user-__key__ organizationId org-__key__ role admin email user__key__@example.com name "User __key__" permissions "["read","write"]" lastActivity __timestamp__ ipAddress 192.168.1.__key__ userAgent "Mozilla/5.0" createdAt __timestamp__"
180
+ --command-key-pattern=R
181
+ --command-ratio=5
182
+ --command="SMEMBERS user:__key__:sessions"
183
+ --command-key-pattern=R
184
+ --command-ratio=18
185
+ --command="SADD user:__key__:sessions session-__key__"
186
+ --command-key-pattern=R
187
+ --command-ratio=3
188
+ --command="ZRANGE org:__key__:sessions 0 -1 WITHSCORES"
189
+ --command-key-pattern=R
190
+ --command-ratio=10
191
+ --command="ZADD org:__key__:sessions 1 user-__key__:session-__key__"
192
+ --command-key-pattern=R
193
+ --command-ratio=2
194
+ --command='EVAL "local key=KEYS[1];local limit=10;local window=60;local current=redis.call(\"INCR\",key);if current==1 then redis.call(\"EXPIRE\",key,window) end;if current>limit then return 0 else return 1 end" 1 ratelimit:user-__key__:/api/resource'
195
+ --command-key-pattern=R
196
+ --command-ratio=12
197
+ --hide-histogram
198
+ resources:
199
+ requests:
200
+ cpus: '4'
201
+ memory: 2g
202
+
203
+ priority: 150
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: redis-benchmarks-specification
3
- Version: 0.1.300
3
+ Version: 0.1.303
4
4
  Summary: The Redis benchmarks specification describes the cross-language/tools requirements and expectations to foster performance and observability standards around redis related technologies. Members from both industry and academia, including organizations and individuals are encouraged to contribute.
5
5
  Author: filipecosta90
6
6
  Author-email: filipecosta.90@gmail.com
@@ -15,7 +15,7 @@ redis_benchmarks_specification/__common__/builder_schema.py,sha256=kfDpRIk7NkJrb
15
15
  redis_benchmarks_specification/__common__/env.py,sha256=kvJ8Ll-fvI_Tc0vynrzUEr22TqnJizzvJ4Lu9RjNr_M,3119
16
16
  redis_benchmarks_specification/__common__/github.py,sha256=9TZtnISsSgXTSAN_VQejo5YRPDPhlU0gjxgKGPw_sP8,10699
17
17
  redis_benchmarks_specification/__common__/package.py,sha256=4uVt1BAZ999LV2rZkq--Tk6otAVIf9YR3g3KGeUpiW4,834
18
- redis_benchmarks_specification/__common__/runner.py,sha256=3eZ3GUKfWMUbvNhoryRnbennqPVRqzQ9189-B9N2YdE,8314
18
+ redis_benchmarks_specification/__common__/runner.py,sha256=7Xc8QjwqYXQH9Vnq7s2YiP052zI3dk-OFhO-8Z5XY3o,16589
19
19
  redis_benchmarks_specification/__common__/spec.py,sha256=D_SN48wg6NMthW_-OS1H5bydSDiuZpfd4WPPj7Vfwmc,5760
20
20
  redis_benchmarks_specification/__common__/timeseries.py,sha256=w8XQrGPEFuuemDXXz4iny7lYsNbYH0ycQyq3jHIr80g,52916
21
21
  redis_benchmarks_specification/__compare__/__init__.py,sha256=DtBXRp0Q01XgCFmY-1OIePMyyYihVNAjZ1Y8zwqSDN0,101
@@ -25,7 +25,7 @@ redis_benchmarks_specification/__init__.py,sha256=YQIEx2sLPPA0JR9OuCuMNMNtm-f_gq
25
25
  redis_benchmarks_specification/__runner__/__init__.py,sha256=l-G1z-t6twUgi8QLueqoTQLvJmv3hJoEYskGm6H7L6M,83
26
26
  redis_benchmarks_specification/__runner__/args.py,sha256=YeN7-eOGoqCHKh5FrKz9z5Ee-Rh-3DF9tm2Hb-3m7sQ,10648
27
27
  redis_benchmarks_specification/__runner__/remote_profiling.py,sha256=JS46mFxnRB2HSU_HO90WE4w7P7usHzX1dwZg1bJpTMc,18588
28
- redis_benchmarks_specification/__runner__/runner.py,sha256=V92Skt7KopmMsbXXd3zrmoIpICTU-Cbob330WfKNA3o,137838
28
+ redis_benchmarks_specification/__runner__/runner.py,sha256=pHgPkZktwlsIrvYaGais0SAvkH5olDeTDO4R5kdxudU,137953
29
29
  redis_benchmarks_specification/__self_contained_coordinator__/__init__.py,sha256=l-G1z-t6twUgi8QLueqoTQLvJmv3hJoEYskGm6H7L6M,83
30
30
  redis_benchmarks_specification/__self_contained_coordinator__/args.py,sha256=uxBjdQ78klvsVi6lOfGYQVaWIxc8OI-DwYKY16SgvCY,5952
31
31
  redis_benchmarks_specification/__self_contained_coordinator__/artifacts.py,sha256=OVHqJzDgeSSRfUSiKp1ZTAVv14PvSbk-5yJsAAoUfpw,936
@@ -271,10 +271,11 @@ redis_benchmarks_specification/test-suites/memtier_benchmark-nokeys-pubsub-mixed
271
271
  redis_benchmarks_specification/test-suites/memtier_benchmark-nokeys-pubsub-mixed-100-channels-128B-100-publishers-50K-subscribers-5k-conns.yml,sha256=mh_cAomSjsU4WL2l9gt0dhyWdSwfXTrEZgnRz6gzSQE,1144
272
272
  redis_benchmarks_specification/test-suites/memtier_benchmark-nokeys-pubsub-publish-1K-channels-10B-no-subscribers.yml,sha256=zwb27Jmg0mwcthbmdUe0KuzqRzAPs7OHcK2gc9-5VBE,779
273
273
  redis_benchmarks_specification/test-suites/memtier_benchmark-nokeys-server-time-pipeline-10.yml,sha256=7G_J8kUFay7jXhZvsZK5jvVHSLZvhMV0uuDMkZBbeSQ,675
274
+ redis_benchmarks_specification/test-suites/memtier_benchmark-playbook-session-storage.yml,sha256=Q5JKvADD1R71hyRx4Qsz5dxHHlYICHqWHD7XTAmX72M,6936
274
275
  redis_benchmarks_specification/test-suites/template.txt,sha256=d_edIE7Sxa5X7I2yG-Io0bPdbDIHR0oWFoCA3XUt_EU,435
275
276
  redis_benchmarks_specification/vector-search-test-suites/vector_db_benchmark_test.yml,sha256=PD7ow-k4Ll2BkhEC3aIqiaCZt8Hc4aJIp96Lw3J3mcI,791
276
- redis_benchmarks_specification-0.1.300.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
277
- redis_benchmarks_specification-0.1.300.dist-info/METADATA,sha256=qVACCT9ef1hzlk4KCbBcGkUtKHpzV3TQVuEdowVOaJ4,22726
278
- redis_benchmarks_specification-0.1.300.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
279
- redis_benchmarks_specification-0.1.300.dist-info/entry_points.txt,sha256=x5WBXCZsnDRTZxV7SBGmC65L2k-ygdDOxV8vuKN00Nk,715
280
- redis_benchmarks_specification-0.1.300.dist-info/RECORD,,
277
+ redis_benchmarks_specification-0.1.303.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
278
+ redis_benchmarks_specification-0.1.303.dist-info/METADATA,sha256=EhMb2tY2pNg3PZtPh8R40wztMm6GVTR48j5P_fP04nw,22726
279
+ redis_benchmarks_specification-0.1.303.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
280
+ redis_benchmarks_specification-0.1.303.dist-info/entry_points.txt,sha256=x5WBXCZsnDRTZxV7SBGmC65L2k-ygdDOxV8vuKN00Nk,715
281
+ redis_benchmarks_specification-0.1.303.dist-info/RECORD,,