redis-benchmarks-specification 0.1.208__py3-none-any.whl → 0.1.210__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective public registries.

Potentially problematic release: this version of redis-benchmarks-specification has been flagged as potentially problematic.

Files changed (120)
  1. redis_benchmarks_specification/__builder__/builder.py +130 -64
  2. redis_benchmarks_specification/__cli__/args.py +13 -1
  3. redis_benchmarks_specification/__cli__/cli.py +66 -3
  4. redis_benchmarks_specification/__common__/env.py +1 -1
  5. redis_benchmarks_specification/__common__/github.py +7 -11
  6. redis_benchmarks_specification/__common__/runner.py +15 -3
  7. redis_benchmarks_specification/__common__/timeseries.py +1551 -0
  8. redis_benchmarks_specification/__compare__/compare.py +123 -36
  9. redis_benchmarks_specification/__runner__/runner.py +20 -6
  10. redis_benchmarks_specification/__self_contained_coordinator__/args.py +0 -5
  11. redis_benchmarks_specification/__self_contained_coordinator__/build_info.py +5 -3
  12. redis_benchmarks_specification/__self_contained_coordinator__/docker.py +2 -2
  13. redis_benchmarks_specification/__self_contained_coordinator__/prepopulation.py +4 -1
  14. redis_benchmarks_specification/__self_contained_coordinator__/runners.py +6 -2
  15. redis_benchmarks_specification/__self_contained_coordinator__/self_contained_coordinator.py +153 -52
  16. redis_benchmarks_specification/test-suites/memtier_benchmark-10Mkeys-load-hash-5-fields-with-1000B-values-pipeline-10.yml +1 -0
  17. redis_benchmarks_specification/test-suites/memtier_benchmark-10Mkeys-load-hash-5-fields-with-1000B-values.yml +1 -0
  18. redis_benchmarks_specification/test-suites/memtier_benchmark-10Mkeys-load-hash-5-fields-with-100B-values-pipeline-10.yml +1 -0
  19. redis_benchmarks_specification/test-suites/memtier_benchmark-10Mkeys-load-hash-5-fields-with-100B-values.yml +1 -0
  20. redis_benchmarks_specification/test-suites/memtier_benchmark-10Mkeys-load-hash-5-fields-with-10B-values-pipeline-10.yml +1 -0
  21. redis_benchmarks_specification/test-suites/memtier_benchmark-10Mkeys-load-hash-5-fields-with-10B-values.yml +1 -0
  22. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-100B-expire-use-case.yml +1 -0
  23. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-10B-expire-use-case.yml +1 -0
  24. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-1KiB-expire-use-case.yml +1 -0
  25. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-4KiB-expire-use-case.yml +1 -0
  26. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-bitmap-getbit-pipeline-10.yml +1 -0
  27. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-generic-exists-pipeline-10.yml +1 -0
  28. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-generic-expire-pipeline-10.yml +1 -0
  29. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-generic-expireat-pipeline-10.yml +1 -0
  30. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-generic-pexpire-pipeline-10.yml +1 -0
  31. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-generic-scan-pipeline-10.yml +1 -0
  32. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-generic-touch-pipeline-10.yml +1 -0
  33. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-generic-ttl-pipeline-10.yml +1 -0
  34. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-hash-hexists.yml +1 -0
  35. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-hash-hget-hgetall-hkeys-hvals-with-100B-values.yml +1 -0
  36. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-hash-hincrby.yml +1 -0
  37. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-hash-hmget-5-fields-with-100B-values-pipeline-10.yml +1 -0
  38. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-hash-transactions-multi-exec-pipeline-20.yml +1 -0
  39. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-list-lpop-rpop-with-100B-values.yml +1 -0
  40. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-list-lpop-rpop-with-10B-values.yml +1 -0
  41. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-list-lpop-rpop-with-1KiB-values.yml +1 -0
  42. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-hash-5-fields-with-1000B-values-pipeline-10.yml +1 -0
  43. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-hash-5-fields-with-1000B-values.yml +1 -0
  44. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-hash-hmset-5-fields-with-1000B-values.yml +1 -0
  45. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-list-with-100B-values.yml +1 -0
  46. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-list-with-10B-values.yml +1 -0
  47. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-list-with-1KiB-values.yml +1 -0
  48. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-stream-1-fields-with-100B-values-pipeline-10.yml +1 -0
  49. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-stream-1-fields-with-100B-values.yml +1 -0
  50. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-stream-5-fields-with-100B-values-pipeline-10.yml +1 -0
  51. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-stream-5-fields-with-100B-values.yml +1 -0
  52. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-string-with-100B-values-pipeline-10.yml +1 -0
  53. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-string-with-100B-values.yml +2 -5
  54. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-string-with-10B-values-pipeline-10.yml +1 -0
  55. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-string-with-10B-values.yml +1 -0
  56. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-string-with-1KiB-values.yml +1 -0
  57. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-string-with-200KiB-values.yml +2 -5
  58. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-string-with-20KiB-values.yml +2 -5
  59. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-string-with-2MB-values.yml +2 -5
  60. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-zset-with-10-elements-double-score.yml +1 -0
  61. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-zset-with-10-elements-int-score.yml +1 -0
  62. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-decr.yml +1 -0
  63. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-get-100B-pipeline-10.yml +1 -0
  64. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-get-100B.yml +1 -0
  65. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-get-10B-pipeline-10.yml +1 -0
  66. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-get-10B.yml +1 -0
  67. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-get-1KiB-pipeline-10.yml +1 -0
  68. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-get-1KiB.yml +1 -0
  69. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-get-200KiB.yml +1 -0
  70. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-get-20KiB.yml +1 -0
  71. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-get-2MB.yml +1 -0
  72. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-mget-1KiB.yml +1 -0
  73. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-geo-60M-elements-geodist-pipeline-10.yml +1 -0
  74. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-geo-60M-elements-geodist.yml +1 -0
  75. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-geo-60M-elements-geohash-pipeline-10.yml +1 -0
  76. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-geo-60M-elements-geohash.yml +1 -0
  77. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-geo-60M-elements-geopos-pipeline-10.yml +1 -0
  78. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-geo-60M-elements-geopos.yml +1 -0
  79. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-geo-60M-elements-geosearch-fromlonlat-bybox.yml +1 -0
  80. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-geo-60M-elements-geosearch-fromlonlat-pipeline-10.yml +1 -0
  81. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-geo-60M-elements-geosearch-fromlonlat.yml +1 -0
  82. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-list-10-elements-lrange-all-elements.yml +1 -0
  83. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-list-100-elements-lrange-all-elements.yml +1 -0
  84. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-list-1K-elements-lrange-all-elements.yml +1 -0
  85. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-pfadd-4KB-values-pipeline-10.yml +1 -0
  86. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-set-10-elements-smembers-pipeline-10.yml +1 -0
  87. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-set-10-elements-smembers.yml +1 -0
  88. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-set-10-elements-smismember.yml +1 -0
  89. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-set-100-elements-smembers.yml +1 -0
  90. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-set-100-elements-smismember.yml +1 -0
  91. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-set-1K-elements-smembers.yml +1 -0
  92. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-set-200K-elements-sadd-constant.yml +1 -0
  93. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-set-2M-elements-sadd-increasing.yml +1 -0
  94. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-zincrby-1M-elements-pipeline-1.yml +1 -0
  95. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-zrank-1M-elements-pipeline-1.yml +1 -0
  96. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-zrem-5M-elements-pipeline-1.yml +1 -0
  97. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-zrevrangebyscore-256K-elements-pipeline-1.yml +1 -0
  98. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-zrevrank-1M-elements-pipeline-1.yml +1 -0
  99. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-zset-10-elements-zrange-all-elements-long-scores.yml +1 -0
  100. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-zset-10-elements-zrange-all-elements.yml +1 -0
  101. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-zset-100-elements-zrange-all-elements.yml +1 -0
  102. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-zset-100-elements-zrangebyscore-all-elements-long-scores.yml +1 -0
  103. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-zset-100-elements-zrangebyscore-all-elements.yml +1 -0
  104. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-zset-1K-elements-zrange-all-elements.yml +1 -0
  105. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-zset-1M-elements-zcard-pipeline-10.yml +1 -0
  106. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-zset-1M-elements-zrevrange-5-elements.yml +1 -0
  107. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-zset-1M-elements-zscore-pipeline-10.yml +1 -0
  108. redis_benchmarks_specification/test-suites/memtier_benchmark-2keys-lua-eval-hset-expire.yml +1 -0
  109. redis_benchmarks_specification/test-suites/memtier_benchmark-2keys-lua-evalsha-hset-expire.yml +1 -0
  110. redis_benchmarks_specification/test-suites/memtier_benchmark-2keys-set-10-100-elements-sdiff.yml +1 -0
  111. redis_benchmarks_specification/test-suites/memtier_benchmark-2keys-set-10-100-elements-sinter.yml +1 -0
  112. redis_benchmarks_specification/test-suites/memtier_benchmark-2keys-set-10-100-elements-sunion.yml +1 -0
  113. redis_benchmarks_specification/test-suites/memtier_benchmark-2keys-stream-5-entries-xread-all-entries-pipeline-10.yml +1 -0
  114. redis_benchmarks_specification/test-suites/memtier_benchmark-2keys-stream-5-entries-xread-all-entries.yml +1 -0
  115. redis_benchmarks_specification/test-suites/template.txt +1 -0
  116. {redis_benchmarks_specification-0.1.208.dist-info → redis_benchmarks_specification-0.1.210.dist-info}/METADATA +12 -4
  117. {redis_benchmarks_specification-0.1.208.dist-info → redis_benchmarks_specification-0.1.210.dist-info}/RECORD +120 -119
  118. {redis_benchmarks_specification-0.1.208.dist-info → redis_benchmarks_specification-0.1.210.dist-info}/LICENSE +0 -0
  119. {redis_benchmarks_specification-0.1.208.dist-info → redis_benchmarks_specification-0.1.210.dist-info}/WHEEL +0 -0
  120. {redis_benchmarks_specification-0.1.208.dist-info → redis_benchmarks_specification-0.1.210.dist-info}/entry_points.txt +0 -0
redis_benchmarks_specification/__common__/timeseries.py (new file)
@@ -0,0 +1,1551 @@
+# BSD 3-Clause License
+#
+# Copyright (c) 2021., Redis Labs Modules
+# All rights reserved.
+#
+import datetime
+import logging
+from tqdm import tqdm
+import redis
+from jsonpath_ng.parser import JsonPathParser
+
+
+def parse(string):
+    return JsonPathParser().parse(string)
+
+
+def parse_exporter_timemetric(metric_path: str, results_dict: dict):
+    datapoints_timestamp = None
+    try:
+        jsonpath_expr = parse(metric_path)
+        find_res = jsonpath_expr.find(results_dict)
+        if len(find_res) > 0:
+            datapoints_timestamp = int(find_res[0].value)
+    except Exception as e:
+        logging.error(
+            "Unable to parse time-metric {}. Error: {}".format(metric_path, e.__str__())
+        )
+    return datapoints_timestamp
+
+
+def parse_exporter_timemetric_definition(
+    benchmark_config: dict, configkey: str = "redistimeseries"
+):
+    metric_path = None
+    if "timemetric" in benchmark_config[configkey]:
+        metric_path = benchmark_config[configkey]["timemetric"]
+    return metric_path
+
+
+def parse_exporter_metrics_definition(
+    benchmark_config: dict, configkey: str = "redistimeseries"
+):
+    metrics = []
+    if configkey in benchmark_config:
+        if "metrics" in benchmark_config[configkey]:
+            for metric_name in benchmark_config[configkey]["metrics"]:
+                metrics.append(metric_name)
+    return metrics
+
+
+def get_ts_metric_name(
+    by,
+    by_value,
+    tf_github_org,
+    tf_github_repo,
+    deployment_name,
+    deployment_type,
+    test_name,
+    tf_triggering_env,
+    metric_name,
+    metric_context_path=None,
+    use_metric_context_path=False,
+    build_variant_name=None,
+    running_platform=None,
+):
+    if use_metric_context_path:
+        metric_name = "{}/{}".format(metric_name, metric_context_path)
+    build_variant_str = ""
+    if build_variant_name is not None:
+        build_variant_str = "{}/".format(str(build_variant_name))
+    running_platform_str = ""
+    if running_platform is not None:
+        running_platform_str = "{}/".format(str(running_platform))
+    if deployment_name != deployment_type:
+        deployment_name = "/{}".format(deployment_name)
+    else:
+        deployment_name = ""
+    ts_name = (
+        "ci.benchmarks.redislabs/{by}/"
+        "{triggering_env}/{github_org}/{github_repo}/"
+        "{test_name}/{build_variant_str}{running_platform_str}{deployment_type}{deployment_name}/{by_value}/{metric}".format(
+            by=by,
+            triggering_env=tf_triggering_env,
+            github_org=tf_github_org,
+            github_repo=tf_github_repo,
+            test_name=test_name,
+            deployment_type=deployment_type,
+            deployment_name=deployment_name,
+            build_variant_str=build_variant_str,
+            running_platform_str=running_platform_str,
+            by_value=str(by_value),
+            metric=metric_name,
+        )
+    )
+    return ts_name
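
For orientation, a minimal sketch of the key name this helper produces; every argument value below is hypothetical:

    name = get_ts_metric_name(
        "by.branch",          # by
        "unstable",           # by_value
        "redis",              # tf_github_org
        "redis",              # tf_github_repo
        "oss-standalone",     # deployment_name
        "oss-standalone",     # deployment_type
        "memtier_benchmark-1Mkeys-string-get-100B",  # test_name
        "ci",                 # tf_triggering_env
        "Ops/sec",            # metric_name
    )
    # Since deployment_name == deployment_type, that path segment collapses:
    # "ci.benchmarks.redislabs/by.branch/ci/redis/redis/
    #  memtier_benchmark-1Mkeys-string-get-100B/oss-standalone/unstable/Ops/sec"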
+
+
+def extract_results_table(
+    metrics,
+    results_dict,
+):
+    results_matrix = []
+    cleaned_metrics = []
+    already_present_metrics = []
+    # insert first the dict metrics
+    for jsonpath in metrics:
+        if type(jsonpath) == dict:
+            cleaned_metrics.append(jsonpath)
+            metric_jsonpath = list(jsonpath.keys())[0]
+            already_present_metrics.append(metric_jsonpath)
+    for jsonpath in metrics:
+        if type(jsonpath) == str:
+            if jsonpath not in already_present_metrics:
+                already_present_metrics.append(jsonpath)
+                cleaned_metrics.append(jsonpath)
+
+    for jsonpath in cleaned_metrics:
+        test_case_targets_dict = {}
+        metric_jsonpath = jsonpath
+        find_res = None
+        try:
+            if type(jsonpath) == str:
+                jsonpath_expr = parse(jsonpath)
+            if type(jsonpath) == dict:
+                metric_jsonpath = list(jsonpath.keys())[0]
+                test_case_targets_dict = jsonpath[metric_jsonpath]
+                jsonpath_expr = parse(metric_jsonpath)
+            find_res = jsonpath_expr.find(results_dict)
+        except Exception:
+            pass
+        finally:
+            if find_res is not None:
+                use_metric_context_path = False
+                if len(find_res) > 1:
+                    use_metric_context_path = True
+                for metric in find_res:
+                    metric_name = str(metric.path)
+                    metric_value = float(metric.value)
+                    metric_context_path = str(metric.context.path)
+                    if metric_jsonpath[0] == "$":
+                        metric_jsonpath = metric_jsonpath[1:]
+                    if metric_jsonpath[0] == ".":
+                        metric_jsonpath = metric_jsonpath[1:]
+
+                    # retro-compatible naming
+                    if use_metric_context_path is False:
+                        metric_name = metric_jsonpath
+
+                    metric_name = metric_name.replace("'", "")
+                    metric_name = metric_name.replace('"', "")
+                    metric_name = metric_name.replace("(", "")
+                    metric_name = metric_name.replace(")", "")
+                    metric_name = metric_name.replace(" ", "_")
+
+                    results_matrix.append(
+                        [
+                            metric_jsonpath,
+                            metric_context_path,
+                            metric_name,
+                            metric_value,
+                            test_case_targets_dict,
+                            use_metric_context_path,
+                        ]
+                    )
+
+            else:
+                logging.warning(
+                    "Unable to find metric path {} in result dict".format(jsonpath)
+                )
+    return results_matrix
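
As a rough illustration of the two metric-spec forms this function accepts (all paths and values below are hypothetical), a plain JSONPath string is matched as-is, while the dict form additionally carries per-target expected values:

    metrics = [
        "$.Totals.opsPerSec",
        {"$.Totals.latencyMs": {"redis-target": 0.250}},
    ]
    results = {"Totals": {"opsPerSec": 123456.0, "latencyMs": 0.310}}
    rows = extract_results_table(metrics, results)
    # each row: [jsonpath, context_path, metric_name, metric_value,
    #            targets_dict, use_metric_context_path]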
+
+
+def get_ts_tags_and_name(
+    break_by_key,
+    break_by_str,
+    break_by_value,
+    build_variant_name,
+    deployment_name,
+    deployment_type,
+    metadata_tags,
+    metric_context_path,
+    metric_jsonpath,
+    metric_name,
+    running_platform,
+    test_name,
+    testcase_metric_context_paths,
+    tf_github_org,
+    tf_github_repo,
+    tf_triggering_env,
+    use_metric_context_path,
+):
+    # prepare tags
+    timeserie_tags = get_project_ts_tags(
+        tf_github_org,
+        tf_github_repo,
+        deployment_name,
+        deployment_type,
+        tf_triggering_env,
+        metadata_tags,
+        build_variant_name,
+        running_platform,
+    )
+    timeserie_tags[break_by_key] = break_by_value
+    timeserie_tags["{}+{}".format("deployment_name", break_by_key)] = "{} {}".format(
+        deployment_name, break_by_value
+    )
+    timeserie_tags[break_by_key] = break_by_value
+    timeserie_tags["{}+{}".format("target", break_by_key)] = "{} {}".format(
+        break_by_value, tf_github_repo
+    )
+    timeserie_tags["test_name"] = str(test_name)
+    if build_variant_name is not None:
+        timeserie_tags["test_name:build_variant"] = "{}:{}".format(
+            test_name, build_variant_name
+        )
+    timeserie_tags["metric"] = str(metric_name)
+    timeserie_tags["metric_name"] = metric_name
+    timeserie_tags["metric_context_path"] = metric_context_path
+    if metric_context_path is not None:
+        timeserie_tags["test_name:metric_context_path"] = "{}:{}".format(
+            test_name, metric_context_path
+        )
+    timeserie_tags["metric_jsonpath"] = metric_jsonpath
+    if metric_context_path not in testcase_metric_context_paths:
+        testcase_metric_context_paths.append(metric_context_path)
+    ts_name = get_ts_metric_name(
+        break_by_str,
+        break_by_value,
+        tf_github_org,
+        tf_github_repo,
+        deployment_name,
+        deployment_type,
+        test_name,
+        tf_triggering_env,
+        metric_name,
+        metric_context_path,
+        use_metric_context_path,
+        build_variant_name,
+        running_platform,
+    )
+    return timeserie_tags, ts_name
+
+
+def from_metric_kv_to_timeserie(
+    break_by_key,
+    break_by_str,
+    break_by_value,
+    build_variant_name,
+    datapoints_timestamp,
+    deployment_name,
+    deployment_type,
+    metadata_tags,
+    metric_context_path,
+    metric_jsonpath,
+    metric_name,
+    metric_value,
+    running_platform,
+    test_case_targets_dict,
+    test_name,
+    testcase_metric_context_paths,
+    tf_github_org,
+    tf_github_repo,
+    tf_triggering_env,
+    time_series_dict,
+    use_metric_context_path,
+):
+    timeserie_tags, ts_name = get_ts_tags_and_name(
+        break_by_key,
+        break_by_str,
+        break_by_value,
+        build_variant_name,
+        deployment_name,
+        deployment_type,
+        metadata_tags,
+        metric_context_path,
+        metric_jsonpath,
+        metric_name,
+        running_platform,
+        test_name,
+        testcase_metric_context_paths,
+        tf_github_org,
+        tf_github_repo,
+        tf_triggering_env,
+        use_metric_context_path,
+    )
+    logging.info(f"Adding timeserie named {ts_name} to time_series_dict.")
+    time_series_dict[ts_name] = {
+        "labels": timeserie_tags.copy(),
+        "data": {datapoints_timestamp: metric_value},
+    }
+
+    original_ts_name = ts_name
+    target_table_keyname = "target_tables:{triggering_env}:ci.benchmarks.redislabs/{break_by_key}/{break_by_str}/{tf_github_org}/{tf_github_repo}/{deployment_type}/{deployment_name}/{test_name}/{metric_name}".format(
+        triggering_env=tf_triggering_env,
+        break_by_key=break_by_key,
+        break_by_str=break_by_str,
+        tf_github_org=tf_github_org,
+        tf_github_repo=tf_github_repo,
+        deployment_name=deployment_name,
+        deployment_type=deployment_type,
+        test_name=test_name,
+        metric_name=metric_name,
+    )
+    target_table_dict = {
+        "test-case": test_name,
+        "metric-name": metric_name,
+        tf_github_repo: metric_value,
+        "contains-target": False,
+    }
+    for target_name, target_value in test_case_targets_dict.items():
+        target_table_dict["contains-target"] = True
+        ts_name = original_ts_name + "/target/{}".format(target_name)
+        timeserie_tags_target = timeserie_tags.copy()
+        timeserie_tags_target["is_target"] = "true"
+        timeserie_tags_target["{}+{}".format("target", break_by_key)] = "{} {}".format(
+            break_by_value, target_name
+        )
+        time_series_dict[ts_name] = {
+            "labels": timeserie_tags_target,
+            "data": {datapoints_timestamp: target_value},
+        }
+        if "overallQuantiles" in metric_name:
+            comparison_type = "(lower-better)"
+        else:
+            comparison_type = "(higher-better)"
+        if comparison_type == "(higher-better)":
+            target_value_pct = (
+                (float(metric_value) / float(target_value)) - 1.0
+            ) * 100.0
+        else:
+            target_value_pct = (
+                (float(target_value) / float(metric_value)) - 1.0
+            ) * 100.0
+
+        target_value_pct_str = "{:.2f}".format(target_value_pct)
+
+        target_table_dict[target_name] = target_value
+
+        target_table_dict["{}:percent {}".format(target_name, comparison_type)] = (
+            target_value_pct_str
+        )
+    return target_table_keyname, target_table_dict
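
The percent-vs-target value stored above is a plain relative delta, with the ratio inverted for lower-is-better metrics. A quick worked example with hypothetical numbers:

    # Higher-is-better metric (e.g. throughput) vs. its target:
    metric_value, target_value = 110000.0, 100000.0
    pct = ((metric_value / target_value) - 1.0) * 100.0  # 10.00 -> 10% above target
    # For "(lower-better)" metrics (names containing "overallQuantiles") the
    # ratio flips: a 0.30 latency against a 0.25 target gives
    # ((0.25 / 0.30) - 1.0) * 100.0 = -16.67, i.e. ~16.7% worse than target.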
+
+
+def common_timeseries_extraction(
+    break_by_key,
+    break_by_str,
+    datapoints_timestamp,
+    deployment_name,
+    deployment_type,
+    metrics,
+    break_by_value,
+    results_dict,
+    test_name,
+    tf_github_org,
+    tf_github_repo,
+    tf_triggering_env,
+    metadata_tags={},
+    build_variant_name=None,
+    running_platform=None,
+    testcase_metric_context_paths=[],
+):
+    time_series_dict = {}
+    target_tables = {}
+    cleaned_metrics_arr = extract_results_table(metrics, results_dict)
+    total_metrics = len(cleaned_metrics_arr)
+    logging.info(f"Total of {total_metrics} cleaned metrics: {cleaned_metrics_arr}")
+    for cleaned_metric in cleaned_metrics_arr:
+
+        metric_jsonpath = cleaned_metric[0]
+        metric_context_path = cleaned_metric[1]
+        metric_name = cleaned_metric[2]
+        metric_value = cleaned_metric[3]
+        test_case_targets_dict = cleaned_metric[4]
+        use_metric_context_path = cleaned_metric[5]
+
+        target_table_keyname, target_table_dict = from_metric_kv_to_timeserie(
+            break_by_key,
+            break_by_str,
+            break_by_value,
+            build_variant_name,
+            datapoints_timestamp,
+            deployment_name,
+            deployment_type,
+            metadata_tags,
+            metric_context_path,
+            metric_jsonpath,
+            metric_name,
+            metric_value,
+            running_platform,
+            test_case_targets_dict,
+            test_name,
+            testcase_metric_context_paths,
+            tf_github_org,
+            tf_github_repo,
+            tf_triggering_env,
+            time_series_dict,
+            use_metric_context_path,
+        )
+        target_tables[target_table_keyname] = target_table_dict
+
+    return time_series_dict, target_tables
+
+
+def extract_perhash_timeseries_from_results(
+    datapoints_timestamp: int,
+    metrics: list,
+    results_dict: dict,
+    git_hash: str,
+    tf_github_org: str,
+    tf_github_repo: str,
+    deployment_name: str,
+    deployment_type: str,
+    test_name: str,
+    tf_triggering_env: str,
+    metadata_tags={},
+    build_variant_name=None,
+    running_platform=None,
+    testcase_metric_context_paths=[],
+):
+    break_by_key = "hash"
+    break_by_str = "by.{}".format(break_by_key)
+    (
+        time_series_dict,
+        target_tables,
+    ) = common_timeseries_extraction(
+        break_by_key,
+        break_by_str,
+        datapoints_timestamp,
+        deployment_name,
+        deployment_type,
+        metrics,
+        git_hash,
+        results_dict,
+        test_name,
+        tf_github_org,
+        tf_github_repo,
+        tf_triggering_env,
+        metadata_tags,
+        build_variant_name,
+        running_platform,
+        testcase_metric_context_paths,
+    )
+    return True, time_series_dict, target_tables
+
+
+def extract_perversion_timeseries_from_results(
+    datapoints_timestamp: int,
+    metrics: list,
+    results_dict: dict,
+    project_version: str,
+    tf_github_org: str,
+    tf_github_repo: str,
+    deployment_name: str,
+    deployment_type: str,
+    test_name: str,
+    tf_triggering_env: str,
+    metadata_tags={},
+    build_variant_name=None,
+    running_platform=None,
+    testcase_metric_context_paths=[],
+):
+    break_by_key = "version"
+    break_by_str = "by.{}".format(break_by_key)
+    (
+        branch_time_series_dict,
+        target_tables,
+    ) = common_timeseries_extraction(
+        break_by_key,
+        break_by_str,
+        datapoints_timestamp,
+        deployment_name,
+        deployment_type,
+        metrics,
+        project_version,
+        results_dict,
+        test_name,
+        tf_github_org,
+        tf_github_repo,
+        tf_triggering_env,
+        metadata_tags,
+        build_variant_name,
+        running_platform,
+        testcase_metric_context_paths,
+    )
+    return True, branch_time_series_dict, target_tables
+
+
+def push_data_to_redistimeseries(rts, time_series_dict: dict, expire_msecs=0):
+    datapoint_errors = 0
+    datapoint_inserts = 0
+    if rts is not None and time_series_dict is not None:
+        progress = tqdm(
+            unit="benchmark time-series", total=len(time_series_dict.values())
+        )
+        for timeseries_name, time_series in time_series_dict.items():
+            exporter_create_ts(rts, time_series, timeseries_name)
+            for timestamp, value in time_series["data"].items():
+                try:
+                    if timestamp is None:
+                        logging.warning("The provided timestamp is null. Using auto-ts")
+                        rts.ts().add(
+                            timeseries_name,
+                            value,
+                            duplicate_policy="last",
+                        )
+                    else:
+                        rts.ts().add(
+                            timeseries_name,
+                            timestamp,
+                            value,
+                            duplicate_policy="last",
+                        )
+                    datapoint_inserts += 1
+                except redis.exceptions.DataError:
+                    logging.warning(
+                        "Error while inserting datapoint ({} : {}) in timeseries named {}. ".format(
+                            timestamp, value, timeseries_name
+                        )
+                    )
+                    datapoint_errors += 1
+                    pass
+                except redis.exceptions.ResponseError:
+                    logging.warning(
+                        "Error while inserting datapoint ({} : {}) in timeseries named {}. ".format(
+                            timestamp, value, timeseries_name
+                        )
+                    )
+                    datapoint_errors += 1
+                    pass
+            if expire_msecs > 0:
+                rts.pexpire(timeseries_name, expire_msecs)
+            progress.update()
+    return datapoint_errors, datapoint_inserts
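
A minimal usage sketch, assuming a local Redis server with the RedisTimeSeries module loaded; the series name, labels, and datapoint below are hypothetical:

    import redis

    rts = redis.Redis(host="localhost", port=6379)
    series = {
        "ci.benchmarks.redislabs/by.branch/ci/org/repo/test/oss-standalone/unstable/Ops/sec": {
            "labels": {"branch": "unstable", "metric": "Ops/sec"},
            "data": {1680000000000: 123456.0},
        }
    }
    errors, inserts = push_data_to_redistimeseries(rts, series)
    # errors == 0, inserts == 1 on success; passing expire_msecs > 0 additionally
    # PEXPIREs each series after its datapoints are inserted.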
+
+
+def extract_perbranch_timeseries_from_results(
+    datapoints_timestamp: int,
+    metrics: list,
+    results_dict: dict,
+    tf_github_branch: str,
+    tf_github_org: str,
+    tf_github_repo: str,
+    deployment_name: str,
+    deployment_type: str,
+    test_name: str,
+    tf_triggering_env: str,
+    metadata_tags={},
+    build_variant_name=None,
+    running_platform=None,
+    testcase_metric_context_paths=[],
+):
+    break_by_key = "branch"
+    break_by_str = "by.{}".format(break_by_key)
+    (branch_time_series_dict, target_tables) = common_timeseries_extraction(
+        break_by_key,
+        break_by_str,
+        datapoints_timestamp,
+        deployment_name,
+        deployment_type,
+        metrics,
+        tf_github_branch,
+        results_dict,
+        test_name,
+        tf_github_org,
+        tf_github_repo,
+        tf_triggering_env,
+        metadata_tags,
+        build_variant_name,
+        running_platform,
+        testcase_metric_context_paths,
+    )
+    return True, branch_time_series_dict, target_tables
+
+
+def check_rts_labels(rts, time_series, timeseries_name):
+    updated_create = False
+    logging.debug(
+        "Timeseries named {} already exists. Checking that the labels match.".format(
+            timeseries_name
+        )
+    )
+    set1 = set(time_series["labels"].items())
+    set2 = set(rts.ts().info(timeseries_name).labels.items())
+    if len(set1 - set2) > 0 or len(set2 - set1) > 0:
+        logging.info(
+            "Given the labels don't match using TS.ALTER on {} to update labels to {}".format(
+                timeseries_name, time_series["labels"]
+            )
+        )
+        updated_create = True
+        rts.ts().alter(timeseries_name, labels=time_series["labels"])
+    return updated_create
+
+
+def exporter_create_ts(rts, time_series, timeseries_name):
+    updated_create = False
+    try:
+        if rts.exists(timeseries_name):
+            updated_create = check_rts_labels(rts, time_series, timeseries_name)
+        else:
+            logging.debug(
+                "Creating timeseries named {} with labels {}".format(
+                    timeseries_name, time_series["labels"]
+                )
+            )
+            rts.ts().create(
+                timeseries_name, labels=time_series["labels"], chunk_size=128
+            )
+            updated_create = True
+
+    except redis.exceptions.ResponseError as e:
+        if "already exists" in e.__str__():
+            updated_create = check_rts_labels(rts, time_series, timeseries_name)
+            pass
+        else:
+            logging.error(
+                "While creating timeseries named {} with the following labels: {} this error ocurred: {}".format(
+                    timeseries_name, time_series["labels"], e.__str__()
+                )
+            )
+            raise
+    return updated_create
+
+
+def get_overall_dashboard_keynames(
+    tf_github_org,
+    tf_github_repo,
+    tf_triggering_env,
+    build_variant_name=None,
+    running_platform=None,
+    test_name=None,
+):
+    build_variant_str = ""
+    if build_variant_name is not None:
+        build_variant_str = "/{}".format(build_variant_name)
+    running_platform_str = ""
+    if running_platform is not None:
+        running_platform_str = "/{}".format(running_platform)
+    sprefix = (
+        "ci.benchmarks.redislabs/"
+        + "{triggering_env}/{github_org}/{github_repo}".format(
+            triggering_env=tf_triggering_env,
+            github_org=tf_github_org,
+            github_repo=tf_github_repo,
+        )
+    )
+    testcases_setname = "{}:testcases".format(sprefix)
+    deployment_name_setname = "{}:deployment_names".format(sprefix)
+    project_archs_setname = "{}:archs".format(sprefix)
+    project_oss_setname = "{}:oss".format(sprefix)
+    project_branches_setname = "{}:branches".format(sprefix)
+    project_versions_setname = "{}:versions".format(sprefix)
+    project_compilers_setname = "{}:compilers".format(sprefix)
+    running_platforms_setname = "{}:platforms".format(sprefix)
+    build_variant_setname = "{}:build_variants".format(sprefix)
+    build_variant_prefix = "{sprefix}{build_variant_str}".format(
+        sprefix=sprefix,
+        build_variant_str=build_variant_str,
+    )
+    prefix = "{build_variant_prefix}{running_platform_str}".format(
+        build_variant_prefix=build_variant_prefix,
+        running_platform_str=running_platform_str,
+    )
+    tsname_project_total_success = "{}:total_success".format(
+        prefix,
+    )
+    tsname_project_total_failures = "{}:total_failures".format(
+        prefix,
+    )
+    testcases_metric_context_path_setname = ""
+    if test_name is not None:
+        testcases_metric_context_path_setname = (
+            "{testcases_setname}:metric_context_path:{test_name}".format(
+                testcases_setname=testcases_setname, test_name=test_name
+            )
+        )
+    testcases_and_metric_context_path_setname = (
+        "{testcases_setname}_AND_metric_context_path".format(
+            testcases_setname=testcases_setname
+        )
+    )
+    return (
+        prefix,
+        testcases_setname,
+        deployment_name_setname,
+        tsname_project_total_failures,
+        tsname_project_total_success,
+        running_platforms_setname,
+        build_variant_setname,
+        testcases_metric_context_path_setname,
+        testcases_and_metric_context_path_setname,
+        project_archs_setname,
+        project_oss_setname,
+        project_branches_setname,
+        project_versions_setname,
+        project_compilers_setname,
+    )
+
+
+def get_project_ts_tags(
+    tf_github_org: str,
+    tf_github_repo: str,
+    deployment_name: str,
+    deployment_type: str,
+    tf_triggering_env: str,
+    metadata_tags={},
+    build_variant_name=None,
+    running_platform=None,
+):
+    tags = {
+        "github_org": tf_github_org,
+        "github_repo": tf_github_repo,
+        "github_org/github_repo": "{}/{}".format(tf_github_org, tf_github_repo),
+        "deployment_type": deployment_type,
+        "deployment_name": deployment_name,
+        "triggering_env": tf_triggering_env,
+    }
+    if build_variant_name is not None:
+        tags["build_variant"] = build_variant_name
+    if running_platform is not None:
+        tags["running_platform"] = running_platform
+    for k, v in metadata_tags.items():
+        tags[k] = str(v)
+    return tags
+
+
+def common_exporter_logic(
+    deployment_name,
+    deployment_type,
+    exporter_timemetric_path,
+    metrics,
+    results_dict,
+    test_name,
+    tf_github_branch,
+    tf_github_org,
+    tf_github_repo,
+    tf_triggering_env,
+    artifact_version="N/A",
+    metadata_tags={},
+    build_variant_name=None,
+    running_platform=None,
+    datapoints_timestamp=None,
+    git_hash=None,
+):
+    per_version_time_series_dict = {}
+    per_branch_time_series_dict = {}
+    per_hash_time_series_dict = {}
+    testcase_metric_context_paths = []
+    version_target_tables = None
+    branch_target_tables = None
+    hash_target_tables = None
+    used_ts = datapoints_timestamp
+
+    if exporter_timemetric_path is not None and used_ts is None:
+        # extract timestamp
+        used_ts = parse_exporter_timemetric(exporter_timemetric_path, results_dict)
+
+    if used_ts is None:
+        used_ts = int(datetime.datetime.now(datetime.timezone.utc).timestamp() * 1000.0)
+        logging.warning(
+            "Error while trying to parse datapoints timestamp. Using current system timestamp Error: {}".format(
+                used_ts
+            )
+        )
+    assert used_ts is not None
+    total_break_by_added = 0
+    if (git_hash is not None) and (git_hash != ""):
+        # extract per-hash datapoints
+        (
+            _,
+            per_hash_time_series_dict,
+            version_target_tables,
+        ) = extract_perhash_timeseries_from_results(
+            used_ts,
+            metrics,
+            results_dict,
+            git_hash,
+            tf_github_org,
+            tf_github_repo,
+            deployment_name,
+            deployment_type,
+            test_name,
+            tf_triggering_env,
+            metadata_tags,
+            build_variant_name,
+            running_platform,
+            testcase_metric_context_paths,
+        )
+        total_break_by_added += 1
+    else:
+        logging.warning(
+            "there was no git hash information to push data brokedown by hash"
+        )
+    if (
+        artifact_version is not None
+        and artifact_version != ""
+        and artifact_version != "N/A"
+    ):
+        # extract per-version datapoints
+        total_hs_ts = len(per_hash_time_series_dict.keys())
+        logging.info(
+            f"Extending the by.hash {git_hash} timeseries ({total_hs_ts}) with version info {artifact_version}"
+        )
+        for hash_timeserie in per_hash_time_series_dict.values():
+            hash_timeserie["labels"]["version"] = artifact_version
+        (
+            _,
+            per_version_time_series_dict,
+            version_target_tables,
+        ) = extract_perversion_timeseries_from_results(
+            used_ts,
+            metrics,
+            results_dict,
+            artifact_version,
+            tf_github_org,
+            tf_github_repo,
+            deployment_name,
+            deployment_type,
+            test_name,
+            tf_triggering_env,
+            metadata_tags,
+            build_variant_name,
+            running_platform,
+            testcase_metric_context_paths,
+        )
+        total_break_by_added += 1
+    else:
+        logging.warning(
+            "there was no git VERSION information to push data brokedown by VERSION"
+        )
+    if tf_github_branch is not None and tf_github_branch != "":
+        total_hs_ts = len(per_hash_time_series_dict.keys())
+        logging.info(
+            f"Extending the by.hash {git_hash} timeseries ({total_hs_ts}) with branch info {tf_github_branch}"
+        )
+        for hash_timeserie in per_hash_time_series_dict.values():
+            hash_timeserie["labels"]["branch"] = tf_github_branch
+        # extract per branch datapoints
+        (
+            _,
+            per_branch_time_series_dict,
+            branch_target_tables,
+        ) = extract_perbranch_timeseries_from_results(
+            used_ts,
+            metrics,
+            results_dict,
+            str(tf_github_branch),
+            tf_github_org,
+            tf_github_repo,
+            deployment_name,
+            deployment_type,
+            test_name,
+            tf_triggering_env,
+            metadata_tags,
+            build_variant_name,
+            running_platform,
+            testcase_metric_context_paths,
+        )
+        total_break_by_added += 1
+    else:
+        logging.warning(
+            "there was no git BRANCH information to push data brokedown by BRANCH"
+        )
+    if total_break_by_added == 0:
+        logging.error(
+            "There was no BRANCH, HASH, or VERSION info to break this info by in timeseries"
+        )
+    return (
+        per_version_time_series_dict,
+        per_branch_time_series_dict,
+        per_hash_time_series_dict,
+        testcase_metric_context_paths,
+        version_target_tables,
+        branch_target_tables,
+        hash_target_tables,
+    )
+
+
+def merge_default_and_config_metrics(
+    benchmark_config, default_metrics, exporter_timemetric_path
+):
+    if default_metrics is None:
+        default_metrics = []
+    metrics = default_metrics
+    if benchmark_config is not None:
+        if "exporter" in benchmark_config:
+            extra_metrics = parse_exporter_metrics_definition(
+                benchmark_config["exporter"]
+            )
+            metrics.extend(extra_metrics)
+            extra_timemetric_path = parse_exporter_timemetric_definition(
+                benchmark_config["exporter"]
+            )
+            if extra_timemetric_path is not None:
+                exporter_timemetric_path = extra_timemetric_path
+    return exporter_timemetric_path, metrics
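
A small sketch of the merge behaviour, using a hypothetical test-suite fragment: the "exporter" section of a benchmark config can both append metrics to the defaults and override the time-metric path:

    benchmark_config = {
        "exporter": {
            "redistimeseries": {
                "timemetric": "$.StartTime",
                "metrics": ["$.Totals.opsPerSec"],
            }
        }
    }
    path, metrics = merge_default_and_config_metrics(
        benchmark_config, ["$.Totals.latencyMs"], None
    )
    # path == "$.StartTime"
    # metrics == ["$.Totals.latencyMs", "$.Totals.opsPerSec"]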
+
+
+def get_profilers_rts_key_prefix(triggering_env, tf_github_org, tf_github_repo):
+    zset_name = "ci.benchmarks.redis.com/{triggering_env}/{github_org}/{github_repo}:profiles".format(
+        triggering_env=triggering_env,
+        github_org=tf_github_org,
+        github_repo=tf_github_repo,
+    )
+    return zset_name
+
+
+def prepare_timeseries_dict(
+    artifact_version,
+    benchmark_config,
+    default_metrics,
+    deployment_name,
+    deployment_type,
+    exporter_timemetric_path,
+    results_dict,
+    test_name,
+    tf_github_branch,
+    tf_github_org,
+    tf_github_repo,
+    tf_triggering_env,
+    metadata_tags={},
+    build_variant_name=None,
+    running_platform=None,
+    datapoints_timestamp=None,
+    git_hash=None,
+):
+    time_series_dict = {}
+    # check which metrics to extract
+    exporter_timemetric_path, metrics = merge_default_and_config_metrics(
+        benchmark_config, default_metrics, exporter_timemetric_path
+    )
+    (
+        per_version_time_series_dict,
+        per_branch_time_series_dict,
+        per_hash_timeseries_dict,
+        testcase_metric_context_paths,
+        version_target_tables,
+        branch_target_tables,
+        _,
+    ) = common_exporter_logic(
+        deployment_name,
+        deployment_type,
+        exporter_timemetric_path,
+        metrics,
+        results_dict,
+        test_name,
+        tf_github_branch,
+        tf_github_org,
+        tf_github_repo,
+        tf_triggering_env,
+        artifact_version,
+        metadata_tags,
+        build_variant_name,
+        running_platform,
+        datapoints_timestamp,
+        git_hash,
+    )
+    time_series_dict.update(per_version_time_series_dict)
+    time_series_dict.update(per_branch_time_series_dict)
+    time_series_dict.update(per_hash_timeseries_dict)
+    return (
+        time_series_dict,
+        testcase_metric_context_paths,
+        version_target_tables,
+        branch_target_tables,
+    )
+
+
+def add_standardized_metric_bybranch(
+    metric_name,
+    metric_value,
+    tf_github_branch,
+    deployment_name,
+    deployment_type,
+    rts,
+    start_time_ms,
+    test_name,
+    tf_github_org,
+    tf_github_repo,
+    tf_triggering_env,
+    metadata_tags={},
+    build_variant_name=None,
+    running_platform=None,
+):
+    if metric_value is not None:
+        tsname_use_case_duration = get_ts_metric_name(
+            "by.branch",
+            tf_github_branch,
+            tf_github_org,
+            tf_github_repo,
+            deployment_name,
+            deployment_type,
+            test_name,
+            tf_triggering_env,
+            metric_name,
+            None,
+            False,
+            build_variant_name,
+            running_platform,
+        )
+        labels = get_project_ts_tags(
+            tf_github_org,
+            tf_github_repo,
+            deployment_name,
+            deployment_type,
+            tf_triggering_env,
+            metadata_tags,
+            build_variant_name,
+            running_platform,
+        )
+        labels["branch"] = tf_github_branch
+        labels["deployment_name+branch"] = "{} {}".format(
+            deployment_name, tf_github_branch
+        )
+        labels["test_name"] = str(test_name)
+        labels["metric"] = str(metric_name)
+        logging.info(
+            "Adding metric {}={} to time-serie named {}".format(
+                metric_name, metric_value, tsname_use_case_duration
+            )
+        )
+        ts = {"labels": labels}
+        exporter_create_ts(rts, ts, tsname_use_case_duration)
+        logging.error(labels)
+        rts.ts().add(
+            tsname_use_case_duration,
+            start_time_ms,
+            metric_value,
+            labels=labels,
+        )
+    else:
+        logging.warning(
+            "Given that metric {}={} ( is None ) we will skip adding it to timeseries".format(
+                metric_name, metric_value
+            )
+        )
+
+
+def add_standardized_metric_byversion(
+    metric_name,
+    metric_value,
+    artifact_version,
+    deployment_name,
+    deployment_type,
+    rts,
+    start_time_ms,
+    test_name,
+    tf_github_org,
+    tf_github_repo,
+    tf_triggering_env,
+    metadata_tags={},
+    build_variant_name=None,
+    running_platform=None,
+):
+    if metric_value is not None:
+        tsname_use_case_duration = get_ts_metric_name(
+            "by.version",
+            artifact_version,
+            tf_github_org,
+            tf_github_repo,
+            deployment_name,
+            deployment_type,
+            test_name,
+            tf_triggering_env,
+            metric_name,
+            None,
+            False,
+            build_variant_name,
+            running_platform,
+        )
+        labels = get_project_ts_tags(
+            tf_github_org,
+            tf_github_repo,
+            deployment_name,
+            deployment_type,
+            tf_triggering_env,
+            metadata_tags,
+            build_variant_name,
+        )
+        labels["version"] = artifact_version
+        labels["deployment_name+version"] = "{} {}".format(
+            deployment_name, artifact_version
+        )
+        labels["test_name"] = str(test_name)
+        labels["metric"] = str(metric_name)
+        logging.info(
+            "Adding metric {}={} to time-serie named {}".format(
+                metric_name, metric_value, tsname_use_case_duration
+            )
+        )
+        ts = {"labels": labels}
+        exporter_create_ts(rts, ts, tsname_use_case_duration)
+        rts.ts().add(
+            tsname_use_case_duration,
+            start_time_ms,
+            metric_value,
+            labels=labels,
+        )
+    else:
+        logging.warning(
+            "Given that metric {}={} ( is None ) we will skip adding it to timeseries".format(
+                metric_name, metric_value
+            )
+        )
+
+
+def timeseries_test_sucess_flow(
+    push_results_redistimeseries,
+    artifact_version,
+    benchmark_config,
+    benchmark_duration_seconds,
+    dataset_load_duration_seconds,
+    default_metrics,
+    deployment_name,
+    deployment_type,
+    exporter_timemetric_path,
+    results_dict,
+    rts,
+    start_time_ms,
+    test_name,
+    tf_github_branch,
+    tf_github_org,
+    tf_github_repo,
+    tf_triggering_env,
+    metadata_tags={},
+    build_variant_name=None,
+    running_platform=None,
+    timeseries_dict=None,
+    git_hash=None,
+):
+    testcase_metric_context_paths = []
+    version_target_tables = None
+    branch_target_tables = None
+
+    if timeseries_dict is None:
+        (
+            timeseries_dict,
+            testcase_metric_context_paths,
+            version_target_tables,
+            branch_target_tables,
+        ) = prepare_timeseries_dict(
+            artifact_version,
+            benchmark_config,
+            default_metrics,
+            deployment_name,
+            deployment_type,
+            exporter_timemetric_path,
+            results_dict,
+            test_name,
+            tf_github_branch,
+            tf_github_org,
+            tf_github_repo,
+            tf_triggering_env,
+            metadata_tags,
+            build_variant_name,
+            running_platform,
+            start_time_ms,
+            git_hash,
+        )
+    if push_results_redistimeseries:
+        logging.info(
+            "Pushing results to RedisTimeSeries. Have {} distinct data-points to insert.".format(
+                len(timeseries_dict.keys())
+            )
+        )
+        push_data_to_redistimeseries(rts, timeseries_dict)
+        if version_target_tables is not None:
+            logging.info(
+                "There are a total of {} distinct target tables by version".format(
+                    len(version_target_tables.keys())
+                )
+            )
+            for (
+                version_target_table_keyname,
+                version_target_table_dict,
+            ) in version_target_tables.items():
+                logging.info(
+                    "Setting target table by version on key {}".format(
+                        version_target_table_keyname
+                    )
+                )
+                if "contains-target" in version_target_table_dict:
+                    del version_target_table_dict["contains-target"]
+                rts.hset(
+                    version_target_table_keyname, None, None, version_target_table_dict
+                )
+        if branch_target_tables is not None:
+            logging.info(
+                "There are a total of {} distinct target tables by branch".format(
+                    len(branch_target_tables.keys())
+                )
+            )
+            for (
+                branch_target_table_keyname,
+                branch_target_table_dict,
+            ) in branch_target_tables.items():
+
+                logging.info(
+                    "Setting target table by branch on key {}".format(
+                        branch_target_table_keyname
+                    )
+                )
+                if "contains-target" in branch_target_table_dict:
+                    del branch_target_table_dict["contains-target"]
+                rts.hset(
+                    branch_target_table_keyname, None, None, branch_target_table_dict
+                )
+        if test_name is not None:
+            if type(test_name) is str:
+                update_secondary_result_keys(
+                    artifact_version,
+                    benchmark_duration_seconds,
+                    build_variant_name,
+                    dataset_load_duration_seconds,
+                    deployment_name,
+                    deployment_type,
+                    metadata_tags,
+                    rts,
+                    running_platform,
+                    start_time_ms,
+                    test_name,
+                    testcase_metric_context_paths,
+                    tf_github_branch,
+                    tf_github_org,
+                    tf_github_repo,
+                    tf_triggering_env,
+                )
+            if type(test_name) is list:
+                for inner_test_name in test_name:
+                    update_secondary_result_keys(
+                        artifact_version,
+                        benchmark_duration_seconds,
+                        build_variant_name,
+                        dataset_load_duration_seconds,
+                        deployment_name,
+                        deployment_type,
+                        metadata_tags,
+                        rts,
+                        running_platform,
+                        start_time_ms,
+                        inner_test_name,
+                        testcase_metric_context_paths,
+                        tf_github_branch,
+                        tf_github_org,
+                        tf_github_repo,
+                        tf_triggering_env,
+                    )
+        else:
+            update_secondary_result_keys(
+                artifact_version,
+                benchmark_duration_seconds,
+                build_variant_name,
+                dataset_load_duration_seconds,
+                deployment_name,
+                deployment_type,
+                metadata_tags,
+                rts,
+                running_platform,
+                start_time_ms,
+                test_name,
+                testcase_metric_context_paths,
+                tf_github_branch,
+                tf_github_org,
+                tf_github_repo,
+                tf_triggering_env,
+            )
+    return version_target_tables, branch_target_tables
+
+
+def update_secondary_result_keys(
+    artifact_version,
+    benchmark_duration_seconds,
+    build_variant_name,
+    dataset_load_duration_seconds,
+    deployment_name,
+    deployment_type,
+    metadata_tags,
+    rts,
+    running_platform,
+    start_time_ms,
+    test_name,
+    testcase_metric_context_paths,
+    tf_github_branch,
+    tf_github_org,
+    tf_github_repo,
+    tf_triggering_env,
+):
+    (
+        _,
+        testcases_setname,
+        deployment_name_zsetname,
+        _,
+        tsname_project_total_success,
+        running_platforms_setname,
+        build_variant_setname,
+        testcases_metric_context_path_setname,
+        testcases_and_metric_context_path_setname,
+        project_archs_setname,
+        project_oss_setname,
+        project_branches_setname,
+        project_versions_setname,
+        project_compilers_setname,
+    ) = get_overall_dashboard_keynames(
+        tf_github_org,
+        tf_github_repo,
+        tf_triggering_env,
+        build_variant_name,
+        running_platform,
+        test_name,
+    )
+    try:
+        rts.zadd(deployment_name_zsetname, {deployment_name: start_time_ms})
+        if test_name is not None:
+            deployment_name_zsetname_testnames = (
+                deployment_name_zsetname
+                + "{}:deployment_name={}".format(
+                    deployment_name_zsetname, deployment_name
+                )
+            )
+            rts.zadd(deployment_name_zsetname_testnames, {test_name: start_time_ms})
+            rts.sadd(testcases_setname, test_name)
+            testcases_zsetname = testcases_setname + ":zset"
+            rts.zadd(testcases_zsetname, {test_name: start_time_ms})
+            if "component" in metadata_tags:
+                testcases_zsetname_component = "{}:zset:component:{}".format(
+                    testcases_setname, metadata_tags["component"]
+                )
+                rts.zadd(testcases_zsetname_component, {test_name: start_time_ms})
+        if "arch" in metadata_tags:
+            rts.sadd(project_archs_setname, metadata_tags["arch"])
+        if "os" in metadata_tags:
+            rts.sadd(project_oss_setname, metadata_tags["os"])
+        if "compiler" in metadata_tags:
+            rts.sadd(project_compilers_setname, metadata_tags["compiler"])
+        if tf_github_branch is not None and tf_github_branch != "":
+            rts.sadd(project_branches_setname, tf_github_branch)
+            project_branches_zsetname = project_branches_setname + ":zset"
+            rts.zadd(project_branches_zsetname, {tf_github_branch: start_time_ms})
+        if artifact_version is not None and artifact_version != "":
+            rts.sadd(project_versions_setname, artifact_version)
+            project_versions_zsetname = project_versions_setname + ":zset"
+            rts.zadd(project_versions_zsetname, {artifact_version: start_time_ms})
+        if running_platform is not None:
+            rts.sadd(running_platforms_setname, running_platform)
+            running_platforms_szetname = running_platforms_setname + ":zset"
+            rts.zadd(running_platforms_szetname, {running_platform: start_time_ms})
+        if build_variant_name is not None:
+            rts.sadd(build_variant_setname, build_variant_name)
+            build_variant_zsetname = build_variant_setname + ":zset"
+            rts.zadd(build_variant_zsetname, {build_variant_name: start_time_ms})
+        if testcase_metric_context_paths is not None:
+            for metric_context_path in testcase_metric_context_paths:
+                if testcases_metric_context_path_setname != "":
+                    rts.sadd(testcases_metric_context_path_setname, metric_context_path)
+                    rts.sadd(
+                        testcases_and_metric_context_path_setname,
+                        "{}:{}".format(test_name, metric_context_path),
+                    )
+        rts.ts().incrby(
+            tsname_project_total_success,
+            1,
+            timestamp=start_time_ms,
+            labels=get_project_ts_tags(
+                tf_github_org,
+                tf_github_repo,
+                deployment_name,
+                deployment_type,
+                tf_triggering_env,
+                metadata_tags,
+                build_variant_name,
+                running_platform,
+            ),
+        )
+        if tf_github_branch is not None and tf_github_branch != "":
+            add_standardized_metric_bybranch(
+                "benchmark_duration",
+                benchmark_duration_seconds,
+                str(tf_github_branch),
+                deployment_name,
+                deployment_type,
+                rts,
+                start_time_ms,
+                test_name,
+                tf_github_org,
+                tf_github_repo,
+                tf_triggering_env,
+                metadata_tags,
+                build_variant_name,
+                running_platform,
+            )
+            add_standardized_metric_bybranch(
+                "dataset_load_duration",
+                dataset_load_duration_seconds,
+                str(tf_github_branch),
+                deployment_name,
+                deployment_type,
+                rts,
+                start_time_ms,
+                test_name,
+                tf_github_org,
+                tf_github_repo,
+                tf_triggering_env,
+                metadata_tags,
+                build_variant_name,
+                running_platform,
+            )
+        if artifact_version is not None and artifact_version != "":
+            add_standardized_metric_byversion(
+                "benchmark_duration",
+                benchmark_duration_seconds,
+                artifact_version,
+                deployment_name,
+                deployment_type,
+                rts,
+                start_time_ms,
+                test_name,
+                tf_github_org,
+                tf_github_repo,
+                tf_triggering_env,
+                metadata_tags,
+                build_variant_name,
+                running_platform,
+            )
+            add_standardized_metric_byversion(
+                "dataset_load_duration",
+                dataset_load_duration_seconds,
+                artifact_version,
+                deployment_name,
+                deployment_type,
+                rts,
+                start_time_ms,
+                test_name,
+                tf_github_org,
+                tf_github_repo,
+                tf_triggering_env,
+                metadata_tags,
+                build_variant_name,
+                running_platform,
+            )
+    except redis.exceptions.ResponseError as e:
+        logging.warning(
+            "Error while updating secondary data structures {}. ".format(e.__str__())
+        )
+        pass
+
+
+def get_start_time_vars(start_time=None):
+    if start_time is None:
+        start_time = datetime.datetime.utcnow()
+    start_time_ms = int(
+        (start_time - datetime.datetime(1970, 1, 1)).total_seconds() * 1000
+    )
+    start_time_str = start_time.strftime("%Y-%m-%d-%H-%M-%S")
+    return start_time, start_time_ms, start_time_str
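
A quick sanity check of the epoch arithmetic above, for a hypothetical instant:

    import datetime

    _, ms, s = get_start_time_vars(datetime.datetime(2023, 1, 1))
    # ms == 1672531200000 (milliseconds since the Unix epoch)
    # s == "2023-01-01-00-00-00"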
+
+
+def timeseries_test_failure_flow(
+    args,
+    deployment_name,
+    deployment_type,
+    rts,
+    start_time_ms,
+    tf_github_org,
+    tf_github_repo,
+    tf_triggering_env,
+    tsname_project_total_failures,
+):
+    if args.push_results_redistimeseries:
+        if start_time_ms is None:
+            _, start_time_ms, _ = get_start_time_vars()
+        try:
+            rts.ts().incrby(
+                tsname_project_total_failures,
+                1,
+                timestamp=start_time_ms,
+                labels=get_project_ts_tags(
+                    tf_github_org,
+                    tf_github_repo,
+                    deployment_name,
+                    deployment_type,
+                    tf_triggering_env,
+                ),
+            )
+        except redis.exceptions.ResponseError as e:
+            logging.warning(
+                "Error while updating secondary data structures {}. ".format(
+                    e.__str__()
+                )
+            )
+            pass
+
+
+def datasink_profile_tabular_data(
+    github_branch,
+    github_org_name,
+    github_repo_name,
+    github_sha,
+    overall_tabular_data_map,
+    rts,
+    setup_type,
+    start_time_ms,
+    start_time_str,
+    test_name,
+    tf_triggering_env,
+):
+    zset_profiles_key_name = get_profilers_rts_key_prefix(
+        tf_triggering_env,
+        github_org_name,
+        github_repo_name,
+    )
+    profile_test_suffix = "{start_time_str}:{test_name}/{setup_type}/{github_branch}/{github_hash}".format(
+        start_time_str=start_time_str,
+        test_name=test_name,
+        setup_type=setup_type,
+        github_branch=github_branch,
+        github_hash=github_sha,
+    )
+    rts.zadd(
+        zset_profiles_key_name,
+        {profile_test_suffix: start_time_ms},
+    )
+    for (
+        profile_tabular_type,
+        tabular_data,
+    ) in overall_tabular_data_map.items():
+        tabular_suffix = "{}:{}".format(profile_tabular_type, profile_test_suffix)
+        logging.info(
+            "Pushing to data-sink tabular data from pprof ({}). Tabular suffix: {}".format(
+                profile_tabular_type, tabular_suffix
+            )
+        )
+
+        table_columns_text_key = "{}:columns:text".format(tabular_suffix)
+        table_columns_type_key = "{}:columns:type".format(tabular_suffix)
+        logging.info(
+            "Pushing list key (named {}) the following column text: {}".format(
+                table_columns_text_key, tabular_data["columns:text"]
+            )
+        )
+        rts.rpush(table_columns_text_key, *tabular_data["columns:text"])
+        logging.info(
+            "Pushing list key (named {}) the following column types: {}".format(
+                table_columns_type_key, tabular_data["columns:type"]
+            )
+        )
+        rts.rpush(table_columns_type_key, *tabular_data["columns:type"])
+        for row_name in tabular_data["columns:text"]:
+            table_row_key = "{}:rows:{}".format(tabular_suffix, row_name)
+            row_values = tabular_data["rows:{}".format(row_name)]
+            rts.rpush(table_row_key, *row_values)
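
For reference, a hypothetical overall_tabular_data_map in the column-oriented shape this function expects: one "rows:<column>" list per entry in "columns:text", keyed by the pprof table type (all names and values below are illustrative):

    overall_tabular_data_map = {
        "text-lines": {
            "columns:text": ["flat", "sum", "function"],
            "columns:type": ["number", "number", "text"],
            "rows:flat": [12.5, 7.1],
            "rows:sum": [12.5, 19.6],
            "rows:function": ["mainDict", "processCommand"],
        }
    }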