locust-cloud 1.12.4__py3-none-any.whl → 1.14.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,321 +0,0 @@
1
- from typing import LiteralString
2
-
3
- requests_query = """
4
- SELECT
5
- name,
6
- request_type as method,
7
- SUM(count) as requests,
8
- SUM(failed_count) as failed,
9
- MAX(max),
10
- SUM(failed_count) / SUM(count) * 100 as "errorPercentage"
11
- FROM requests_summary_view
12
- WHERE bucket BETWEEN %(start)s AND %(end)s
13
- AND run_id = %(testrun)s
14
- GROUP BY name, method
15
- """
16
-
17
-
18
- failures_query = """
19
- SELECT
20
- name as name,
21
- exception,
22
- count(*)
23
- FROM requests_view
24
- WHERE time BETWEEN %(start)s AND %(end)s AND
25
- success = 0
26
- AND run_id = %(testrun)s
27
- GROUP BY "name",exception
28
- """
29
-
30
-
31
- requests_per_second = """
32
- WITH request_count_agg AS (
33
- SELECT
34
- time_bucket_gapfill(%(resolution)s * interval '1 second', bucket) AS time,
35
- COALESCE(SUM(count)/%(resolution)s, 0) as rps
36
- FROM requests_summary_view
37
- WHERE bucket BETWEEN %(start)s AND %(end)s
38
- AND run_id = %(testrun)s
39
- GROUP BY 1
40
- ORDER BY 1
41
- ),
42
- user_count_agg AS (
43
- SELECT
44
- time_bucket_gapfill(%(resolution)s * interval '1 second', time) AS time,
45
- COALESCE(avg(user_count), 0) as users
46
- FROM number_of_users
47
- WHERE time BETWEEN %(start)s AND %(end)s
48
- AND run_id = %(testrun)s
49
- GROUP BY 1
50
- ORDER BY 1
51
- ),
52
- errors_per_s_agg AS (
53
- SELECT
54
- time_bucket_gapfill(%(resolution)s * interval '1 second', bucket) AS time,
55
- COALESCE(SUM(failed_count)/%(resolution)s, 0) as error_rate
56
- FROM requests_summary_view
57
- WHERE bucket BETWEEN %(start)s AND %(end)s
58
- AND run_id = %(testrun)s
59
- GROUP BY 1
60
- ORDER BY 1
61
- )
62
- SELECT
63
- r.time,
64
- u.users,
65
- r.rps,
66
- e.error_rate as "errorRate"
67
- FROM request_count_agg r
68
- LEFT JOIN user_count_agg u ON r.time = u.time
69
- LEFT JOIN errors_per_s_agg e on r.time = e.time
70
- ORDER BY r.time;
71
- """
72
-
73
-
74
- total_requests = """
75
- SELECT
76
- SUM(count) as "totalRequests"
77
- FROM requests_summary_view
78
- WHERE bucket BETWEEN %(start)s AND %(end)s
79
- AND run_id = %(testrun)s
80
- """
81
-
82
-
83
- total_failed = """
84
- SELECT
85
- SUM(failed_count) as "totalFailures"
86
- FROM requests_summary_view
87
- WHERE bucket BETWEEN %(start)s AND %(end)s
88
- AND run_id = %(testrun)s
89
- """
90
-
91
-
92
- error_percentage = """
93
- SELECT
94
- SUM(failed_count) / SUM(count) * 100 "errorPercentage"
95
- FROM requests_summary_view
96
- WHERE bucket BETWEEN %(start)s AND %(end)s
97
- AND run_id = %(testrun)s
98
- """
99
-
100
- rps_per_request = """
101
- SELECT
102
- time_bucket_gapfill(%(resolution)s * interval '1 second', bucket) AS time,
103
- name,
104
- COALESCE(SUM(count)/%(resolution)s, 0) as throughput
105
- FROM requests_summary_view
106
- WHERE bucket BETWEEN %(start)s AND %(end)s
107
- AND run_id = %(testrun)s
108
- GROUP BY 1, name
109
- ORDER BY 1,2
110
- """
111
-
112
-
113
- avg_response_times = """
114
- SELECT
115
- time_bucket_gapfill(%(resolution)s * interval '1 second', bucket) as time,
116
- name,
117
- avg(average) as "responseTime"
118
- FROM requests_summary_view
119
- WHERE bucket BETWEEN %(start)s AND %(end)s
120
- AND run_id = %(testrun)s
121
- GROUP BY 1, name
122
- ORDER BY 1, 2
123
- """
124
-
125
- errors_per_request = """
126
- SELECT
127
- time_bucket_gapfill(%(resolution)s * interval '1 second', bucket) AS time,
128
- name,
129
- SUM(failed_count)/%(resolution)s as "errorRate"
130
- FROM requests_summary_view
131
- WHERE bucket BETWEEN %(start)s AND %(end)s
132
- AND run_id = %(testrun)s
133
- GROUP BY 1, name
134
- ORDER BY 1
135
- """
136
-
137
-
138
- perc99_response_times = """
139
- SELECT time_bucket_gapfill(%(resolution)s * interval '1 second', bucket) AS time,
140
- name,
141
- MAX(perc99) as perc99
142
- FROM requests_summary_view
143
- WHERE bucket BETWEEN %(start)s AND %(end)s
144
- AND run_id = %(testrun)s
145
- GROUP BY 1, name
146
- ORDER BY 1
147
- """
148
-
149
-
150
- response_length = """
151
- SELECT
152
- time_bucket_gapfill(%(resolution)s * interval '1 second', bucket) as time,
153
- AVG(response_length) as "responseLength",
154
- name
155
- FROM requests_summary_view
156
- WHERE bucket BETWEEN %(start)s AND %(end)s
157
- AND run_id = %(testrun)s
158
- GROUP BY 1, name
159
- ORDER BY 1
160
- """
161
-
162
-
163
- request_names = """
164
- SELECT DISTINCT name
165
- FROM requests_summary_view
166
- WHERE bucket BETWEEN %(start)s AND %(end)s
167
- AND run_id = %(testrun)s
168
- """
169
-
170
- scatterplot = """
171
- SELECT
172
- time,
173
- name,
174
- response_time as "responseTime"
175
- FROM requests_view
176
- WHERE time BETWEEN %(start)s AND %(end)s
177
- AND run_id = %(testrun)s
178
- ORDER BY 1,2
179
- """
180
-
181
- testruns = """
182
- SELECT
183
- id as "runId",
184
- end_time as "endTime",
185
- locustfile,
186
- profile
187
- FROM testruns
188
- ORDER BY id DESC
189
- """
190
-
191
- testruns_table = """
192
- SELECT
193
- id as "runId",
194
- profile,
195
- num_users as "numUsers",
196
- round(rps_avg, 1) as "rpsAvg",
197
- round(resp_time_avg, 1) as "respTime",
198
- fail_ratio as "failRatio",
199
- requests,
200
- date_trunc('second', end_time - id) AS "runTime",
201
- exit_code as "exitCode",
202
- username,
203
- worker_count as "workerCount",
204
- locustfile
205
- FROM testruns
206
- WHERE %(profile)s::text IS NULL or profile = %(profile)s
207
- OR locustfile = %(profile)s
208
- ORDER BY id DESC
209
- """
210
-
211
- testruns_rps = """
212
- WITH avg_rps AS (
213
- SELECT
214
- id AS time,
215
- rps_avg AS avg_rps
216
- FROM testruns
217
- WHERE %(profile)s::text IS NULL or profile = %(profile)s
218
- OR locustfile = %(profile)s
219
- ORDER BY id
220
- ),
221
- avg_rps_failed AS (
222
- SELECT
223
- id AS time,
224
- CASE
225
- WHEN exit_code > 0 THEN rps_avg
226
- ELSE 0
227
- END AS avg_rps_failed
228
- FROM testruns
229
- WHERE %(profile)s::text IS NULL or profile = %(profile)s
230
- OR locustfile = %(profile)s
231
- ORDER BY id
232
- )
233
- SELECT
234
- a.time,
235
- a.avg_rps as "avgRps",
236
- f.avg_rps_failed as "avgRpsFailed"
237
- FROM avg_rps a
238
- JOIN avg_rps_failed f ON a.time = f.time
239
- ORDER BY a.time
240
- """
241
-
242
- testruns_response_time = """
243
- WITH avg_response_time AS (
244
- SELECT
245
- id AS time,
246
- resp_time_avg AS avg_response_time
247
- FROM testruns
248
- WHERE %(profile)s::text IS NULL or profile = %(profile)s
249
- OR locustfile = %(profile)s
250
- ORDER BY id
251
- ),
252
- avg_response_time_failed AS (
253
- SELECT
254
- id AS time,
255
- CASE
256
- WHEN exit_code > 0 THEN resp_time_avg
257
- ELSE 0
258
- END AS avg_response_time_failed
259
- FROM testruns
260
- WHERE %(profile)s::text IS NULL or profile = %(profile)s
261
- OR locustfile = %(profile)s
262
- ORDER BY id
263
- )
264
- SELECT
265
- a.time,
266
- a.avg_response_time as "avgResponseTime",
267
- f.avg_response_time_failed as "avgResponseTimeFailed"
268
- FROM avg_response_time a
269
- JOIN avg_response_time_failed f ON a.time = f.time
270
- ORDER BY a.time
271
- """
272
-
273
- total_vuh = """
274
- SELECT
275
- COALESCE(SUM((end_time - id) * num_users), '0') AS "totalVuh"
276
- FROM testruns
277
- WHERE id >= date_trunc('month', NOW()) AND NOT refund
278
- """
279
-
280
- customer = """
281
- SELECT
282
- max_vuh as "maxVuh",
283
- max_workers as "maxWorkers",
284
- max_users as "maxUsers",
285
- users_per_worker as "usersPerWorker"
286
- FROM customers
287
- WHERE id = current_user
288
- """
289
-
290
- profiles = """
291
- SELECT DISTINCT
292
- CASE
293
- WHEN profile IS NOT NULL AND profile != '' THEN profile
294
- ELSE locustfile
295
- END AS profile
296
- FROM testruns
297
- WHERE locustfile IS NOT NULL
298
- """
299
-
300
- queries: dict["str", LiteralString] = {
301
- "request-names": request_names,
302
- "requests": requests_query,
303
- "failures": failures_query,
304
- "rps": requests_per_second,
305
- "total-requests": total_requests,
306
- "total-failures": total_failed,
307
- "error-percentage": error_percentage,
308
- "rps-per-request": rps_per_request,
309
- "avg-response-times": avg_response_times,
310
- "errors-per-request": errors_per_request,
311
- "perc99-response-times": perc99_response_times,
312
- "response-length": response_length,
313
- "scatterplot": scatterplot,
314
- "testruns": testruns,
315
- "testruns-table": testruns_table,
316
- "testruns-rps": testruns_rps,
317
- "testruns-response-time": testruns_response_time,
318
- "total-vuh": total_vuh,
319
- "customer": customer,
320
- "profiles": profiles,
321
- }
@@ -1,74 +0,0 @@
1
- import logging
2
- from datetime import UTC
3
-
4
- from flask import Blueprint, make_response, request
5
- from flask_login import login_required
6
- from locust_cloud.timescale.queries import queries
7
-
8
- logger = logging.getLogger(__name__)
9
-
10
-
11
def adapt_timestamp(result):
    """Return a copy of *result* with every truthy value converted to ``str``.

    Falsy values (``None``, ``0``, ``""`` ...) are mapped to ``None`` so they
    serialize as JSON null. NOTE(review): this also turns a legitimate 0 into
    None — presumably intentional for gap-filled metrics, but worth confirming.
    """
    adapted = {}
    for key, value in result.items():
        adapted[key] = str(value) if value else None
    return adapted
13
-
14
-
15
def register_query(environment, pool):
    """Register the POST /cloud-stats/<query> endpoint on the Locust web UI.

    *environment* is the Locust environment (provides the Flask app and the
    parsed options used for the URL prefix); *pool* is a psycopg connection
    pool used to execute the canned queries from
    locust_cloud.timescale.queries.
    """
    cloud_stats_blueprint = Blueprint(
        "locust_cloud_stats", __name__, url_prefix=environment.parsed_options.web_base_path
    )

    @cloud_stats_blueprint.route("/cloud-stats/<query>", methods=["POST"])
    @login_required
    def query(query):
        from datetime import datetime, timedelta

        # Membership test rather than queries[query]: the original indexing
        # raised KeyError for unknown keys, which was swallowed by the generic
        # handler below — the "Invalid query key" branch was unreachable.
        if not query or query not in queries:
            logger.warning(f"Received invalid query key: '{query}'")
            return make_response({"error": "Invalid query key"}, 401)
        try:
            sql = queries[query]
            with pool.connection() as conn:
                sql_params = request.get_json() if request.content_type == "application/json" else {}
                if "start" in sql_params:
                    # Protect the database against huge queries.
                    start_time = datetime.fromisoformat(sql_params["start"])
                    end_time = datetime.fromisoformat(sql_params["end"])
                    if end_time >= start_time + timedelta(hours=48):
                        logger.warning(
                            f"UI asked for too long time interval. Start was {sql_params['start']}, end was {sql_params['end']}"
                        )
                        return []
                    if start_time >= datetime(2024, 10, 30, 11, tzinfo=UTC):
                        # when runs before this go out of scope, we can just update the query
                        sql = sql.replace("FROM requests_summary_view", "FROM requests_summary_view_v1")

                cursor = conn.execute(sql, sql_params)
                assert cursor
                # Rows come back as tuples; pair them with the column names
                # from the cursor description and stringify for JSON.
                column_names = [column[0] for column in cursor.description]
                return [
                    adapt_timestamp(dict(zip(column_names, row)))
                    for row in cursor.fetchall()
                ]
        except Exception as e:
            logger.info(f"Error executing UI query '{query}': {e}", exc_info=True)
            return make_response({"error": "Error executing query"}, 401)

    environment.web_ui.app.register_blueprint(cloud_stats_blueprint)
@@ -1,4 +0,0 @@
1
- __pycache__
2
- node_modules
3
- tsconfig.tsbuildinfo
4
- dist/**
@@ -1,9 +0,0 @@
1
- {
2
- "arrowParens": "avoid",
3
- "semi": true,
4
- "singleQuote": true,
5
- "jsxSingleQuote": true,
6
- "trailingComma": "all",
7
- "bracketSpacing": true,
8
- "printWidth": 100
9
- }