locust-cloud 0.1.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- locust_cloud/__init__.py +50 -0
- locust_cloud/timescale/exporter.py +336 -0
- locust_cloud/webui/.eslintrc +41 -0
- locust_cloud/webui/.gitignore +4 -0
- locust_cloud/webui/.prettierrc +9 -0
- locust_cloud/webui/dist/assets/index-BeJh9ha2.js +323 -0
- locust_cloud/webui/dist/index.html +19 -0
- locust_cloud/webui/index.html +19 -0
- locust_cloud/webui/package.json +42 -0
- locust_cloud/webui/tsconfig.json +26 -0
- locust_cloud/webui/tsconfig.tsbuildinfo +1 -0
- locust_cloud/webui/vite.config.ts +9 -0
- locust_cloud/webui/yarn.lock +4587 -0
- locust_cloud-0.1.4.dist-info/METADATA +52 -0
- locust_cloud-0.1.4.dist-info/RECORD +16 -0
- locust_cloud-0.1.4.dist-info/WHEEL +4 -0
locust_cloud/__init__.py
ADDED
@@ -0,0 +1,50 @@
|
|
1
|
+
from locust_cloud.timescale.exporter import Timescale

import os

from locust import events
from locust.argument_parser import LocustArgumentParser

# Point locust's web UI at the frontend build bundled with this package,
# unless the caller has already chosen a build path explicitly.
if "LOCUST_BUILD_PATH" not in os.environ:
    os.environ["LOCUST_BUILD_PATH"] = os.path.join(os.path.dirname(__file__), "webui/dist")

# Postgres/Timescale connection settings, read from the environment.
# NOTE(review): these are None when unset; nothing validates them until a
# connection attempt is made in the exporter.
PG_USER = os.environ.get("PG_USER")
PG_HOST = os.environ.get("PG_HOST")
PG_PASSWORD = os.environ.get("PG_PASSWORD")
PG_DATABASE = os.environ.get("PG_DATABASE")
# NOTE(review): the default here is the int 5432, while an env override yields
# a str — psycopg2 accepts both, but the type is inconsistent.
PG_PORT = os.environ.get("PG_PORT", 5432)
17
|
+
@events.init_command_line_parser.add_listener
def add_arguments(parser: LocustArgumentParser):
    """Register locust-cloud's command line options on locust's parser."""
    locust_cloud = parser.add_argument_group(
        "locust-cloud",
        "Arguments for use with Locust cloud!",
    )

    # NOTE(review): default=True combined with action="store_true" makes this
    # flag effectively always on — passing --exporter (or setting
    # LOCUST_EXPORTER) cannot change the value, and there is no way to turn
    # the exporter off. Presumably intended to be togglable; confirm before
    # changing the default, since flipping it would alter behavior.
    locust_cloud.add_argument(
        "--exporter",
        default=True,
        action="store_true",
        env_var="LOCUST_EXPORTER",
        help="Exports Locust stats to Timescale",
    )
    locust_cloud.add_argument(
        "--description",
        type=str,
        env_var="LOCUST_DESCRIPTION",
        default="",
        help="Description of the test being run",
    )
39
|
+
|
40
|
+
@events.init.add_listener
def on_locust_init(environment, **args):
    """Start the Timescale exporter for this environment unless disabled."""
    if not environment.parsed_options.exporter:
        return

    # The PG_* values come from this module's environment-variable constants.
    Timescale(
        environment,
        pg_user=PG_USER,
        pg_host=PG_HOST,
        pg_password=PG_PASSWORD,
        pg_database=PG_DATABASE,
        pg_port=PG_PORT,
    )
|
@@ -0,0 +1,336 @@
|
|
1
|
+
import json
import logging
import os
import socket
import sys
from contextlib import contextmanager

import gevent
import locust.env
from gevent.lock import Semaphore
from locust.exception import CatchResponseError

# psycogreen must patch psycopg2 *before* psycopg2 is imported, so that its
# blocking database calls cooperate with gevent's event loop instead of
# stalling every greenlet in the process.
try:
    import psycogreen.gevent
except ModuleNotFoundError as e:
    logging.error(f"'{e}', you need to install it using 'pip install psycogreen'")
    sys.exit(1)

psycogreen.gevent.patch_psycopg()
# These imports are deliberately placed after the gevent patching above.
import atexit
from datetime import UTC, datetime, timedelta

import greenlet
import psycopg2
import psycopg2.extras
|
+
def safe_serialize(obj):
    """JSON-encode *obj*, substituting a placeholder string for any value
    the json module cannot serialize natively."""

    def fallback(unserializable):
        return f"<<non-serializable: {type(unserializable).__qualname__}>>"

    return json.dumps(obj, default=fallback)
|
33
|
+
|
34
|
+
|
35
|
+
def print_t(s):
    """Write *s* to stdout terminated by a tab instead of a newline
    (used for assembling tab-separated rows)."""
    sys.stdout.write(str(s) + "\t")
|
37
|
+
|
38
|
+
|
39
|
+
class Timescale:
    """
    Exports locust request samples, user counts and run metadata to a
    Timescale/Postgres database.

    Registers itself on the environment's event hooks; request samples are
    buffered in memory and flushed by a background greenlet.

    See timescale_listener_ex.py for documentation
    """

    # Class-level lock so all greenlets serialize access to the shared
    # database connection.
    dblock = Semaphore()
    first_instance = True

    def __init__(self, environment: locust.env.Environment, pg_user, pg_host, pg_password, pg_database, pg_port):
        if not Timescale.first_instance:
            # we should refactor this into a module as it is much more pythonic
            raise Exception(
                "You tried to initialize the Timescale listener twice, maybe both in your locustfile and using command line --timescale? Ignoring second initialization."
            )
        Timescale.first_instance = False
        self.env = environment
        self._run_id = ""
        self.dbconn = None  # established in on_test_start (or lazily in dbcursor)
        self._samples: list[dict] = []
        self._background = gevent.spawn(self._run)  # flushes buffered samples
        self._hostname = socket.gethostname()
        self._finished = False
        self._pid = os.getpid()

        self.pg_user = pg_user
        self.pg_host = pg_host
        self.pg_password = pg_password
        self.pg_database = pg_database
        self.pg_port = pg_port

        events = self.env.events
        events.test_start.add_listener(self.on_test_start)
        events.test_stop.add_listener(self.on_test_stop)
        events.request.add_listener(self.on_request)
        events.cpu_warning.add_listener(self.on_cpu_warning)
        events.quit.add_listener(self.on_quit)
        events.spawning_complete.add_listener(self.spawning_complete)
        atexit.register(self.log_stop_test_run)

        if self.env.runner is not None:
            # workers (and standalone runs, via loopback) receive the run id
            # from the master through this message
            self.env.runner.register_message("run_id", self.set_run_id)

    def set_run_id(self, environment, msg, **kwargs):
        """Message handler: adopt the run id broadcast by the master."""
        logging.debug(f"run id from master: {msg.data}")
        self._run_id = datetime.strptime(msg.data, "%Y-%m-%d, %H:%M:%S.%f").replace(tzinfo=UTC)

    @contextmanager
    def dbcursor(self):
        """Yield a cursor on the shared connection, (re)connecting as needed."""
        with self.dblock:
            try:
                # FIX: also connect when no connection exists yet; previously a
                # None connection slipped through and crashed on .cursor().
                if self.dbconn is None or self.dbconn.closed:
                    self.dbconn = self._dbconn()
                yield self.dbconn.cursor()
            except psycopg2.Error:
                try:
                    # try to recreate connection
                    self.dbconn = self._dbconn()
                except Exception:
                    pass
                raise

    def on_cpu_warning(self, environment: locust.env.Environment, cpu_usage, message=None, timestamp=None, **kwargs):
        # passing a custom message & timestamp to the event is a haxx to allow using this event for reporting generic events
        if not timestamp:
            timestamp = datetime.now(UTC).isoformat()
        if not message:
            message = f"High CPU usage ({cpu_usage}%)"
        with self.dbcursor() as cur:
            cur.execute(
                "INSERT INTO events (time, text, run_id) VALUES (%s, %s, %s)", (timestamp, message, self._run_id)
            )

    def on_test_start(self, environment: locust.env.Environment):
        """Open the db connection and, on master/standalone, start a new test run."""
        try:
            self.dbconn = self._dbconn()
        except psycopg2.OperationalError as e:
            logging.error(e)
            sys.exit(1)

        if not self.env.parsed_options or not self.env.parsed_options.worker:
            environment._run_id = datetime.now(UTC)  # type: ignore
            # FIX: adopt the run id locally as well. Previously only workers
            # (and standalone runs, via their loopback message) received it,
            # so a distributed master logged run_id="" in its own inserts.
            self._run_id = environment._run_id  # type: ignore
            msg = environment._run_id.strftime("%Y-%m-%d, %H:%M:%S.%f")  # type: ignore
            if environment.runner is not None:
                logging.debug(f"about to send run_id to workers: {msg}")
                environment.runner.send_message("run_id", msg)
            self.log_start_testrun()
            self._user_count_logger = gevent.spawn(self._log_user_count)

    def _dbconn(self) -> psycopg2.extensions.connection:
        """Create a new autocommit Postgres connection; exit the process on failure."""
        try:
            conn = psycopg2.connect(
                host=self.pg_host,
                user=self.pg_user,
                password=self.pg_password,
                database=self.pg_database,
                port=self.pg_port,
                # TCP keepalives so idle runs don't get dropped by middleboxes
                keepalives_idle=120,
                keepalives_interval=20,
                keepalives_count=6,
            )

            conn.autocommit = True
        except Exception:
            sys.stderr.write(f"Could not connect to postgres ({self.pg_user}@{self.pg_host}:{self.pg_port}).")
            sys.exit(1)

        return conn

    def _log_user_count(self):
        """Background greenlet: record the current user count every 2 seconds."""
        while True:
            if self.env.runner is None:
                return  # there is no runner, so nothing to log...
            try:
                with self.dbcursor() as cur:
                    cur.execute(
                        """INSERT INTO number_of_users(time, run_id, user_count) VALUES (%s, %s, %s)""",
                        (datetime.now(UTC).isoformat(), self._run_id, self.env.runner.user_count),
                    )
            except psycopg2.Error as error:
                logging.error("Failed to write user count to Postgresql: " + repr(error))
                try:
                    # try to recreate connection
                    # FIX: was assigned to self.user_conn, an attribute that is
                    # never read anywhere — the shared connection is self.dbconn.
                    self.dbconn = self._dbconn()
                except Exception:
                    pass
            gevent.sleep(2.0)

    def _run(self):
        """Background greenlet: flush buffered samples until the test finishes."""
        while True:
            if self._samples:
                # Buffer samples, so that a locust greenlet will write to the new list
                # instead of the one that has been sent into postgres client
                samples_buffer = self._samples
                self._samples = []
                self.write_samples_to_db(samples_buffer)
            else:
                if self._finished:
                    break
            gevent.sleep(0.5)

    def write_samples_to_db(self, samples):
        """Bulk-insert a batch of request samples; exit the process on db error."""
        try:
            with self.dbcursor() as cur:
                psycopg2.extras.execute_values(
                    cur,
                    """INSERT INTO requests(time,run_id,greenlet_id,loadgen,name,request_type,response_time,success,response_length,exception,pid,url,context) VALUES %s""",
                    samples,
                    template="(%(time)s, %(run_id)s, %(greenlet_id)s, %(loadgen)s, %(name)s, %(request_type)s, %(response_time)s, %(success)s, %(response_length)s, %(exception)s, %(pid)s, %(url)s, %(context)s)",
                )

        except psycopg2.Error as error:
            logging.error("Failed to write samples to Postgresql timescale database: " + repr(error))
            sys.exit(1)

    def on_test_stop(self, environment):
        if getattr(self, "_user_count_logger", False):
            self._user_count_logger.kill()
        self.log_stop_test_run()

    def on_quit(self, exit_code, **kwargs):
        """Finish the background flusher and record the final run state."""
        self._finished = True
        atexit._clear()  # make sure we dont capture additional ctrl-c:s
        self._background.join(timeout=10)
        if getattr(self, "_user_count_logger", False):
            self._user_count_logger.kill()
        self.log_stop_test_run(exit_code)

    def on_request(
        self,
        request_type,
        name,
        response_time,
        response_length,
        exception,
        context,
        start_time=None,
        url=None,
        **kwargs,
    ):
        """Buffer one request sample; the background greenlet writes it to the db."""
        success = 0 if exception else 1
        if start_time:
            time = datetime.fromtimestamp(start_time, tz=UTC)
        else:
            # some users may not send start_time, so we just make an educated guess
            # (which will be horribly wrong if users spend a lot of time in a with/catch_response-block)
            time = datetime.now(UTC) - timedelta(milliseconds=response_time or 0)
        greenlet_id = getattr(greenlet.getcurrent(), "minimal_ident", 0)  # if we're debugging there is no greenlet
        sample = {
            "time": time,
            "run_id": self._run_id,
            "greenlet_id": greenlet_id,
            "loadgen": self._hostname,
            "name": name,
            "request_type": request_type,
            "response_time": response_time,
            "success": success,
            # truncated to 255 chars — presumably the db column size; confirm
            "url": url[0:255] if url else None,
            "pid": self._pid,
            "context": psycopg2.extras.Json(context, safe_serialize),
        }

        if response_length >= 0:
            sample["response_length"] = response_length
        else:
            sample["response_length"] = None

        if exception:
            if isinstance(exception, CatchResponseError):
                sample["exception"] = str(exception)
            else:
                try:
                    sample["exception"] = repr(exception)
                except AttributeError:
                    sample["exception"] = f"{exception.__class__} (and it has no string representation)"
        else:
            sample["exception"] = None

        self._samples.append(sample)

    def log_start_testrun(self):
        """Insert the testruns row and a start event for this run."""
        cmd = sys.argv[1:]
        with self.dbcursor() as cur:
            cur.execute(
                "INSERT INTO testruns (id, num_users, description, arguments) VALUES (%s,%s,%s,%s)",
                (
                    self._run_id,
                    self.env.parsed_options.num_users or self.env.runner.user_count or 0,
                    # FIX: was the literal string "self.env.parsed_options.description",
                    # which inserted that exact text instead of the option's value.
                    self.env.parsed_options.description,
                    " ".join(cmd),
                ),
            )
            cur.execute(
                "INSERT INTO events (time, text, run_id) VALUES (%s, %s, %s)",
                (datetime.now(UTC).isoformat(), "Test run started", self._run_id),
            )

    def spawning_complete(self, user_count):
        """Record a rampup-complete event (master/standalone only)."""
        if not self.env.parsed_options.worker:  # only log for master/standalone
            end_time = datetime.now(UTC)
            try:
                with self.dbcursor() as cur:
                    cur.execute(
                        "INSERT INTO events (time, text, run_id) VALUES (%s, %s, %s)",
                        (end_time, f"Rampup complete, {user_count} users spawned", self._run_id),
                    )
            except psycopg2.Error as error:
                logging.error(
                    "Failed to insert rampup complete event time to Postgresql timescale database: " + repr(error)
                )

    def log_stop_test_run(self, exit_code=None):
        """Close out the testruns row: end time, exit code and aggregate stats."""
        logging.debug(f"Test run id {self._run_id} stopping")
        if self.env.parsed_options.worker:
            return  # only run on master or standalone
        if getattr(self, "dbconn", None) is None:
            return  # test_start never ran, so there's not much for us to do
        end_time = datetime.now(UTC)
        try:
            with self.dbcursor() as cur:
                cur.execute(
                    "UPDATE testruns SET end_time = %s, exit_code = %s where id = %s",
                    (end_time, exit_code, self._run_id),
                )
                cur.execute(
                    "INSERT INTO events (time, text, run_id) VALUES (%s, %s, %s)",
                    (end_time, f"Finished with exit code: {exit_code}", self._run_id),
                )
                # The AND time > run_id clause in the following statements are there to help Timescale performance
                # (run_id is the start timestamp, so it doubles as a lower time bound)
                # We dont use start_time / end_time to calculate RPS, instead we use the time between the actual first and last request
                # (as this is a more accurate measurement of the actual test)
                try:
                    cur.execute(
                        """
UPDATE testruns
SET (requests, resp_time_avg, rps_avg, fail_ratio) =
(SELECT reqs, resp_time, reqs / GREATEST(duration, 1), fails / reqs) FROM
(SELECT
 COUNT(*)::numeric AS reqs,
 AVG(response_time)::numeric as resp_time
 FROM requests WHERE run_id = %s AND time > %s) AS _,
(SELECT
 EXTRACT(epoch FROM (SELECT MAX(time)-MIN(time) FROM requests WHERE run_id = %s AND time > %s))::numeric AS duration) AS __,
(SELECT
 COUNT(*)::numeric AS fails
 FROM requests WHERE run_id = %s AND time > %s AND success = 0) AS ___
WHERE id = %s""",
                        [self._run_id] * 7,  # run_id fills both the id and time placeholders
                    )
                except psycopg2.errors.DivisionByZero:
                    logging.info(
                        "Got DivisionByZero error when trying to update testruns, most likely because there were no requests logged"
                    )
        except psycopg2.Error as error:
            logging.error(
                "Failed to update testruns record (or events) with end time to Postgresql timescale database: "
                + repr(error)
            )
|
@@ -0,0 +1,41 @@
|
|
1
|
+
{
|
2
|
+
"parser": "@typescript-eslint/parser",
|
3
|
+
"extends": ["plugin:@typescript-eslint/recommended"],
|
4
|
+
"plugins": ["react", "react-hooks", "@typescript-eslint", "prettier", "unused-imports", "import"],
|
5
|
+
"rules": {
|
6
|
+
"react/display-name": "off",
|
7
|
+
"@typescript-eslint/no-explicit-any": "off",
|
8
|
+
"no-console": "error",
|
9
|
+
"react/jsx-sort-props": 2,
|
10
|
+
"react/sort-prop-types": 2,
|
11
|
+
"import/order": [
|
12
|
+
"error",
|
13
|
+
{
|
14
|
+
"groups": ["external", "internal"],
|
15
|
+
"newlines-between": "always",
|
16
|
+
"alphabetize": { "order": "asc", "caseInsensitive": true },
|
17
|
+
"pathGroups": [
|
18
|
+
{
|
19
|
+
"pattern": "react",
|
20
|
+
"group": "external",
|
21
|
+
"position": "before",
|
22
|
+
},
|
23
|
+
{
|
24
|
+
"pattern": "App",
|
25
|
+
"group": "internal",
|
26
|
+
},
|
27
|
+
{
|
28
|
+
"pattern": "Report",
|
29
|
+
"group": "internal",
|
30
|
+
},
|
31
|
+
{
|
32
|
+
"pattern": "{components,hooks,redux,utils}/**",
|
33
|
+
"group": "internal",
|
34
|
+
},
|
35
|
+
],
|
36
|
+
"distinctGroup": false,
|
37
|
+
"pathGroupsExcludedImportTypes": ["internal"],
|
38
|
+
},
|
39
|
+
],
|
40
|
+
},
|
41
|
+
}
|