locust-cloud 1.12.3__py3-none-any.whl → 1.13.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- locust_cloud/cloud.py +268 -133
- {locust_cloud-1.12.3.dist-info → locust_cloud-1.13.0.dist-info}/METADATA +3 -4
- locust_cloud-1.13.0.dist-info/RECORD +5 -0
- locust_cloud/__init__.py +0 -133
- locust_cloud/auth.py +0 -443
- locust_cloud/credential_manager.py +0 -141
- locust_cloud/idle_exit.py +0 -38
- locust_cloud/socket_logging.py +0 -127
- locust_cloud/timescale/exporter.py +0 -313
- locust_cloud/timescale/queries.py +0 -321
- locust_cloud/timescale/query.py +0 -74
- locust_cloud/webui/.gitignore +0 -4
- locust_cloud/webui/.prettierrc +0 -9
- locust_cloud/webui/dist/assets/index-D3YieuNV.js +0 -329
- locust_cloud/webui/dist/index.html +0 -20
- locust_cloud/webui/eslint.config.mjs +0 -83
- locust_cloud/webui/index.html +0 -20
- locust_cloud/webui/package.json +0 -52
- locust_cloud/webui/tsconfig.json +0 -27
- locust_cloud/webui/tsconfig.tsbuildinfo +0 -1
- locust_cloud/webui/vite.config.ts +0 -9
- locust_cloud/webui/vitest.config.ts +0 -16
- locust_cloud/webui/yarn.lock +0 -5816
- locust_cloud-1.12.3.dist-info/RECORD +0 -25
- {locust_cloud-1.12.3.dist-info → locust_cloud-1.13.0.dist-info}/WHEEL +0 -0
- {locust_cloud-1.12.3.dist-info → locust_cloud-1.13.0.dist-info}/entry_points.txt +0 -0
locust_cloud/credential_manager.py
DELETED
@@ -1,141 +0,0 @@
import logging
import time
from datetime import UTC, datetime
from typing import Any

import boto3
import jwt
import requests
from botocore.credentials import RefreshableCredentials
from botocore.session import Session as BotocoreSession
from locust_cloud import __version__

logger = logging.getLogger(__name__)


class CredentialError(Exception):
    """Custom exception for credential-related errors."""

    pass


class CredentialManager:
    def __init__(
        self,
        lambda_url: str,
        username: str | None = None,
        password: str | None = None,
        user_sub_id: str | None = None,
        refresh_token: str | None = None,
        access_key: str | None = None,
        secret_key: str | None = None,
    ) -> None:
        self.lambda_url = lambda_url
        self.username = username
        self.password = password
        self.user_sub_id = user_sub_id
        self.refresh_token = refresh_token

        self.credentials = {
            "access_key": access_key,
            "secret_key": secret_key,
        }
        self.cognito_client_id_token: str = ""
        self.expiry_time: float = 0

        self.obtain_credentials()

        self.refreshable_credentials = RefreshableCredentials.create_from_metadata(
            metadata=self.get_current_credentials(),
            refresh_using=self.refresh_credentials,
            method="custom-refresh",
        )

        botocore_session = BotocoreSession()
        botocore_session._credentials = self.refreshable_credentials  # type: ignore
        botocore_session.set_config_variable("signature_version", "v4")

        self.session = boto3.Session(botocore_session=botocore_session)
        logger.debug("Boto3 session created with RefreshableCredentials.")

    def obtain_credentials(self) -> None:
        payload = {}
        if self.username and self.password:
            payload = {"username": self.username, "password": self.password}
        elif self.user_sub_id and self.refresh_token:
            payload = {"user_sub_id": self.user_sub_id, "refresh_token": self.refresh_token}
        else:
            raise CredentialError("Insufficient credentials to obtain AWS session.")

        try:
            response = requests.post(
                f"{self.lambda_url}/auth/login",
                json=payload,
                headers={"X-Client-Version": __version__},
            )
            response.raise_for_status()
            data = response.json()

            token_key = next(
                (key for key in ["cognito_client_id_token", "id_token", "access_token"] if key in data), None
            )

            if not token_key:
                raise CredentialError("No valid token found in authentication response.")

            self.credentials = {
                "access_key": data.get("aws_access_key_id"),
                "secret_key": data.get("aws_secret_access_key"),
                "token": data.get("aws_session_token"),
            }

            token = data.get(token_key)
            if not token:
                raise CredentialError(f"Token '{token_key}' is missing in the authentication response.")

            decoded = jwt.decode(token, options={"verify_signature": False})
            self.expiry_time = decoded.get("exp", time.time() + 3600) - 60  # Refresh 1 minute before expiry

            self.cognito_client_id_token = token

        except requests.exceptions.HTTPError as http_err:
            response = http_err.response
            if response is None:
                raise CredentialError("Response was None?!") from http_err

            if response.status_code == 401:
                raise CredentialError("Incorrect username or password.") from http_err
            else:
                if js := response.json():
                    if message := js.get("Message"):
                        raise CredentialError(message)
                error_info = f"HTTP {response.status_code} {response.reason}"
                raise CredentialError(f"HTTP error occurred while obtaining credentials: {error_info}") from http_err
        except requests.exceptions.RequestException as req_err:
            raise CredentialError(f"Request exception occurred while obtaining credentials: {req_err}") from req_err
        except jwt.DecodeError as decode_err:
            raise CredentialError(f"Failed to decode JWT token: {decode_err}") from decode_err
        except KeyError as key_err:
            raise CredentialError(f"Missing expected key in authentication response: {key_err}") from key_err

    def refresh_credentials(self) -> dict[str, Any]:
        logger.debug("Refreshing credentials using refresh_credentials method.")
        self.obtain_credentials()
        return {
            "access_key": self.credentials.get("access_key"),
            "secret_key": self.credentials.get("secret_key"),
            "token": self.credentials.get("token"),
            "expiry_time": datetime.fromtimestamp(self.expiry_time, tz=UTC).isoformat(),
        }

    def get_current_credentials(self) -> dict[str, Any]:
        if not self.cognito_client_id_token:
            raise CredentialError("cognito_client_id_token not set in CredentialManager.")

        return {
            "access_key": self.credentials.get("access_key"),
            "secret_key": self.credentials.get("secret_key"),
            "token": self.credentials.get("token"),
            "expiry_time": datetime.fromtimestamp(self.expiry_time, tz=UTC).isoformat(),
            "cognito_client_id_token": self.cognito_client_id_token,
        }
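The removed CredentialManager wrapped the Locust Cloud login endpoint in botocore's RefreshableCredentials, so AWS clients built from its boto3 session re-authenticated automatically about a minute before the JWT expired. A minimal, hypothetical usage sketch (the endpoint URL and login values below are placeholders, not anything shipped in the package):

```python
from locust_cloud.credential_manager import CredentialError, CredentialManager

try:
    manager = CredentialManager(
        lambda_url="https://auth.example.com",  # placeholder endpoint
        username="user@example.com",            # placeholder credentials
        password="secret",
    )
except CredentialError as err:
    raise SystemExit(f"Could not authenticate: {err}") from err

# Clients created from this session pick up the RefreshableCredentials, so
# obtain_credentials() is called again shortly before the token expires.
client = manager.session.client("s3")  # any AWS service client; S3 is only an example
```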
locust_cloud/idle_exit.py
DELETED
@@ -1,38 +0,0 @@
import logging
import sys

import gevent
import locust.env
from locust import events

logger = logging.getLogger(__name__)


class IdleExit:
    def __init__(self, environment: locust.env.Environment):
        self.environment = environment
        self._destroy_task: gevent.Greenlet | None = None
        events.test_start.add_listener(self.on_locust_state_change)
        events.test_stop.add_listener(self.on_test_stop)
        events.quit.add_listener(self.on_locust_state_change)

        if not self.environment.parsed_options.autostart:
            self._destroy_task = gevent.spawn(self._destroy)

    def _destroy(self):
        gevent.sleep(1800)
        logger.info("Locust was detected as idle (no test running) for more than 30 minutes")
        self.environment.runner.quit()

        if self.environment.web_ui:
            self.environment.web_ui.greenlet.kill(timeout=5)

            if self.environment.web_ui.greenlet.started:
                sys.exit(1)

    def on_test_stop(self, **kwargs):
        self._destroy_task = gevent.spawn(self._destroy)

    def on_locust_state_change(self, **kwargs):
        if self._destroy_task:
            self._destroy_task.kill()
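IdleExit arms a 30-minute timer whenever no test is running and shuts the runner (and web UI) down when it fires. A hypothetical wiring sketch, since locust-cloud instantiated the class itself during startup:

```python
from locust import events

from locust_cloud.idle_exit import IdleExit


@events.init.add_listener
def _(environment, **_kwargs):
    # Registers test_start/test_stop/quit listeners; the idle timer starts
    # immediately unless --autostart was given, and is re-armed on test_stop.
    IdleExit(environment)
```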
locust_cloud/socket_logging.py
DELETED
@@ -1,127 +0,0 @@
import atexit
import logging
import os
import sys
from collections import deque

import flask
import gevent
import gevent.pywsgi
import geventwebsocket.handler
import socketio
import socketio.exceptions


def setup_socket_logging():
    """
    Set up a separate server listening for incoming websocket connections.
    Because it listens on a separate port from the webui and the ALB only
    accepts connections on port 443, this will be exposed on a different
    path (/<customer-id>/socket-logs) with a separate target group.
    The target group will want to do health checks before exposing the server
    so a small flask app exposing the health check endpoint is added to the
    server in addition to the websocket stuff.
    """

    quiet_logger = logging.getLogger("be_quiet")
    quiet_logger.propagate = False
    quiet_logger.addHandler(logging.NullHandler())

    # This app will use a logger with the same name as the application
    # which means it will pick up the one set up above.
    healthcheck_app = flask.Flask("be_quiet")

    # /login is the health check endpoint currently configured for the
    # ALB controller in kubernetes.
    # See cloud-onboarder/kubernetes/alb-load-balancer-ingress.yaml
    @healthcheck_app.route("/login")
    def healthcheck():
        return ""

    logger = logging.getLogger(__name__)
    socketio_path = f"/{os.environ['CUSTOMER_ID']}/socket-logs"
    sio = socketio.Server(async_handlers=True, always_connect=True, async_mode="gevent", cors_allowed_origins="*")
    sio_app = socketio.WSGIApp(sio, healthcheck_app, socketio_path=socketio_path)
    message_queue = deque(maxlen=500)

    @sio.event
    def connect(sid, environ, auth):  # noqa: ARG001
        logger.debug("Client connected to socketio server")

    @sio.event
    def disconnect(sid):  # noqa: ARG001
        logger.debug("Client disconnected from socketio server")

    class QueueCopyStream:
        def __init__(self, name, original):
            self.name = name
            self.original = original

        def write(self, message):
            """
            Writes to the queue first since when running in gevent the write to
            stdout/stderr can yield and we want to ensure the same ordering in
            the queue as for the written messages.
            """
            message_queue.append((self.name, message))
            self.original.write(message)

        def flush(self):
            self.original.flush()

    def connected_sid():
        for rooms in sio.manager.rooms.values():
            for clients in rooms.values():
                for sid in clients.keys():
                    return sid

    def emitter():
        while True:
            while message_queue and (sid := connected_sid()):
                name, message = message_queue[0]

                try:
                    if name == "shutdown":
                        logger.debug("Sending websocket shutdown event")

                    sio.call(name, message, to=sid, timeout=5)
                    message_queue.popleft()

                    if name == "shutdown":
                        logger.debug("Websocket shutdown event acknowledged by client")
                        return

                except socketio.exceptions.TimeoutError:
                    logger.debug("Timed out waiting for client to acknowledge websocket message")

            gevent.sleep(1)

    emitter_greenlet = gevent.spawn(emitter)

    sys.stderr = QueueCopyStream("stderr", sys.stderr)
    sys.stdout = QueueCopyStream("stdout", sys.stdout)

    @atexit.register
    def notify_shutdown(*args, **kwargs):
        logger.debug("Adding shutdown event to websocket queue")
        message_queue.append(("shutdown", ""))
        emitter_greenlet.join(timeout=30)

    class WebSocketHandlerWithoutLogging(geventwebsocket.handler.WebSocketHandler):
        """
        Subclassing WebSocketHandler so it doesn't set a logger on
        the server that I've explicitly configured not to have one.
        """

        @property
        def logger(self):
            return quiet_logger

    @gevent.spawn
    def start_websocket_server():
        logger.debug(f"Starting socketio server on port 1095 with path {socketio_path}")
        server = gevent.pywsgi.WSGIServer(
            ("", 1095), sio_app, log=None, error_log=None, handler_class=WebSocketHandlerWithoutLogging
        )
        server.serve_forever()
        gevent.get_hub().join()
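The server above forwards everything written to stdout/stderr as socket.io events named "stdout" and "stderr", followed by a final "shutdown" event, and each sio.call() waits for the client to acknowledge. A hypothetical client-side sketch using python-socketio (the host and customer id are placeholders; the real consumer lives in Locust Cloud's own tooling):

```python
import socketio

sio = socketio.Client()


@sio.on("stdout")  # a matching "stderr" handler would look the same
def on_stdout(message):
    print(message, end="")
    return ""  # the return value acknowledges the server's sio.call()


@sio.on("shutdown")
def on_shutdown(message):
    sio.disconnect()
    return ""


sio.connect("http://localhost:1095", socketio_path="/<customer-id>/socket-logs")
sio.wait()
```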
locust_cloud/timescale/exporter.py
DELETED
@@ -1,313 +0,0 @@
import atexit
import json
import logging
import os
import socket
import sys
from datetime import UTC, datetime, timedelta

import gevent
import greenlet
import locust.env
import psycopg
import psycopg.types.json
from locust.exception import CatchResponseError
from locust.runners import MasterRunner


def safe_serialize(obj):
    def default(o):
        return f"<<non-serializable: {type(o).__qualname__}>>"

    return json.dumps(obj, default=default)


def format_datetime(d: datetime):
    return d.strftime("%Y-%m-%d, %H:%M:%S.%f")


def parse_datetime(s: str):
    return datetime.strptime(s, "%Y-%m-%d, %H:%M:%S.%f").replace(tzinfo=UTC)


class Exporter:
    def __init__(self, environment: locust.env.Environment, pool):
        self.env = environment
        self._run_id = None
        self._samples: list[tuple] = []
        self._background = gevent.spawn(self._run)
        self._hostname = socket.gethostname()
        self._finished = False
        self._has_logged_test_stop = False
        self._pid = os.getpid()
        self.pool = pool

        events = self.env.events
        events.test_start.add_listener(self.on_test_start)
        events.test_stop.add_listener(self.on_test_stop)
        events.request.add_listener(self.on_request)
        events.cpu_warning.add_listener(self.on_cpu_warning)
        events.quit.add_listener(self.on_quit)
        events.spawning_complete.add_listener(self.spawning_complete)
        atexit.register(self.log_stop_test_run)

    def on_cpu_warning(self, environment: locust.env.Environment, cpu_usage, message=None, timestamp=None, **kwargs):
        # passing a custom message & timestamp to the event is a haxx to allow using this event for reporting generic events
        if not timestamp:
            timestamp = datetime.now(UTC).isoformat()
        if not message:
            message = f"High CPU usage ({cpu_usage}%)"
        with self.pool.connection() as conn:
            conn.execute(
                "INSERT INTO events (time, text, run_id, customer) VALUES (%s, %s, %s, current_user)",
                (timestamp, message, self._run_id),
            )

    def on_test_start(self, environment: locust.env.Environment):
        if not self.env.parsed_options or not self.env.parsed_options.worker:
            self._has_logged_test_stop = False
            self._run_id = environment._run_id = datetime.now(UTC)  # type: ignore
            self.env.parsed_options.run_id = format_datetime(environment._run_id)  # type: ignore
            self.log_start_testrun()
            self._user_count_logger = gevent.spawn(self._log_user_count)
            self._update_end_time_task = gevent.spawn(self._update_end_time)
        if self.env.parsed_options.worker:
            self._run_id = parse_datetime(self.env.parsed_options.run_id)

    def _log_user_count(self):
        while True:
            if self.env.runner is None:
                return  # there is no runner, so nothing to log...
            try:
                with self.pool.connection() as conn:
                    conn.execute(
                        """INSERT INTO number_of_users(time, run_id, user_count, customer) VALUES (%s, %s, %s, current_user)""",
                        (datetime.now(UTC).isoformat(), self._run_id, self.env.runner.user_count),
                    )
            except psycopg.Error as error:
                logging.error("Failed to write user count to Postgresql: " + repr(error))
            gevent.sleep(2.0)

    def _run(self):
        while True:
            if self._samples:
                # Buffer samples, so that a locust greenlet will write to the new list
                # instead of the one that has been sent into postgres client
                samples_buffer = self._samples
                self._samples = []
                self.write_samples_to_db(samples_buffer)
            else:
                if self._finished:
                    break
            gevent.sleep(0.5)

    def _update_end_time(self):
        # delay setting first end time
        # so UI doesn't display temporary value
        gevent.sleep(5)

        # Regularly update endtime to prevent missing endtimes when a test crashes
        while True:
            current_end_time = datetime.now(UTC)
            try:
                with self.pool.connection() as conn:
                    conn.execute(
                        "UPDATE testruns SET end_time = %s WHERE id = %s",
                        (current_end_time, self._run_id),
                    )
                gevent.sleep(60)
            except psycopg.Error as error:
                logging.error("Failed to update testruns table with end time: " + repr(error))
                gevent.sleep(1)

    def write_samples_to_db(self, samples):
        try:
            with self.pool.connection() as conn:
                conn: psycopg.connection.Connection
                with conn.cursor().copy(
                    "COPY requests (time,run_id,greenlet_id,loadgen,name,request_type,response_time,success,response_length,exception,pid,url,context) FROM STDIN"
                ) as copy:
                    for sample in samples:
                        copy.write_row(sample)
        except psycopg.Error as error:
            logging.error("Failed to write samples to Postgresql timescale database: " + repr(error))

    def on_test_stop(self, environment):
        if getattr(self, "_update_end_time_task", False):
            self._update_end_time_task.kill()
        if getattr(self, "_user_count_logger", False):
            self._user_count_logger.kill()
            with self.pool.connection() as conn:
                conn.execute(
                    """INSERT INTO number_of_users(time, run_id, user_count, customer) VALUES (%s, %s, %s, current_user)""",
                    (datetime.now(UTC).isoformat(), self._run_id, 0),
                )
        self.log_stop_test_run()
        self._has_logged_test_stop = True

    def on_quit(self, exit_code, **kwargs):
        self._finished = True
        atexit.unregister(self.log_stop_test_run)  # make sure we dont capture additional ctrl-c:s
        self._background.join(timeout=10)
        if getattr(self, "_update_end_time_task", False):
            self._update_end_time_task.kill()
        if getattr(self, "_user_count_logger", False):
            self._user_count_logger.kill()
        if not self._has_logged_test_stop:
            self.log_stop_test_run()
        if not self.env.parsed_options.worker:
            self.log_exit_code(exit_code)

    def on_request(
        self,
        request_type,
        name,
        response_time,
        response_length,
        exception,
        context,
        start_time=None,
        url=None,
        **kwargs,
    ):
        # handle if a worker connects after test_start
        if not self._run_id:
            self._run_id = parse_datetime(self.env.parsed_options.run_id)
        success = 0 if exception else 1
        if start_time:
            time = datetime.fromtimestamp(start_time, tz=UTC)
        else:
            # some users may not send start_time, so we just make an educated guess
            # (which will be horribly wrong if users spend a lot of time in a with/catch_response-block)
            time = datetime.now(UTC) - timedelta(milliseconds=response_time or 0)
        greenlet_id = getattr(greenlet.getcurrent(), "minimal_ident", 0)  # if we're debugging there is no greenlet

        if exception:
            if isinstance(exception, CatchResponseError):
                exception = str(exception)
            else:
                try:
                    exception = repr(exception)
                except AttributeError:
                    exception = f"{exception.__class__} (and it has no string representation)"

            exception = exception[:300]
        else:
            exception = None

        sample = (
            time,
            self._run_id,
            greenlet_id,
            self._hostname,
            name,
            request_type,
            response_time,
            success,
            response_length,
            exception,
            self._pid,
            url[0:255] if url else None,
            psycopg.types.json.Json(context, safe_serialize),
        )

        self._samples.append(sample)

    def log_start_testrun(self):
        cmd = sys.argv[1:]
        with self.pool.connection() as conn:
            conn.execute(
                "INSERT INTO testruns (id, num_users, worker_count, username, locustfile, profile, arguments, customer) VALUES (%s,%s,%s,%s,%s,%s,%s,current_user)",
                (
                    self._run_id,
                    self.env.runner.target_user_count if self.env.runner else 1,
                    len(self.env.runner.clients)
                    if isinstance(
                        self.env.runner,
                        MasterRunner,
                    )
                    else 0,
                    self.env.web_ui.template_args.get("username", "") if self.env.web_ui else "",
                    self.env.parsed_locustfiles[0].split("/")[-1].split("__")[-1],
                    self.env.parsed_options.profile,
                    " ".join(cmd),
                ),
            )
            conn.execute(
                "INSERT INTO events (time, text, run_id, customer) VALUES (%s, %s, %s, current_user)",
                (datetime.now(UTC).isoformat(), "Test run started", self._run_id),
            )

    def spawning_complete(self, user_count):
        if not self.env.parsed_options.worker:  # only log for master/standalone
            end_time = datetime.now(UTC)
            try:
                with self.pool.connection() as conn:
                    conn.execute(
                        "INSERT INTO events (time, text, run_id, customer) VALUES (%s, %s, %s, current_user)",
                        (end_time, f"Rampup complete, {user_count} users spawned", self._run_id),
                    )
            except psycopg.Error as error:
                logging.error(
                    "Failed to insert rampup complete event time to Postgresql timescale database: " + repr(error)
                )

    def log_stop_test_run(self):
        logging.debug(f"Test run id {self._run_id} stopping")
        if self.env.parsed_options.worker:
            return  # only run on master or standalone
        end_time = datetime.now(UTC)
        try:
            with self.pool.connection() as conn:
                conn.execute(
                    "UPDATE testruns SET end_time = %s WHERE id = %s",
                    (end_time, self._run_id),
                )

                try:
                    # The AND time > run_id clause in the following statements are there to help Timescale performance
                    # We dont use start_time / end_time to calculate RPS, instead we use the time between the actual first and last request
                    # (as this is a more accurate measurement of the actual test)
                    conn.execute(
                        """
                        UPDATE testruns
                        SET (requests, resp_time_avg, rps_avg, fail_ratio) =
                        (SELECT reqs, resp_time, reqs / GREATEST(duration, 1), fails / GREATEST(reqs, 1)) FROM
                        (SELECT
                         COUNT(*)::numeric AS reqs,
                         AVG(response_time)::numeric as resp_time
                         FROM requests_view WHERE run_id = %(run_id)s AND time > %(run_id)s) AS _,
                        (SELECT
                         EXTRACT(epoch FROM (SELECT MAX(time)-MIN(time) FROM requests_view WHERE run_id = %(run_id)s AND time > %(run_id)s))::numeric AS duration) AS __,
                        (SELECT
                         COUNT(*)::numeric AS fails
                         FROM requests_view WHERE run_id = %(run_id)s AND time > %(run_id)s AND success = 0) AS ___
                        WHERE id = %(run_id)s""",
                        {"run_id": self._run_id},
                    )
                except psycopg.errors.DivisionByZero:  # remove this except later, because it shouldnt happen any more
                    logging.info(
                        "Got DivisionByZero error when trying to update testruns, most likely because there were no requests logged"
                    )
        except psycopg.Error as error:
            logging.error(
                "Failed to update testruns record (or events) with end time to Postgresql timescale database: "
                + repr(error)
            )

    def log_exit_code(self, exit_code=None):
        try:
            with self.pool.connection() as conn:
                conn.execute(
                    "UPDATE testruns SET exit_code = %s WHERE id = %s",
                    (exit_code, self._run_id),
                )
                conn.execute(
                    "INSERT INTO events (time, text, run_id, customer) VALUES (%s, %s, %s, current_user)",
                    (datetime.now(UTC).isoformat(), f"Finished with exit code: {exit_code}", self._run_id),
                )
        except psycopg.Error as error:
            logging.error(
                "Failed to update testruns record (or events) with end time to Postgresql timescale database: "
                + repr(error)
            )
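The Exporter only assumes an object with a .connection() context manager yielding a psycopg connection, plus the requests/testruns/events/number_of_users tables used in the statements above. A hypothetical wiring sketch with psycopg_pool (the connection string is a placeholder; the package itself created the pool and registered the Exporter during startup):

```python
from locust import events
from psycopg_pool import ConnectionPool

from locust_cloud.timescale.exporter import Exporter


@events.init.add_listener
def _(environment, **_kwargs):
    # Any psycopg3 ConnectionPool works, since the Exporter only calls
    # pool.connection() and executes plain SQL / COPY statements.
    pool = ConnectionPool("postgresql://locust@timescale.example.com:5432/locust")
    Exporter(environment, pool)
```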