locust-cloud 1.5.11__py3-none-any.whl → 1.7.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
locust_cloud/__init__.py CHANGED
@@ -1,18 +1,22 @@
+ import importlib.metadata
  import os

  os.environ["LOCUST_SKIP_MONKEY_PATCH"] = "1"
+ __version__ = importlib.metadata.version("locust-cloud")

  import argparse
  import logging
- import sys

+ import configargparse
  import locust.env
  import psycopg
  from locust import events
  from locust.argument_parser import LocustArgumentParser
  from locust_cloud.auth import register_auth
+ from locust_cloud.idle_exit import IdleExit
  from locust_cloud.timescale.exporter import Exporter
  from locust_cloud.timescale.query import register_query
+ from psycopg.conninfo import make_conninfo
  from psycopg_pool import ConnectionPool

  PG_USER = os.environ.get("PG_USER")
@@ -53,39 +57,41 @@ def add_arguments(parser: LocustArgumentParser):
          default="",
          help="Description of the test being run",
      )
+     # do not set
+     # used for sending the run id from master to workers
+     locust_cloud.add_argument(
+         "--run-id",
+         type=str,
+         env_var="LOCUSTCLOUD_RUN_ID",
+         help=configargparse.SUPPRESS,
+     )


  def set_autocommit(conn: psycopg.Connection):
      conn.autocommit = True


- def create_connection_pool(
-     pg_user: str, pg_host: str, pg_password: str, pg_database: str, pg_port: str | int
- ) -> ConnectionPool:
-     try:
-         return ConnectionPool(
-             conninfo=f"postgres://{pg_user}:{pg_password}@{pg_host}:{pg_port}/{pg_database}?sslmode=require",
-             min_size=1,
-             max_size=10,
-             configure=set_autocommit,
-         )
-     except Exception:
-         sys.stderr.write(f"Could not connect to postgres ({pg_user}@{pg_host}:{pg_port}).")
-         sys.exit(1)
-
-
  @events.init.add_listener
  def on_locust_init(environment: locust.env.Environment, **_args):
      if not (PG_HOST and PG_USER and PG_PASSWORD and PG_DATABASE and PG_PORT):
          return

      try:
-         pool = create_connection_pool(
-             pg_user=PG_USER,
-             pg_host=PG_HOST,
-             pg_password=PG_PASSWORD,
-             pg_database=PG_DATABASE,
-             pg_port=PG_PORT,
+         conninfo = make_conninfo(
+             dbname=PG_DATABASE,
+             user=PG_USER,
+             port=PG_PORT,
+             password=PG_PASSWORD,
+             host=PG_HOST,
+             sslmode="require",
+             # options="-c statement_timeout=55000",
+         )
+         pool = ConnectionPool(
+             conninfo,
+             min_size=1,
+             max_size=10,
+             configure=set_autocommit,
+             check=ConnectionPool.check_connection,
          )
          pool.wait()
      except Exception as e:
@@ -93,6 +99,9 @@ def on_locust_init(environment: locust.env.Environment, **_args):
          logger.error(f"{PG_HOST=}")
          raise

+     if not GRAPH_VIEWER:
+         IdleExit(environment)
+
      if not GRAPH_VIEWER and environment.parsed_options and environment.parsed_options.exporter:
          Exporter(environment, pool)

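Note: the __init__.py change above replaces the hand-built postgres:// URL with psycopg's make_conninfo and adds a pool health check. A minimal standalone sketch of that pattern, assuming psycopg 3 and psycopg_pool and the same PG_* environment variables (the fallback values here are only illustrative):

    import os

    import psycopg
    from psycopg.conninfo import make_conninfo
    from psycopg_pool import ConnectionPool


    def set_autocommit(conn: psycopg.Connection):
        conn.autocommit = True


    conninfo = make_conninfo(
        dbname=os.environ.get("PG_DATABASE", "postgres"),
        user=os.environ.get("PG_USER", "postgres"),
        password=os.environ.get("PG_PASSWORD", ""),
        host=os.environ.get("PG_HOST", "localhost"),
        port=os.environ.get("PG_PORT", "5432"),
        sslmode="require",
    )
    pool = ConnectionPool(
        conninfo,
        min_size=1,
        max_size=10,
        configure=set_autocommit,  # run once for every new connection
        check=ConnectionPool.check_connection,  # validate connections before handing them out
    )
    pool.wait()  # block until min_size connections are ready, or raise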
 
locust_cloud/auth.py CHANGED
@@ -1,3 +1,4 @@
+ import logging
  import os
  from datetime import UTC, datetime, timedelta
  from typing import TypedDict
@@ -7,12 +8,13 @@ import requests
  import werkzeug
  from flask import redirect, request, url_for
  from flask_login import UserMixin, login_user
- from locust_cloud.constants import (
-     DEFAULT_DEPLOYER_URL,
- )
+ from locust_cloud import __version__
+ from locust_cloud.constants import DEFAULT_DEPLOYER_URL

  DEPLOYER_URL = os.environ.get("LOCUSTCLOUD_DEPLOYER_URL", DEFAULT_DEPLOYER_URL)

+ logger = logging.getLogger(__name__)
+

  class Credentials(TypedDict):
      user_sub_id: str
@@ -69,23 +71,25 @@ def register_auth(environment: locust.env.Environment):
              auth_response = requests.post(
                  f"{DEPLOYER_URL}/auth/login",
                  json={"username": username, "password": password},
+                 headers={"X-Client-Version": __version__},
              )

-             if auth_response.status_code == 200:
-                 credentials = auth_response.json()
-                 response = redirect(url_for("index"))
-                 response = set_credentials(username, credentials, response)
-                 login_user(AuthUser(credentials["user_sub_id"]))
+             auth_response.raise_for_status()

-                 return response
+             credentials = auth_response.json()
+             response = redirect(url_for("index"))
+             response = set_credentials(username, credentials, response)
+             login_user(AuthUser(credentials["user_sub_id"]))

-             environment.web_ui.auth_args = {**environment.web_ui.auth_args, "error": "Invalid username or password"}
+             return response
+         except requests.exceptions.HTTPError as e:
+             if e.response.status_code == 401:
+                 environment.web_ui.auth_args["error"] = "Invalid username or password"
+             else:
+                 logger.error(f"Unknown response from auth: {e.response.status_code} {e.response.text}")

-             return redirect(url_for("login"))
-         except Exception:
-             environment.web_ui.auth_args = {
-                 **environment.web_ui.auth_args,
-                 "error": "An unknown error occured, please try again",
-             }
+                 environment.web_ui.auth_args["error"] = (
+                     "Unknown error during authentication, check logs and/or contact support"
+                 )

          return redirect(url_for("login"))
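Note: the auth.py rewrite above swaps the status-code check for raise_for_status plus an HTTPError handler. A small sketch of that requests pattern (the base URL, payload and version string below are placeholders, not the real deployer endpoint):

    import logging

    import requests

    logger = logging.getLogger(__name__)


    def login(base_url: str, username: str, password: str) -> dict | None:
        response = requests.post(
            f"{base_url}/auth/login",
            json={"username": username, "password": password},
            headers={"X-Client-Version": "0.0.0"},  # placeholder version string
        )
        try:
            response.raise_for_status()  # raises requests.exceptions.HTTPError on 4xx/5xx
            return response.json()
        except requests.exceptions.HTTPError as e:
            if e.response.status_code == 401:
                logger.error("Invalid username or password")
            else:
                logger.error(f"Unknown response from auth: {e.response.status_code} {e.response.text}")
            return None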
locust_cloud/cloud.py CHANGED
@@ -1,4 +1,3 @@
- import importlib.metadata
  import json
  import logging
  import math
@@ -14,6 +13,7 @@ from typing import IO, Any
  import configargparse
  import requests
  from botocore.exceptions import ClientError
+ from locust_cloud import __version__
  from locust_cloud.constants import (
      DEFAULT_CLUSTER_NAME,
      DEFAULT_DEPLOYER_URL,
@@ -23,8 +23,6 @@ from locust_cloud.constants import (
  )
  from locust_cloud.credential_manager import CredentialError, CredentialManager

- __version__ = importlib.metadata.version("locust-cloud")
-

  class LocustTomlConfigParser(configargparse.TomlConfigParser):
      def parse(self, stream: IO[str]) -> OrderedDict[str, Any]:
@@ -198,7 +196,7 @@ logging.getLogger("urllib3").setLevel(logging.INFO)

  def main() -> None:
      s3_bucket = f"{options.kube_cluster_name}-{options.kube_namespace}"
-     deployed_pods: list[Any] = []
+     deployments: list[Any] = []
      worker_count: int = max(options.workers or math.ceil(options.users / USERS_PER_WORKER), 2)
      os.environ["AWS_DEFAULT_REGION"] = options.region
      if options.users > 5000000:
@@ -305,6 +303,7 @@ def main() -> None:
          "AWS_ACCESS_KEY_ID": aws_access_key_id,
          "AWS_SECRET_ACCESS_KEY": aws_secret_access_key,
          "AWS_SESSION_TOKEN": aws_session_token,
+         "X-Client-Version": __version__,
      }
      try:
          # logger.info(payload) # might be useful when debugging sometimes
@@ -314,7 +313,7 @@
          sys.exit(1)

      if response.status_code == 200:
-         deployed_pods = response.json().get("pods", [])
+         deployments = response.json().get("deployments", [])
      else:
          try:
              logger.error(f"Error when deploying: {response.json()['Message']}")
@@ -331,7 +330,7 @@ def main() -> None:
          sys.exit(0)

      log_group_name = f"/eks/{options.kube_cluster_name}-{options.kube_namespace}"
-     master_pod_name = next((deployment for deployment in deployments if "master" in deployment), None)
+     master_pod_name = next((deployment for deployment in deployments if "master" in deployment), None)

      if not master_pod_name:
          logger.error(
@@ -373,6 +372,7 @@ def main() -> None:
                  startTime=timestamp,
                  startFromHead=True,
              )
+             locust_shutdown = False
              for event in response.get("events", []):
                  message = event.get("message", "")
                  event_timestamp = event.get("timestamp", timestamp) + 1
@@ -380,9 +380,17 @@ def main() -> None:
                      message_json = json.loads(message)
                      if "log" in message_json:
                          print(message_json["log"])
+
+                         if "Shutting down (exit code" in message_json["log"]:
+                             locust_shutdown = True
+
                  except json.JSONDecodeError:
                      print(message)
                  timestamp = event_timestamp
+
+             if locust_shutdown:
+                 break
+
              time.sleep(5)
      except ClientError as e:
          error_code = e.response.get("Error", {}).get("Code", "")
@@ -408,6 +416,7 @@ def delete(s3_bucket, credential_manager):
          "AWS_ACCESS_KEY_ID": refreshed_credentials.get("access_key", ""),
          "AWS_SECRET_ACCESS_KEY": refreshed_credentials.get("secret_key", ""),
          "Authorization": f"Bearer {refreshed_credentials.get('cognito_client_id_token', '')}",
+         "X-Client-Version": __version__,
      }

      token = refreshed_credentials.get("token")
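Note: the cloud.py changes above add a locust_shutdown flag so the CloudWatch log-following loop exits once the master logs its shutdown message. A rough sketch of that polling loop, assuming boto3 with AWS credentials and region already configured in the environment; the log group and stream names are placeholders:

    import json
    import time

    import boto3

    client = boto3.client("logs")
    timestamp = 0
    locust_shutdown = False

    while not locust_shutdown:
        response = client.get_log_events(
            logGroupName="/eks/example-cluster-example-namespace",  # placeholder
            logStreamName="example-master-log-stream",  # placeholder
            startTime=timestamp,
            startFromHead=True,
        )
        for event in response.get("events", []):
            message = event.get("message", "")
            event_timestamp = event.get("timestamp", timestamp) + 1
            try:
                message_json = json.loads(message)
                if "log" in message_json:
                    print(message_json["log"])
                    if "Shutting down (exit code" in message_json["log"]:
                        locust_shutdown = True  # stop following logs once Locust reports shutdown
            except json.JSONDecodeError:
                print(message)
            timestamp = event_timestamp
        if not locust_shutdown:
            time.sleep(5)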
locust_cloud/credential_manager.py CHANGED
@@ -8,6 +8,7 @@ import jwt
  import requests
  from botocore.credentials import RefreshableCredentials
  from botocore.session import Session as BotocoreSession
+ from locust_cloud import __version__

  logger = logging.getLogger(__name__)

@@ -67,7 +68,11 @@ class CredentialManager:
              raise CredentialError("Insufficient credentials to obtain AWS session.")

          try:
-             response = requests.post(f"{self.lambda_url}/auth/login", json=payload)
+             response = requests.post(
+                 f"{self.lambda_url}/auth/login",
+                 json=payload,
+                 headers={"X-Client-Version": __version__},
+             )
              response.raise_for_status()
              data = response.json()

@@ -98,6 +103,9 @@ class CredentialManager:
              if response is not None and response.status_code == 401:
                  raise CredentialError("Incorrect username or password.") from http_err
              else:
+                 if js := response.json():
+                     if message := js.get("Message"):
+                         raise CredentialError(message)
                  error_info = f"HTTP {response.status_code} {response.reason}" if response else "No response received."
                  raise CredentialError(f"HTTP error occurred while obtaining credentials: {error_info}") from http_err
          except requests.exceptions.RequestException as req_err:
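Note: the credential_manager.py change above surfaces the server-provided "Message" field before falling back to a generic HTTP error string. A small sketch of that fallback chain; the error_message helper and its inputs are hypothetical stand-ins for the real response object:

    def error_message(status_code: int, reason: str, body: dict | None) -> str:
        # prefer the server-provided "Message", otherwise fall back to a generic HTTP error
        if body and (message := body.get("Message")):
            return message
        return f"HTTP error occurred while obtaining credentials: HTTP {status_code} {reason}"


    print(error_message(403, "Forbidden", {"Message": "Subscription expired"}))
    print(error_message(500, "Internal Server Error", None))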
locust_cloud/idle_exit.py ADDED
@@ -0,0 +1,38 @@
+ import logging
+ import sys
+
+ import gevent
+ import locust.env
+ from locust import events
+
+ logger = logging.getLogger(__name__)
+
+
+ class IdleExit:
+     def __init__(self, environment: locust.env.Environment):
+         self.environment = environment
+         self._destroy_task: gevent.Greenlet | None = None
+         events.test_start.add_listener(self.on_locust_state_change)
+         events.test_stop.add_listener(self.on_test_stop)
+         events.quit.add_listener(self.on_locust_state_change)
+
+         if not self.environment.parsed_options.autostart:
+             self._destroy_task = gevent.spawn(self._destroy)
+
+     def _destroy(self):
+         gevent.sleep(1800)
+         logger.info("Locust was detected as idle (no test running) for more than 30 minutes")
+         self.environment.runner.quit()
+
+         if self.environment.web_ui:
+             self.environment.web_ui.greenlet.kill(timeout=5)
+
+             if self.environment.web_ui.greenlet.started:
+                 sys.exit(1)
+
+     def on_test_stop(self, **_kwargs):
+         self._destroy_task = gevent.spawn(self._destroy)
+
+     def on_locust_state_change(self, **_kwargs):
+         if self._destroy_task:
+             self._destroy_task.kill()
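Note: the new IdleExit class above is built on a simple gevent timer pattern: spawn a delayed shutdown greenlet and kill it whenever a test starts. A tiny sketch of that pattern in isolation (the 5-second delay here is illustrative; IdleExit waits 30 minutes):

    import gevent


    def idle_timeout():
        gevent.sleep(5)  # IdleExit sleeps 1800 seconds here
        print("idle for too long, shutting down")


    # arm the timer
    task = gevent.spawn(idle_timeout)

    # ...activity resumes before the timeout fires...
    gevent.sleep(1)
    task.kill()  # cancel the pending shutdown
    print("timer cancelled")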
locust_cloud/timescale/exporter.py CHANGED
@@ -22,11 +22,19 @@ def safe_serialize(obj):
      return json.dumps(obj, default=default)


+ def format_datetime(d: datetime):
+     return d.strftime("%Y-%m-%d, %H:%M:%S.%f")
+
+
+ def parse_datetime(s: str):
+     return datetime.strptime(s, "%Y-%m-%d, %H:%M:%S.%f").replace(tzinfo=UTC)
+
+
  class Exporter:
      def __init__(self, environment: locust.env.Environment, pool):
          self.env = environment
          self._run_id = None
-         self._samples: list[dict] = []
+         self._samples: list[tuple] = []
          self._background = gevent.spawn(self._run)
          self._hostname = socket.gethostname()
          self._finished = False
@@ -42,13 +50,6 @@ class Exporter:
          events.spawning_complete.add_listener(self.spawning_complete)
          atexit.register(self.log_stop_test_run)

-         if self.env.runner is not None:
-             self.env.runner.register_message("run_id", self.set_run_id)
-
-     def set_run_id(self, environment, msg, **kwargs):
-         logging.debug(f"run id from master: {msg.data}")
-         self._run_id = datetime.strptime(msg.data, "%Y-%m-%d, %H:%M:%S.%f").replace(tzinfo=UTC)
-
      def on_cpu_warning(self, environment: locust.env.Environment, cpu_usage, message=None, timestamp=None, **kwargs):
          # passing a custom message & timestamp to the event is a haxx to allow using this event for reporting generic events
          if not timestamp:
@@ -63,12 +64,12 @@ class Exporter:
      def on_test_start(self, environment: locust.env.Environment):
          if not self.env.parsed_options or not self.env.parsed_options.worker:
              self._run_id = environment._run_id = datetime.now(UTC)  # type: ignore
-             msg = environment._run_id.strftime("%Y-%m-%d, %H:%M:%S.%f")  # type: ignore
-             if environment.runner is not None:
-                 logging.debug(f"about to send run_id to workers: {msg}")
-                 environment.runner.send_message("run_id", msg)
+             self.env.parsed_options.run_id = format_datetime(environment._run_id)  # type: ignore
              self.log_start_testrun()
              self._user_count_logger = gevent.spawn(self._log_user_count)
+         self._update_end_time_task = gevent.spawn(self._update_end_time)
+         if self.env.parsed_options.worker:
+             self._run_id = parse_datetime(self.env.parsed_options.run_id)

      def _log_user_count(self):
          while True:
@@ -97,22 +98,40 @@ class Exporter:
                  break
              gevent.sleep(0.5)

+     def _update_end_time(self):
+         # delay setting first end time
+         # so UI doesn't display temporary value
+         gevent.sleep(5)
+
+         # Regularly update endtime to prevent missing endtimes when a test crashes
+         while True:
+             current_end_time = datetime.now(UTC)
+             try:
+                 with self.pool.connection() as conn:
+                     conn.execute(
+                         "UPDATE testruns SET end_time = %s WHERE id = %s",
+                         (current_end_time, self._run_id),
+                     )
+                 gevent.sleep(60)
+             except psycopg.Error as error:
+                 logging.error("Failed to update testruns table with end time: " + repr(error))
+                 gevent.sleep(1)
+
      def write_samples_to_db(self, samples):
          try:
              with self.pool.connection() as conn:
                  conn: psycopg.connection.Connection
-                 with conn.cursor() as cur:
-                     cur.executemany(
-                         """
-                         INSERT INTO requests (time,run_id,greenlet_id,loadgen,name,request_type,response_time,success,response_length,exception,pid,url,context)
-                         VALUES (%(time)s, %(run_id)s, %(greenlet_id)s, %(loadgen)s, %(name)s, %(request_type)s, %(response_time)s, %(success)s, %(response_length)s, %(exception)s, %(pid)s, %(url)s, %(context)s)
-                         """,
-                         samples,
-                     )
+                 with conn.cursor().copy(
+                     "COPY requests (time,run_id,greenlet_id,loadgen,name,request_type,response_time,success,response_length,exception,pid,url,context) FROM STDIN"
+                 ) as copy:
+                     for sample in samples:
+                         copy.write_row(sample)
          except psycopg.Error as error:
              logging.error("Failed to write samples to Postgresql timescale database: " + repr(error))

      def on_test_stop(self, environment):
+         if getattr(self, "_update_end_time_task", False):
+             self._update_end_time_task.kill()
          if getattr(self, "_user_count_logger", False):
              self._user_count_logger.kill()
          with self.pool.connection() as conn:
@@ -126,6 +145,8 @@ class Exporter:
          self._finished = True
          atexit._clear()  # make sure we dont capture additional ctrl-c:s
          self._background.join(timeout=10)
+         if getattr(self, "_update_end_time_task", False):
+             self._update_end_time_task.kill()
          if getattr(self, "_user_count_logger", False):
              self._user_count_logger.kill()
          self.log_stop_test_run(exit_code)
@@ -142,6 +163,9 @@ class Exporter:
          url=None,
          **kwargs,
      ):
+         # handle if a worker connects after test_start
+         if not self._run_id:
+             self._run_id = parse_datetime(self.env.parsed_options.run_id)
          success = 0 if exception else 1
          if start_time:
              time = datetime.fromtimestamp(start_time, tz=UTC)
@@ -150,35 +174,38 @@ class Exporter:
              # (which will be horribly wrong if users spend a lot of time in a with/catch_response-block)
              time = datetime.now(UTC) - timedelta(milliseconds=response_time or 0)
          greenlet_id = getattr(greenlet.getcurrent(), "minimal_ident", 0)  # if we're debugging there is no greenlet
-         sample = {
-             "time": time,
-             "run_id": self._run_id,
-             "greenlet_id": greenlet_id,
-             "loadgen": self._hostname,
-             "name": name,
-             "request_type": request_type,
-             "response_time": response_time,
-             "success": success,
-             "url": url[0:255] if url else None,
-             "pid": self._pid,
-             "context": psycopg.types.json.Json(context, safe_serialize),
-         }

          if response_length >= 0:
-             sample["response_length"] = response_length
+             response_length = response_length
          else:
-             sample["response_length"] = None
+             response_length = None

          if exception:
              if isinstance(exception, CatchResponseError):
-                 sample["exception"] = str(exception)
+                 exception = str(exception)
              else:
                  try:
-                     sample["exception"] = repr(exception)
+                     exception = repr(exception)
                  except AttributeError:
-                     sample["exception"] = f"{exception.__class__} (and it has no string representation)"
+                     exception = f"{exception.__class__} (and it has no string representation)"
          else:
-             sample["exception"] = None
+             exception = None
+
+         sample = (
+             time,
+             self._run_id,
+             greenlet_id,
+             self._hostname,
+             name,
+             request_type,
+             response_time,
+             success,
+             response_length,
+             exception,
+             self._pid,
+             url[0:255] if url else None,
+             psycopg.types.json.Json(context, safe_serialize),
+         )

          self._samples.append(sample)

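Note: the exporter now streams request samples as tuples through psycopg 3's COPY protocol instead of executemany. A minimal sketch of that bulk-insert pattern (the connection string is a placeholder; the column list matches the diff):

    import psycopg

    rows: list[tuple] = []  # tuples in the same column order as the COPY statement below

    with psycopg.connect("postgres://user:password@localhost/locust") as conn:  # placeholder DSN
        with conn.cursor().copy(
            "COPY requests (time,run_id,greenlet_id,loadgen,name,request_type,"
            "response_time,success,response_length,exception,pid,url,context) FROM STDIN"
        ) as copy:
            for row in rows:
                copy.write_row(row)  # psycopg adapts each tuple onto the COPY stream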
 
locust_cloud/timescale/query.py CHANGED
@@ -21,7 +21,7 @@ def register_query(environment, pool):
          # start_time = time.perf_counter()
          with pool.connection() as conn:
              # get_conn_time = (time.perf_counter() - start_time) * 1000
-             sql_params = request.get_json()
+             sql_params = request.get_json() if request.content_type == "application/json" else {}
              # start_time = time.perf_counter()
              from datetime import datetime, timedelta

@@ -29,7 +29,7 @@ def register_query(environment, pool):
              # protect the database against huge queries
              start_time = datetime.fromisoformat(sql_params["start"])
              end_time = datetime.fromisoformat(sql_params["end"])
-             if end_time >= start_time + timedelta(hours=6):
+             if end_time >= start_time + timedelta(hours=48):
                  logger.warning(
                      f"UI asked for too long time interval. Start was {sql_params['start']}, end was {sql_params['end']}"
                  )
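Note: the query guard above now only parses JSON request bodies and raises the cap on the queried time window from 6 to 48 hours. A short sketch of the window check on its own (the timestamps are placeholder input):

    from datetime import datetime, timedelta

    sql_params = {"start": "2024-01-01T00:00:00", "end": "2024-01-03T12:00:00"}  # placeholder input

    start_time = datetime.fromisoformat(sql_params["start"])
    end_time = datetime.fromisoformat(sql_params["end"])

    if end_time >= start_time + timedelta(hours=48):
        print("UI asked for too long time interval")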