locust-cloud 1.7.0__py3-none-any.whl → 1.8.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
locust_cloud/__init__.py CHANGED
@@ -19,22 +19,16 @@ from locust_cloud.timescale.query import register_query
 from psycopg.conninfo import make_conninfo
 from psycopg_pool import ConnectionPool
 
-PG_USER = os.environ.get("PG_USER")
-PG_HOST = os.environ.get("PG_HOST")
-PG_PASSWORD = os.environ.get("PG_PASSWORD")
-PG_DATABASE = os.environ.get("PG_DATABASE")
-PG_PORT = os.environ.get("PG_PORT", 5432)
 GRAPH_VIEWER = os.environ.get("GRAPH_VIEWER")
-MAX_USER_COUNT = os.environ.get("MAX_USER_COUNT")
 
 logger = logging.getLogger(__name__)
 
 
 @events.init_command_line_parser.add_listener
 def add_arguments(parser: LocustArgumentParser):
-    if not (PG_HOST or GRAPH_VIEWER):
+    if not (os.environ.get("PGHOST") or GRAPH_VIEWER):
         parser.add_argument_group(
             "locust-cloud",
-            "locust-cloud disabled, because PG_HOST was not set - this is normal for local runs",
+            "locust-cloud disabled, because PGHOST was not set - this is normal for local runs",
         )
         return
 
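Note: dropping the custom `PG_*` variables works because `PGHOST`, `PGPORT`, `PGUSER`, `PGPASSWORD` and `PGDATABASE` are libpq's standard environment variables, which psycopg resolves automatically when they are not passed explicitly (libpq also defaults the port to 5432, so the old `PG_PORT` fallback is covered too). A minimal sketch, with a made-up host value:

```python
import os

import psycopg
from psycopg.conninfo import make_conninfo

os.environ.setdefault("PGHOST", "db.example.com")  # hypothetical host

# Only sslmode is stated; host, port, user, password and dbname are
# all picked up from the PG* environment variables by libpq/psycopg.
conninfo = make_conninfo(sslmode="require")
with psycopg.connect(conninfo) as conn:
    print(conn.execute("SELECT 1").fetchone())
```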
@@ -73,30 +67,23 @@ def set_autocommit(conn: psycopg.Connection):
 
 @events.init.add_listener
 def on_locust_init(environment: locust.env.Environment, **_args):
-    if not (PG_HOST and PG_USER and PG_PASSWORD and PG_DATABASE and PG_PORT):
+    if not (os.environ.get("PGHOST")):
         return
 
     try:
         conninfo = make_conninfo(
-            dbname=PG_DATABASE,
-            user=PG_USER,
-            port=PG_PORT,
-            password=PG_PASSWORD,
-            host=PG_HOST,
             sslmode="require",
-            # options="-c statement_timeout=55000",
         )
         pool = ConnectionPool(
            conninfo,
            min_size=1,
-           max_size=10,
+           max_size=20,
            configure=set_autocommit,
            check=ConnectionPool.check_connection,
        )
         pool.wait()
     except Exception as e:
         logger.exception(e)
-        logger.error(f"{PG_HOST=}")
         raise
 
     if not GRAPH_VIEWER:
@@ -106,8 +93,6 @@ def on_locust_init(environment: locust.env.Environment, **_args):
         Exporter(environment, pool)
 
     if environment.web_ui:
-        environment.web_ui.template_args["maxUserCount"] = MAX_USER_COUNT
-
         if GRAPH_VIEWER:
             environment.web_ui.template_args["isGraphViewer"] = True
 
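The pool settings are worth a second look: `min_size=1` keeps the idle footprint small, the raised `max_size=20` allows more concurrent writers, and `pool.wait()` turns an unreachable database into an immediate startup failure rather than a late one. A standalone sketch of the same pattern (the body of `set_autocommit` is an assumption; only its signature appears in the hunk header above):

```python
import psycopg
from psycopg_pool import ConnectionPool


def set_autocommit(conn: psycopg.Connection) -> None:
    # configure= runs once per new connection before the pool hands it out
    conn.autocommit = True


pool = ConnectionPool(
    "sslmode=require",  # host/user/password come from the PG* env vars
    min_size=1,   # open lazily
    max_size=20,  # allow bursts of up to 20 concurrent connections
    configure=set_autocommit,
    check=ConnectionPool.check_connection,  # validate connections before use
)
pool.wait()  # raise now if the database cannot be reached
```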
locust_cloud/cloud.py CHANGED
@@ -199,12 +199,7 @@ def main() -> None:
     deployments: list[Any] = []
     worker_count: int = max(options.workers or math.ceil(options.users / USERS_PER_WORKER), 2)
     os.environ["AWS_DEFAULT_REGION"] = options.region
-    if options.users > 5000000:
-        logger.error("You asked for more than 5000000 Users, that isn't allowed.")
-        sys.exit(1)
-    if worker_count > 1000:
-        logger.error("You asked for more than 20 workers, that isn't allowed.")
-        sys.exit(1)
+
     try:
         if not (
             (options.username and options.password) or (options.aws_access_key_id and options.aws_secret_access_key)
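The removed guard was also self-inconsistent (it checked `worker_count > 1000` but reported "more than 20 workers"); the limits now travel with the deploy request instead (see the `user_count` field added below and the new `customer` query at the end of this diff). For reference, the surviving `worker_count` expression behaves like this (the `USERS_PER_WORKER` value here is assumed for illustration; the real constant is defined elsewhere in cloud.py):

```python
import math

USERS_PER_WORKER = 500  # assumed value, for illustration only


def worker_count(users: int, workers: int | None = None) -> int:
    # an explicit --workers value wins; otherwise scale with the user
    # count, but never deploy fewer than two workers
    return max(workers or math.ceil(users / USERS_PER_WORKER), 2)


assert worker_count(100) == 2       # small runs still get two workers
assert worker_count(5_000) == 10    # ceil(5000 / 500)
assert worker_count(5_000, 15) == 15
```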
@@ -295,6 +290,7 @@ def main() -> None:
             *locust_env_variables,
         ],
         "worker_count": worker_count,
+        "user_count": options.users,
         "image_tag": options.image_tag,
     }
     headers = {
locust_cloud/idle_exit.py CHANGED
@@ -30,9 +30,9 @@ class IdleExit:
         if self.environment.web_ui.greenlet.started:
             sys.exit(1)
 
-    def on_test_stop(self, **_kwargs):
+    def on_test_stop(self, **kwargs):
         self._destroy_task = gevent.spawn(self._destroy)
 
-    def on_locust_state_change(self, **_kwargs):
+    def on_locust_state_change(self, **kwargs):
         if self._destroy_task:
             self._destroy_task.kill()
locust_cloud/timescale/exporter.py CHANGED
@@ -38,6 +38,7 @@ class Exporter:
         self._background = gevent.spawn(self._run)
         self._hostname = socket.gethostname()
         self._finished = False
+        self._has_logged_test_stop = False
         self._pid = os.getpid()
         self.pool = pool
 
@@ -58,11 +59,13 @@
             message = f"High CPU usage ({cpu_usage}%)"
             with self.pool.connection() as conn:
                 conn.execute(
-                    "INSERT INTO events (time, text, run_id) VALUES (%s, %s, %s)", (timestamp, message, self._run_id)
+                    "INSERT INTO events (time, text, run_id, customer) VALUES (%s, %s, %s, current_user)",
+                    (timestamp, message, self._run_id),
                 )
 
     def on_test_start(self, environment: locust.env.Environment):
         if not self.env.parsed_options or not self.env.parsed_options.worker:
+            self._has_logged_test_stop = False
             self._run_id = environment._run_id = datetime.now(UTC)  # type: ignore
             self.env.parsed_options.run_id = format_datetime(environment._run_id)  # type: ignore
             self.log_start_testrun()
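A pattern that repeats throughout this file: every INSERT now stamps a `customer` column with Postgres's `current_user`, and the read queries later in the diff switch from raw tables to `*_view` relations. Together these point at per-tenant scoping done inside the database, keyed on the connecting role, rather than in application code. A hypothetical illustration of the idea (not the actual locust-cloud schema):

```python
# Hypothetical schema sketch: rows are stamped with the connecting
# role, and a view exposes only the current role's rows.
TENANCY_SKETCH = """
ALTER TABLE events ADD COLUMN customer text;

CREATE VIEW events_view AS
    SELECT * FROM events WHERE customer = current_user;
"""
```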
@@ -78,7 +81,7 @@
         try:
             with self.pool.connection() as conn:
                 conn.execute(
-                    """INSERT INTO number_of_users(time, run_id, user_count) VALUES (%s, %s, %s)""",
+                    """INSERT INTO number_of_users(time, run_id, user_count, customer) VALUES (%s, %s, %s, current_user)""",
                     (datetime.now(UTC).isoformat(), self._run_id, self.env.runner.user_count),
                 )
         except psycopg.Error as error:
@@ -136,10 +139,11 @@
             self._user_count_logger.kill()
             with self.pool.connection() as conn:
                 conn.execute(
-                    """INSERT INTO number_of_users(time, run_id, user_count) VALUES (%s, %s, %s)""",
+                    """INSERT INTO number_of_users(time, run_id, user_count, customer) VALUES (%s, %s, %s, current_user)""",
                     (datetime.now(UTC).isoformat(), self._run_id, 0),
                 )
             self.log_stop_test_run()
+            self._has_logged_test_stop = True
 
     def on_quit(self, exit_code, **kwargs):
         self._finished = True
@@ -149,7 +153,10 @@
             self._update_end_time_task.kill()
         if getattr(self, "_user_count_logger", False):
             self._user_count_logger.kill()
-        self.log_stop_test_run(exit_code)
+        if not self._has_logged_test_stop:
+            self.log_stop_test_run()
+        if not self.env.parsed_options.worker:
+            self.log_exit_code(exit_code)
 
     def on_request(
         self,
@@ -175,11 +182,6 @@
         time = datetime.now(UTC) - timedelta(milliseconds=response_time or 0)
         greenlet_id = getattr(greenlet.getcurrent(), "minimal_ident", 0)  # if we're debugging there is no greenlet
 
-        if response_length >= 0:
-            response_length = response_length
-        else:
-            response_length = None
-
         if exception:
             if isinstance(exception, CatchResponseError):
                 exception = str(exception)
@@ -188,6 +190,8 @@
                     exception = repr(exception)
                 except AttributeError:
                     exception = f"{exception.__class__} (and it has no string representation)"
+
+            exception = exception[:300]
         else:
             exception = None
 
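Truncation now happens once, at write time, instead of at read time with `left(exception,300)` (see the failures_query change further down); stored messages stay bounded and the failures query can group directly on the column. Slicing is safe whatever the length (`clamp` below is an illustrative helper, not code from the package):

```python
def clamp(text: str, limit: int = 300) -> str:
    # str slicing never raises, even when the text is already short
    return text[:limit]


assert clamp("boom") == "boom"
assert len(clamp("x" * 10_000)) == 300
```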
@@ -213,7 +217,7 @@
         cmd = sys.argv[1:]
         with self.pool.connection() as conn:
             conn.execute(
-                "INSERT INTO testruns (id, num_users, worker_count, username, locustfile, description, arguments) VALUES (%s,%s,%s,%s,%s,%s,%s)",
+                "INSERT INTO testruns (id, num_users, worker_count, username, locustfile, description, arguments, customer) VALUES (%s,%s,%s,%s,%s,%s,%s,current_user)",
                 (
                     self._run_id,
                     self.env.runner.target_user_count if self.env.runner else 1,
@@ -230,7 +234,7 @@
                 ),
             )
             conn.execute(
-                "INSERT INTO events (time, text, run_id) VALUES (%s, %s, %s)",
+                "INSERT INTO events (time, text, run_id, customer) VALUES (%s, %s, %s, current_user)",
                 (datetime.now(UTC).isoformat(), "Test run started", self._run_id),
             )
 
@@ -240,7 +244,7 @@
         try:
             with self.pool.connection() as conn:
                 conn.execute(
-                    "INSERT INTO events (time, text, run_id) VALUES (%s, %s, %s)",
+                    "INSERT INTO events (time, text, run_id, customer) VALUES (%s, %s, %s, current_user)",
                     (end_time, f"Rampup complete, {user_count} users spawned", self._run_id),
                 )
         except psycopg.Error as error:
@@ -248,7 +252,7 @@
                 "Failed to insert rampup complete event time to Postgresql timescale database: " + repr(error)
             )
 
-    def log_stop_test_run(self, exit_code=None):
+    def log_stop_test_run(self):
         logging.debug(f"Test run id {self._run_id} stopping")
         if self.env.parsed_options.worker:
             return  # only run on master or standalone
@@ -256,17 +260,14 @@
         try:
             with self.pool.connection() as conn:
                 conn.execute(
-                    "UPDATE testruns SET end_time = %s, exit_code = %s where id = %s",
-                    (end_time, exit_code, self._run_id),
-                )
-                conn.execute(
-                    "INSERT INTO events (time, text, run_id) VALUES (%s, %s, %s)",
-                    (end_time, f"Finished with exit code: {exit_code}", self._run_id),
+                    "UPDATE testruns SET end_time = %s WHERE id = %s",
+                    (end_time, self._run_id),
                 )
-                # The AND time > run_id clause in the following statements are there to help Timescale performance
-                # We dont use start_time / end_time to calculate RPS, instead we use the time between the actual first and last request
-                # (as this is a more accurate measurement of the actual test)
+
                 try:
+                    # The AND time > run_id clause in the following statements are there to help Timescale performance
+                    # We dont use start_time / end_time to calculate RPS, instead we use the time between the actual first and last request
+                    # (as this is a more accurate measurement of the actual test)
                     conn.execute(
                         """
 UPDATE testruns
@@ -275,12 +276,12 @@ SET (requests, resp_time_avg, rps_avg, fail_ratio) =
 (SELECT
   COUNT(*)::numeric AS reqs,
   AVG(response_time)::numeric as resp_time
- FROM requests WHERE run_id = %(run_id)s AND time > %(run_id)s) AS _,
+ FROM requests_view WHERE run_id = %(run_id)s AND time > %(run_id)s) AS _,
 (SELECT
-  EXTRACT(epoch FROM (SELECT MAX(time)-MIN(time) FROM requests WHERE run_id = %(run_id)s AND time > %(run_id)s))::numeric AS duration) AS __,
+  EXTRACT(epoch FROM (SELECT MAX(time)-MIN(time) FROM requests_view WHERE run_id = %(run_id)s AND time > %(run_id)s))::numeric AS duration) AS __,
 (SELECT
   COUNT(*)::numeric AS fails
-  FROM requests WHERE run_id = %(run_id)s AND time > %(run_id)s AND success = 0) AS ___
+  FROM requests_view WHERE run_id = %(run_id)s AND time > %(run_id)s AND success = 0) AS ___
 WHERE id = %(run_id)s""",
                         {"run_id": self._run_id},
                     )
@@ -293,3 +294,20 @@ WHERE id = %(run_id)s""",
                 "Failed to update testruns record (or events) with end time to Postgresql timescale database: "
                 + repr(error)
             )
+
+    def log_exit_code(self, exit_code=None):
+        try:
+            with self.pool.connection() as conn:
+                conn.execute(
+                    "UPDATE testruns SET exit_code = %s WHERE id = %s",
+                    (exit_code, self._run_id),
+                )
+                conn.execute(
+                    "INSERT INTO events (time, text, run_id, customer) VALUES (%s, %s, %s, current_user)",
+                    (datetime.now(UTC).isoformat(), f"Finished with exit code: {exit_code}", self._run_id),
+                )
+        except psycopg.Error as error:
+            logging.error(
+                "Failed to update testruns record (or events) with end time to Postgresql timescale database: "
+                + repr(error)
+            )
@@ -10,7 +10,7 @@ SELECT
  MIN(min),
  MAX(max),
  SUM(failed_count) / SUM(count) * 100 as "errorPercentage"
-FROM requests_summary
+FROM requests_summary_view
 WHERE bucket BETWEEN %(start)s AND %(end)s
 AND run_id = %(testrun)s
 GROUP BY name, method
@@ -20,13 +20,13 @@ GROUP BY name, method
 failures_query = """
 SELECT
  name as name,
- left(exception,300) as exception,
+ exception,
  count(*)
-FROM requests
+FROM requests_view
 WHERE time BETWEEN %(start)s AND %(end)s AND
- exception is not null
+ success = 0
 AND run_id = %(testrun)s
-GROUP BY "name",left(exception,300)
+GROUP BY "name",exception
 """
 
 
@@ -35,7 +35,7 @@ WITH request_count_agg AS (
 SELECT
  time_bucket_gapfill(%(resolution)s * interval '1 second', bucket) AS time,
  COALESCE(SUM(count)/%(resolution)s, 0) as rps
-FROM requests_summary
+FROM requests_summary_view
 WHERE bucket BETWEEN %(start)s AND %(end)s
 AND run_id = %(testrun)s
 GROUP BY 1
@@ -55,7 +55,7 @@ errors_per_s_agg AS (
 SELECT
  time_bucket_gapfill(%(resolution)s * interval '1 second', bucket) AS time,
  COALESCE(SUM(failed_count)/%(resolution)s, 0) as error_rate
-FROM requests_summary
+FROM requests_summary_view
 WHERE bucket BETWEEN %(start)s AND %(end)s
 AND run_id = %(testrun)s
 GROUP BY 1
@@ -76,7 +76,7 @@ ORDER BY r.time;
 total_requests = """
 SELECT
  SUM(count) as "totalRequests"
-FROM requests_summary
+FROM requests_summary_view
 WHERE bucket BETWEEN %(start)s AND %(end)s
 AND run_id = %(testrun)s
 """
@@ -85,7 +85,7 @@ AND run_id = %(testrun)s
 total_failed = """
 SELECT
  SUM(failed_count) as "totalFailures"
-FROM requests_summary
+FROM requests_summary_view
 WHERE bucket BETWEEN %(start)s AND %(end)s
 AND run_id = %(testrun)s
 """
@@ -94,7 +94,7 @@ AND run_id = %(testrun)s
 error_percentage = """
 SELECT
  SUM(failed_count) / SUM(count) * 100 "errorPercentage"
-FROM requests_summary
+FROM requests_summary_view
 WHERE bucket BETWEEN %(start)s AND %(end)s
 AND run_id = %(testrun)s
 """
@@ -104,7 +104,7 @@ SELECT
  time_bucket_gapfill(%(resolution)s * interval '1 second', bucket) AS time,
  name,
  COALESCE(SUM(count)/%(resolution)s, 0) as throughput
-FROM requests_summary
+FROM requests_summary_view
 WHERE bucket BETWEEN %(start)s AND %(end)s
 AND run_id = %(testrun)s
 GROUP BY 1, name
@@ -117,7 +117,7 @@ SELECT
  time_bucket_gapfill(%(resolution)s * interval '1 second', bucket) as time,
  name,
  avg(average) as "responseTime"
-FROM requests_summary
+FROM requests_summary_view
 WHERE bucket BETWEEN %(start)s AND %(end)s
 AND run_id = %(testrun)s
 GROUP BY 1, name
@@ -129,7 +129,7 @@ SELECT
  time_bucket_gapfill(%(resolution)s * interval '1 second', bucket) AS time,
  name,
  SUM(failed_count)/%(resolution)s as "errorRate"
-FROM requests_summary
+FROM requests_summary_view
 WHERE bucket BETWEEN %(start)s AND %(end)s
 AND run_id = %(testrun)s
 GROUP BY 1, name
@@ -138,10 +138,10 @@ ORDER BY 1
 
 
 perc99_response_times = """
-SELECT time_bucket(%(resolution)s * interval '1 second', bucket) AS time,
+SELECT time_bucket_gapfill(%(resolution)s * interval '1 second', bucket) AS time,
  name,
  MAX(perc99) as perc99
-FROM requests_summary
+FROM requests_summary_view
 WHERE bucket BETWEEN %(start)s AND %(end)s
 AND run_id = %(testrun)s
 GROUP BY 1, name
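`time_bucket` only returns buckets that actually contain rows, so quiet periods silently vanish from the series; `time_bucket_gapfill` emits one row per bucket across the whole queried window, which keeps chart x-axes continuous. The percentile and response-length queries here (and in the next hunk) now match the other gap-filled series. A minimal illustration of the difference, assuming Timescale's gapfill function and the same parameter style as this file:

```python
# time_bucket(): only buckets containing rows come back.
# time_bucket_gapfill(): one row per bucket over %(start)s..%(end)s,
# with NULLs (or a COALESCE'd default) where no data exists.
GAPFILL_DEMO: str = """
SELECT
 time_bucket_gapfill(%(resolution)s * interval '1 second', bucket) AS time,
 COALESCE(SUM(count) / %(resolution)s, 0) AS rps
FROM requests_summary_view
WHERE bucket BETWEEN %(start)s AND %(end)s  -- gapfill needs a bounded range
AND run_id = %(testrun)s
GROUP BY 1
ORDER BY 1
"""
```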
@@ -151,10 +151,10 @@ ORDER BY 1
 
 response_length = """
 SELECT
- time_bucket(%(resolution)s * interval '1 second', bucket) as time,
+ time_bucket_gapfill(%(resolution)s * interval '1 second', bucket) as time,
  AVG(response_length) as "responseLength",
  name
-FROM requests_summary
+FROM requests_summary_view
 WHERE bucket BETWEEN %(start)s AND %(end)s
 AND run_id = %(testrun)s
 GROUP BY 1, name
@@ -164,7 +164,7 @@ ORDER BY 1
 
 request_names = """
 SELECT DISTINCT name
-FROM requests_summary
+FROM requests_summary_view
 WHERE bucket BETWEEN %(start)s AND %(end)s
 AND run_id = %(testrun)s
 """
@@ -174,7 +174,7 @@ SELECT
  time,
  name,
  response_time as "responseTime"
-FROM requests
+FROM requests_view
 WHERE time BETWEEN %(start)s AND %(end)s
 AND run_id = %(testrun)s
 ORDER BY 1,2
@@ -260,13 +260,23 @@ JOIN avg_response_time_failed f ON a.time = f.time
 ORDER BY a.time
 """
 
-total_runtime = """
+total_vuh = """
 SELECT
  SUM((end_time - id) * num_users) AS "totalVuh"
 FROM testruns
 WHERE id >= date_trunc('month', NOW()) AND NOT refund
 """
 
+customer = """
+SELECT
+ max_vuh as "maxVuh",
+ max_workers as "maxWorkers",
+ max_users as "maxUsers",
+ users_per_worker as "usersPerWorker"
+FROM customers
+WHERE customer = current_user
+"""
+
 queries: dict["str", LiteralString] = {
     "request-names": request_names,
     "requests": requests_query,
@@ -285,5 +295,6 @@ queries: dict["str", LiteralString] = {
     "testruns-table": testruns_table,
     "testruns-rps": testruns_rps,
     "testruns-response-time": testruns_response_time,
-    "total-runtime": total_runtime,
+    "total-vuh": total_vuh,
+    "customer": customer,
 }
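Taken together with the cloud.py changes, the hard-coded user and worker guards give way to per-customer limits stored in a `customers` table and keyed to the connecting role: the deploy request now reports `user_count`, and the ceilings are readable through the new `customer` query. A hedged sketch of how a consumer could fetch those limits (the diff itself does not show the consumer, and the import path assumes the module location shown above):

```python
from psycopg_pool import ConnectionPool

from locust_cloud.timescale.query import queries

pool = ConnectionPool("sslmode=require", min_size=1)  # PG* env vars supply the rest
pool.wait()

with pool.connection() as conn:
    # queries["customer"] takes no parameters; the role itself is the key
    max_vuh, max_workers, max_users, users_per_worker = conn.execute(
        queries["customer"]
    ).fetchone()
```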