locust-cloud 1.0.1__py3-none-any.whl → 1.0.2__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
@@ -1,28 +1,18 @@
+ import atexit
  import json
  import logging
  import os
  import socket
  import sys
- from contextlib import contextmanager
+ from datetime import UTC, datetime, timedelta

  import gevent
+ import greenlet
  import locust.env
- from gevent.lock import Semaphore
+ import psycopg
+ import psycopg.types.json
  from locust.exception import CatchResponseError
-
- try:
-     import psycogreen.gevent
- except ModuleNotFoundError as e:
-     logging.error(f"'{e}', you need to install it using 'pip install psycogreen'")
-     sys.exit(1)
-
- psycogreen.gevent.patch_psycopg()
- import atexit
- from datetime import UTC, datetime, timedelta
-
- import greenlet
- import psycopg2
- import psycopg2.extras
+ from psycopg_pool import ConnectionPool


  def safe_serialize(obj):
@@ -32,39 +22,29 @@ def safe_serialize(obj):
      return json.dumps(obj, default=default)


- def print_t(s):
-     print(str(s), end="\t")
-
-
  class Timescale:
-     """
-     See timescale_listener_ex.py for documentation
-     """
-
-     dblock = Semaphore()
-     first_instance = True
-
      def __init__(self, environment: locust.env.Environment, pg_user, pg_host, pg_password, pg_database, pg_port):
-         if not Timescale.first_instance:
-             # we should refactor this into a module as it is much more pythonic
-             raise Exception(
-                 "You tried to initialize the Timescale listener twice, maybe both in your locustfile and using command line --timescale? Ignoring second initialization."
-             )
-         Timescale.first_instance = False
          self.env = environment
          self._run_id = ""
-         self.dbconn = None
          self._samples: list[dict] = []
         self._background = gevent.spawn(self._run)
          self._hostname = socket.gethostname()
          self._finished = False
          self._pid = os.getpid()

-         self.pg_user = pg_user
-         self.pg_host = pg_host
-         self.pg_password = pg_password
-         self.pg_database = pg_database
-         self.pg_port = pg_port
+         def set_autocommit(conn: psycopg.Connection):
+             conn.autocommit = True
+
+         try:
+             self.pool = ConnectionPool(
+                 conninfo=f"postgres://{pg_user}:{pg_password}@{pg_host}:{pg_port}/{pg_database}?sslmode=require",
+                 min_size=1,
+                 max_size=5,
+                 configure=set_autocommit,
+             )
+         except Exception:
+             sys.stderr.write(f"Could not connect to postgres ({pg_user}@{pg_host}:{pg_port}).")
+             sys.exit(1)

          events = self.env.events
          events.test_start.add_listener(self.on_test_start)
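
For readers less familiar with psycopg 3, here is a minimal, self-contained sketch of the pooled-connection pattern the new version adopts above. The DSN, pool sizes, and target table are illustrative placeholders, not locust-cloud's actual configuration:

```python
# Sketch only: psycopg_pool.ConnectionPool with a configure hook, mirroring
# the pattern in the diff. All connection details below are made up.
import psycopg
from psycopg_pool import ConnectionPool


def set_autocommit(conn: psycopg.Connection) -> None:
    # Applied to every connection the pool creates, so each execute()
    # commits immediately instead of waiting for an explicit commit.
    conn.autocommit = True


pool = ConnectionPool(
    conninfo="postgres://user:secret@localhost:5432/example",  # placeholder DSN
    min_size=1,  # keep at least one connection open
    max_size=5,  # never hold more than five connections
    configure=set_autocommit,
)

# Borrow a connection for the duration of the block; it is returned to the
# pool automatically, and Connection.execute() manages its own cursor.
with pool.connection() as conn:
    conn.execute(
        "INSERT INTO events (time, text, run_id) VALUES (%s, %s, %s)",
        ("2024-01-01T00:00:00Z", "example event", "run-1"),
    )
```

Handing out and recycling connections is now the pool's job, which is why the old Semaphore-guarded dbcursor() helper and its manual reconnect logic disappear in the hunks below.
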
@@ -82,40 +62,18 @@ class Timescale:
          logging.debug(f"run id from master: {msg.data}")
          self._run_id = datetime.strptime(msg.data, "%Y-%m-%d, %H:%M:%S.%f").replace(tzinfo=UTC)

-     @contextmanager
-     def dbcursor(self):
-         with self.dblock:
-             try:
-                 if self.dbconn:
-                     if self.dbconn.closed:
-                         self.dbconn = self._dbconn()
-                 yield self.dbconn.cursor()
-             except psycopg2.Error:
-                 try:
-                     # try to recreate connection
-                     self.dbconn = self._dbconn()
-                 except Exception:
-                     pass
-                 raise
-
      def on_cpu_warning(self, environment: locust.env.Environment, cpu_usage, message=None, timestamp=None, **kwargs):
          # passing a custom message & timestamp to the event is a haxx to allow using this event for reporting generic events
          if not timestamp:
              timestamp = datetime.now(UTC).isoformat()
          if not message:
              message = f"High CPU usage ({cpu_usage}%)"
-         with self.dbcursor() as cur:
-             cur.execute(
+         with self.pool.connection() as conn:
+             conn.execute(
                  "INSERT INTO events (time, text, run_id) VALUES (%s, %s, %s)", (timestamp, message, self._run_id)
              )

      def on_test_start(self, environment: locust.env.Environment):
-         try:
-             self.dbconn = self._dbconn()
-         except psycopg2.OperationalError as e:
-             logging.error(e)
-             sys.exit(1)
-
          if not self.env.parsed_options or not self.env.parsed_options.worker:
              environment._run_id = datetime.now(UTC)  # type: ignore
              msg = environment._run_id.strftime("%Y-%m-%d, %H:%M:%S.%f")  # type: ignore
@@ -125,43 +83,18 @@ class Timescale:
              self.log_start_testrun()
              self._user_count_logger = gevent.spawn(self._log_user_count)

-     def _dbconn(self) -> psycopg2.extensions.connection:
-         try:
-             conn = psycopg2.connect(
-                 host=self.pg_host,
-                 user=self.pg_user,
-                 password=self.pg_password,
-                 database=self.pg_database,
-                 port=self.pg_port,
-                 keepalives_idle=120,
-                 keepalives_interval=20,
-                 keepalives_count=6,
-             )
-
-             conn.autocommit = True
-         except Exception:
-             sys.stderr.write(f"Could not connect to postgres ({self.pg_user}@{self.pg_host}:{self.pg_port}).")
-             sys.exit(1)
-
-         return conn
-
      def _log_user_count(self):
          while True:
              if self.env.runner is None:
                  return  # there is no runner, so nothing to log...
              try:
-                 with self.dbcursor() as cur:
-                     cur.execute(
+                 with self.pool.connection() as conn:
+                     conn.execute(
                          """INSERT INTO number_of_users(time, run_id, user_count) VALUES (%s, %s, %s)""",
                          (datetime.now(UTC).isoformat(), self._run_id, self.env.runner.user_count),
                      )
-             except psycopg2.Error as error:
+             except psycopg.Error as error:
                  logging.error("Failed to write user count to Postgresql: " + repr(error))
-                 try:
-                     # try to recreate connection
-                     self.user_conn = self._dbconn()
-                 except Exception:
-                     pass
              gevent.sleep(2.0)

      def _run(self):
@@ -179,17 +112,16 @@

      def write_samples_to_db(self, samples):
          try:
-             with self.dbcursor() as cur:
-                 psycopg2.extras.execute_values(
-                     cur,
-                     """INSERT INTO requests(time,run_id,greenlet_id,loadgen,name,request_type,response_time,success,response_length,exception,pid,url,context) VALUES %s""",
+             with self.pool.connection() as conn:
+                 conn.cursor().executemany(
+                     """
+                     INSERT INTO requests (time,run_id,greenlet_id,loadgen,name,request_type,response_time,success,response_length,exception,pid,url,context)
+                     VALUES (%(time)s, %(run_id)s, %(greenlet_id)s, %(loadgen)s, %(name)s, %(request_type)s, %(response_time)s, %(success)s, %(response_length)s, %(exception)s, %(pid)s, %(url)s, %(context)s)
+                     """,
                      samples,
-                     template="(%(time)s, %(run_id)s, %(greenlet_id)s, %(loadgen)s, %(name)s, %(request_type)s, %(response_time)s, %(success)s, %(response_length)s, %(exception)s, %(pid)s, %(url)s, %(context)s)",
                  )
-
-         except psycopg2.Error as error:
+         except psycopg.Error as error:
              logging.error("Failed to write samples to Postgresql timescale database: " + repr(error))
-             sys.exit(1)

      def on_test_stop(self, environment):
          if getattr(self, "_user_count_logger", False):
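
The batch insert above moves from psycopg2.extras.execute_values() with a template string to psycopg 3's plain executemany(), which binds each dict in the sequence against %(name)s placeholders. A small hedged sketch of that calling convention (connection string, table, and columns are illustrative, not the real schema):

```python
# Sketch only: psycopg 3 executemany() with named placeholders and dict rows.
import psycopg

samples = [
    {"time": "2024-01-01T00:00:00Z", "name": "/login", "response_time": 42.0},
    {"time": "2024-01-01T00:00:01Z", "name": "/login", "response_time": 38.5},
]

# The connection context manager commits on successful exit.
with psycopg.connect("postgres://user:secret@localhost:5432/example") as conn:
    conn.cursor().executemany(
        "INSERT INTO requests (time, name, response_time) "
        "VALUES (%(time)s, %(name)s, %(response_time)s)",
        samples,  # one execution per dict; keys must match the placeholder names
    )
```
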
@@ -235,7 +167,7 @@
              "success": success,
              "url": url[0:255] if url else None,
              "pid": self._pid,
-             "context": psycopg2.extras.Json(context, safe_serialize),
+             "context": psycopg.types.json.Json(context, safe_serialize),
          }

          if response_length >= 0:
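
psycopg.types.json.Json plays the same role psycopg2.extras.Json did: it marks a Python object to be bound as a JSON parameter, and its optional second argument overrides the dumps function. A hedged sketch (the fallback serializer here uses repr() and only approximates the module's own safe_serialize helper):

```python
# Sketch only: wrapping a dict for JSON binding with a custom dumps function.
import json

import psycopg.types.json


def safe_serialize(obj):
    # Fall back to repr() for values the json module cannot encode.
    return json.dumps(obj, default=repr)


context = {"user": "alice", "attempt": 3}
param = psycopg.types.json.Json(context, safe_serialize)
# `param` can now be passed like any other query parameter, e.g.
#   conn.execute("INSERT INTO requests (context) VALUES (%s)", (param,))
```
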
@@ -258,8 +190,8 @@

      def log_start_testrun(self):
          cmd = sys.argv[1:]
-         with self.dbcursor() as cur:
-             cur.execute(
+         with self.pool.connection() as conn:
+             conn.execute(
                  "INSERT INTO testruns (id, num_users, description, arguments) VALUES (%s,%s,%s,%s)",
                  (
                      self._run_id,
@@ -268,7 +200,7 @@
                      " ".join(cmd),
                  ),
              )
-             cur.execute(
+             conn.execute(
                  "INSERT INTO events (time, text, run_id) VALUES (%s, %s, %s)",
                  (datetime.now(UTC).isoformat(), "Test run started", self._run_id),
              )
@@ -277,12 +209,12 @@
          if not self.env.parsed_options.worker:  # only log for master/standalone
              end_time = datetime.now(UTC)
              try:
-                 with self.dbcursor() as cur:
-                     cur.execute(
+                 with self.pool.connection() as conn:
+                     conn.execute(
                          "INSERT INTO events (time, text, run_id) VALUES (%s, %s, %s)",
                          (end_time, f"Rampup complete, {user_count} users spawned", self._run_id),
                      )
-             except psycopg2.Error as error:
+             except psycopg.Error as error:
                  logging.error(
                      "Failed to insert rampup complete event time to Postgresql timescale database: " + repr(error)
                  )
@@ -291,16 +223,14 @@
          logging.debug(f"Test run id {self._run_id} stopping")
          if self.env.parsed_options.worker:
              return  # only run on master or standalone
-         if getattr(self, "dbconn", None) is None:
-             return  # test_start never ran, so there's not much for us to do
          end_time = datetime.now(UTC)
          try:
-             with self.dbcursor() as cur:
-                 cur.execute(
+             with self.pool.connection() as conn:
+                 conn.execute(
                      "UPDATE testruns SET end_time = %s, exit_code = %s where id = %s",
                      (end_time, exit_code, self._run_id),
                  )
-                 cur.execute(
+                 conn.execute(
                      "INSERT INTO events (time, text, run_id) VALUES (%s, %s, %s)",
                      (end_time, f"Finished with exit code: {exit_code}", self._run_id),
                  )
@@ -308,7 +238,7 @@
                  # We dont use start_time / end_time to calculate RPS, instead we use the time between the actual first and last request
                  # (as this is a more accurate measurement of the actual test)
                  try:
-                     cur.execute(
+                     conn.execute(
                          """
                          UPDATE testruns
                          SET (requests, resp_time_avg, rps_avg, fail_ratio) =
@@ -316,20 +246,20 @@ SET (requests, resp_time_avg, rps_avg, fail_ratio) =
                          (SELECT
                              COUNT(*)::numeric AS reqs,
                              AVG(response_time)::numeric as resp_time
-                         FROM requests WHERE run_id = %s AND time > %s) AS _,
+                         FROM requests WHERE run_id = %(run_id)s AND time > %(run_id)s) AS _,
                          (SELECT
-                             EXTRACT(epoch FROM (SELECT MAX(time)-MIN(time) FROM requests WHERE run_id = %s AND time > %s))::numeric AS duration) AS __,
+                             EXTRACT(epoch FROM (SELECT MAX(time)-MIN(time) FROM requests WHERE run_id = %(run_id)s AND time > %(run_id)s))::numeric AS duration) AS __,
                          (SELECT
                              COUNT(*)::numeric AS fails
-                         FROM requests WHERE run_id = %s AND time > %s AND success = 0) AS ___
-                         WHERE id = %s""",
-                         [self._run_id] * 7,
+                         FROM requests WHERE run_id = %(run_id)s AND time > %(run_id)s AND success = 0) AS ___
+                         WHERE id = %(run_id)s""",
+                         {"run_id": self._run_id},
                      )
-                 except psycopg2.errors.DivisionByZero:
+                 except psycopg.errors.DivisionByZero:
                      logging.info(
                          "Got DivisionByZero error when trying to update testruns, most likely because there were no requests logged"
                      )
-             except psycopg2.Error as error:
+             except psycopg.Error as error:
                  logging.error(
                      "Failed to update testruns record (or events) with end time to Postgresql timescale database: "
                      + repr(error)
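
Finally, the aggregate UPDATE switches from positional %s placeholders bound with `[self._run_id] * 7` to named %(run_id)s placeholders bound from a single dict, so one value can be referenced as many times as the statement needs it. A hedged sketch of that binding style (tables and the run id value are placeholders):

```python
# Sketch only: one dict entry can feed a named placeholder that appears
# several times in the same statement.
import psycopg

with psycopg.connect("postgres://user:secret@localhost:5432/example") as conn:
    row = conn.execute(
        "SELECT "
        "  (SELECT COUNT(*) FROM requests WHERE run_id = %(run_id)s) AS requests, "
        "  (SELECT COUNT(*) FROM events WHERE run_id = %(run_id)s) AS events",
        {"run_id": "2024-01-01, 00:00:00.000000"},
    ).fetchone()
    print(row)
```
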