locust-cloud 1.0.10__py3-none-any.whl → 1.0.15__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
locust_cloud/__init__.py CHANGED
@@ -45,7 +45,7 @@ def add_arguments(parser: LocustArgumentParser):
 
 
 @events.init.add_listener
-def on_locust_init(environment, **args):
+def on_locust_init(environment, **_args):
     if not PG_HOST:
         return
 
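For context, a minimal sketch (not part of the package) of how a listener like on_locust_init receives its arguments: locust fires the init hook with the environment plus extra keyword arguments, and the catch-all **_args absorbs whatever else is passed. The runner/web_ui keywords below are illustrative assumptions, not taken from this diff.

    from locust import events
    from locust.env import Environment

    @events.init.add_listener
    def on_init(environment, **_args):  # mirrors the signature in the diff above
        print(f"init fired for host={environment.host!r}")

    # locust normally fires this hook itself during startup; fired here only to
    # demonstrate that extra keyword arguments end up in **_args.
    env = Environment(user_classes=[], host="https://example.invalid")
    events.init.fire(environment=env, runner=None, web_ui=None)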
locust_cloud/cloud.py CHANGED
@@ -5,28 +5,38 @@ import sys
 import time
 import tomllib
 from collections import OrderedDict
-from datetime import datetime, timedelta
+from datetime import UTC, datetime, timedelta
+from typing import IO, Any
 
-import boto3
 import configargparse
 import requests
 from botocore.exceptions import ClientError
+from locust_cloud.constants import (
+    DEFAULT_CLUSTER_NAME,
+    DEFAULT_NAMESPACE,
+    DEFAULT_REGION_NAME,
+    LAMBDA_URL,
+)
+from locust_cloud.credential_manager import CredentialError, CredentialManager
+
+logging.basicConfig(
+    format="[LOCUST-CLOUD] %(levelname)s: %(message)s",
+    level=logging.INFO,
+)
+logger = logging.getLogger(__name__)
+
 
-LAMBDA = "https://deployer.locust.cloud/1"
-DEFAULT_CLUSTER_NAME = "locust"
-DEFAULT_REGION_NAME = "eu-north-1"
 LOCUST_ENV_VARIABLE_IGNORE_LIST = ["LOCUST_BUILD_PATH", "LOCUST_SKIP_MONKEY_PATCH"]
 
 
 class LocustTomlConfigParser(configargparse.TomlConfigParser):
-    def parse(self, stream):
+    def parse(self, stream: IO[str]) -> OrderedDict[str, Any]:
         try:
             config = tomllib.loads(stream.read())
         except Exception as e:
             raise configargparse.ConfigFileParserException(f"Couldn't parse TOML file: {e}")
 
-        # convert to dict and filter based on section names
-        result = OrderedDict()
+        result: OrderedDict[str, Any] = OrderedDict()
 
         for section in self.sections:
             data = configargparse.get_toml_section(config, section)
@@ -34,20 +44,13 @@ class LocustTomlConfigParser(configargparse.TomlConfigParser):
                 for key, value in data.items():
                     if isinstance(value, list):
                         result[key] = value
-                    elif value is None:
-                        pass
-                    else:
+                    elif value is not None:
                         result[key] = str(value)
                 break
 
         return result
 
 
-logging.basicConfig(
-    format="[LOCUST-CLOUD] %(levelname)s: %(message)s",
-    level=logging.INFO,
-)
-
 parser = configargparse.ArgumentParser(
     default_config_files=[
         "~/.locust.conf",
@@ -68,7 +71,6 @@ parser = configargparse.ArgumentParser(
 
 Example: locust-cloud -f locust.py --aws-access-key-id 123 --aws-secret-access-key 456""",
     epilog="""Any parameters not listed here are forwarded to locust master unmodified, so go ahead and use things like --users, --host, --run-time, ...
-
 Locust config can also be set using config file (~/.locust.conf, locust.conf, pyproject.toml, ~/.cloud.conf or cloud.conf).
 Parameters specified on command line override env vars, which in turn override config files.""",
     add_config_file_help=False,
@@ -80,7 +82,7 @@ parser.add_argument(
     "--locustfile",
     metavar="<filename>",
     default="locustfile.py",
-    help="The Python file or module that contains your test, e.g. 'my_test.py'. Defaults to 'locustfile'.",
+    help="The Python file or module that contains your test, e.g. 'my_test.py'. Defaults to 'locustfile.py'.",
     env_var="LOCUST_LOCUSTFILE",
 )
 parser.add_argument(
@@ -90,30 +92,6 @@ parser.add_argument(
     help="Optional requirements.txt file that contains your external libraries.",
     env_var="LOCUST_REQUIREMENTS",
 )
-parser.add_argument(
-    "--aws-access-key-id",
-    type=str,
-    help="Authentication for deploying with Locust Cloud",
-    env_var="AWS_ACCESS_KEY_ID",
-)
-parser.add_argument(
-    "--aws-secret-access-key",
-    type=str,
-    help="Authentication for deploying with Locust Cloud",
-    env_var="AWS_SECRET_ACCESS_KEY",
-)
-parser.add_argument(
-    "--username",
-    type=str,
-    help="Authentication for deploying with Locust Cloud",
-    env_var="LOCUST_CLOUD_USERNAME",
-)
-parser.add_argument(
-    "--password",
-    type=str,
-    help="Authentication for deploying with Locust Cloud",
-    env_var="LOCUST_CLOUD_PASSWORD",
-)
 parser.add_argument(
     "--aws-region-name",
     type=str,
@@ -125,276 +103,242 @@ parser.add_argument(
     "--kube-cluster-name",
     type=str,
     default=DEFAULT_CLUSTER_NAME,
-    help="Sets the name of the kubernetes cluster",
+    help="Sets the name of the Kubernetes cluster",
     env_var="KUBE_CLUSTER_NAME",
 )
 parser.add_argument(
     "--kube-namespace",
     type=str,
-    default="default",
+    default=DEFAULT_NAMESPACE,
     help="Sets the namespace for scoping the deployed cluster",
     env_var="KUBE_NAMESPACE",
 )
+parser.add_argument(
+    "--aws-access-key-id",
+    type=str,
+    help="Authentication for deploying with Locust Cloud",
+    env_var="AWS_ACCESS_KEY_ID",
+)
+parser.add_argument(
+    "--aws-secret-access-key",
+    type=str,
+    help="Authentication for deploying with Locust Cloud",
+    env_var="AWS_SECRET_ACCESS_KEY",
+)
 
 options, locust_options = parser.parse_known_args()
 
+username = os.environ.get("LOCUST_CLOUD_USERNAME")
+password = os.environ.get("LOCUST_CLOUD_PASSWORD")
 
-def main():
-    aws_access_key_id = options.aws_access_key_id
-    aws_secret_access_key = options.aws_secret_access_key
-    aws_session_token = None
-
-    if not ((options.aws_access_key_id and options.aws_secret_access_key) or (options.username and options.password)):
-        logging.error(
-            "Authentication is required to use Locust Cloud. Ensure your username and password are provided, or provide an aws_access_key_id and aws_secret_access_key directly."
-        )
-        sys.exit(1)
-
-    if options.username and options.password:
-        if options.aws_access_key_id or options.aws_secret_access_key:
-            logging.info("A username and password have been provided, the AWS keys will be ignored.")
-        logging.info("Authenticating...")
-        response = requests.post(
-            f"{LAMBDA}/auth/login", json={"username": options.username, "password": options.password}
-        )
-
-        if response.status_code == 200:
-            credentials = response.json()
-            aws_access_key_id = credentials["aws_access_key_id"]
-            aws_secret_access_key = credentials["aws_secret_access_key"]
-            aws_session_token = credentials["aws_session_token"]
-            cognito_client_id_token = credentials["cognito_client_id_token"]
-        else:
-            logging.error(
-                f"HTTP {response.status_code}/{response.reason} - Response: {response.text} - URL: {response.request.url}"
-            )
-            sys.exit(1)
 
+def main() -> None:
     s3_bucket = f"{options.kube_cluster_name}-{options.kube_namespace}"
+    deployed_pods: list[Any] = []
 
     try:
-        session = boto3.session.Session(
-            region_name=options.aws_region_name,
-            aws_access_key_id=aws_access_key_id,
-            aws_secret_access_key=aws_secret_access_key,
-            aws_session_token=aws_session_token,
-        )
-        locustfile_url = upload_file(
-            session,
-            s3_bucket=s3_bucket,
-            region_name=options.aws_region_name,
-            filename=options.locustfile,
-        )
-        requirements_url = ""
-        if options.requirements:
-            requirements_url = upload_file(
-                session,
-                s3_bucket=s3_bucket,
+        if options.aws_access_key_id and options.aws_secret_access_key:
+            credential_manager = CredentialManager(
+                lambda_url=LAMBDA_URL,
                 region_name=options.aws_region_name,
-                filename=options.requirements,
-                remote_filename="requirements.txt",
+                access_key=options.aws_access_key_id,
+                secret_key=options.aws_secret_access_key,
             )
+        elif username and password:
+            credential_manager = CredentialManager(
+                lambda_url=LAMBDA_URL,
+                username=username,
+                password=password,
+                region_name=options.aws_region_name,
+            )
+        else:
+            logger.error(
+                "Authentication is required to use Locust Cloud. Provide either AWS credentials or set the LOCUST_CLOUD_USERNAME and LOCUST_CLOUD_PASSWORD environment variables."
+            )
+            sys.exit(1)
 
-        deployed_pods = deploy(
-            aws_access_key_id,
-            aws_secret_access_key,
-            aws_session_token,
-            cognito_client_id_token,
-            locustfile_url,
-            region_name=options.aws_region_name,
-            cluster_name=options.kube_cluster_name,
-            namespace=options.kube_namespace,
-            requirements=requirements_url,
-        )
-        stream_pod_logs(
-            session,
-            deployed_pods=deployed_pods,
-            cluster_name=options.kube_cluster_name,
-            namespace=options.kube_namespace,
-        )
-    except KeyboardInterrupt:
-        pass
-    except Exception as e:
-        logging.exception(e)
-        sys.exit(1)
-
-    try:
-        logging.info("Tearing down Locust cloud...")
-        teardown_cluster(
-            aws_access_key_id,
-            aws_secret_access_key,
-            aws_session_token,
-            cognito_client_id_token,
-            region_name=options.aws_region_name,
-            cluster_name=options.kube_cluster_name,
-            namespace=options.kube_namespace,
-        )
-        teardown_s3(
-            session,
-            s3_bucket=s3_bucket,
-        )
-    except Exception as e:
-        logging.error(f"Could not automatically tear down Locust Cloud: {e}")
-        sys.exit(1)
-
-
-def upload_file(session, s3_bucket, region_name, filename, remote_filename=None):
-    if not remote_filename:
-        remote_filename = filename.split("/")[-1]
-
-    logging.debug(f"Uploading {remote_filename}...")
-
-    s3 = session.client("s3")
-
-    try:
-        s3.upload_file(filename, s3_bucket, remote_filename)
-
-        presigned_url = s3.generate_presigned_url(
-            ClientMethod="get_object",
-            Params={"Bucket": s3_bucket, "Key": remote_filename},
-            ExpiresIn=3600,  # 1 hour
-        )
+        credentials = credential_manager.get_current_credentials()
+        cognito_client_id_token: str = credentials["cognito_client_id_token"]
+        aws_access_key_id = credentials.get("access_key")
+        aws_secret_access_key = credentials.get("secret_key")
+        aws_session_token = credentials.get("token")
 
-        return presigned_url
-    except FileNotFoundError:
-        logging.error(f"Could not find '{filename}'")
-        sys.exit(1)
+        if not all([aws_access_key_id, aws_secret_access_key]):
+            logger.error("Authentication failed: Missing AWS credentials.")
+            sys.exit(1)
 
+        logger.info(f"Uploading {options.locustfile} to S3 bucket {s3_bucket}...")
+        s3 = credential_manager.session.client("s3")
+        try:
+            s3.upload_file(options.locustfile, s3_bucket, os.path.basename(options.locustfile))
+            locustfile_url = s3.generate_presigned_url(
+                ClientMethod="get_object",
+                Params={"Bucket": s3_bucket, "Key": os.path.basename(options.locustfile)},
+                ExpiresIn=3600,
+            )
+            logger.info(f"Uploaded {options.locustfile} successfully.")
+        except FileNotFoundError:
+            logger.error(f"File not found: {options.locustfile}")
+            sys.exit(1)
+        except ClientError as e:
+            logger.error(f"Failed to upload {options.locustfile} to S3: {e}")
+            sys.exit(1)
 
-def deploy(
-    aws_access_key_id,
-    aws_secret_access_key,
-    aws_session_token,
-    cognito_client_id_token,
-    locustfile,
-    region_name=None,
-    cluster_name=DEFAULT_CLUSTER_NAME,
-    namespace=None,
-    requirements="",
-):
-    logging.info("Deploying load generators...")
-    locust_env_variables = [
-        {"name": env_variable, "value": str(os.environ[env_variable])}
-        for env_variable in os.environ
-        if env_variable.startswith("LOCUST_") and env_variable not in LOCUST_ENV_VARIABLE_IGNORE_LIST
-    ]
-
-    response = requests.post(
-        f"{LAMBDA}/{cluster_name}",
-        headers={
-            "AWS_ACCESS_KEY_ID": aws_access_key_id,
-            "AWS_SECRET_ACCESS_KEY": aws_secret_access_key,
-            "AWS_SESSION_TOKEN": aws_session_token,
-            "Authorization": f"Bearer {cognito_client_id_token}",
-        },
-        json={
+        requirements_url = ""
+        if options.requirements:
+            logger.info(f"Uploading {options.requirements} to S3 bucket {s3_bucket} as requirements.txt...")
+            try:
+                s3.upload_file(options.requirements, s3_bucket, "requirements.txt")
+                requirements_url = s3.generate_presigned_url(
+                    ClientMethod="get_object",
+                    Params={"Bucket": s3_bucket, "Key": "requirements.txt"},
+                    ExpiresIn=3600,
+                )
+                logger.info(f"Uploaded {options.requirements} successfully.")
+            except FileNotFoundError:
+                logger.error(f"File not found: {options.requirements}")
+                sys.exit(1)
+            except ClientError as e:
+                logger.error(f"Failed to upload {options.requirements} to S3: {e}")
+                sys.exit(1)
+
+        logger.info("Deploying load generators via API Gateway...")
+        locust_env_variables = [
+            {"name": env_variable, "value": str(os.environ[env_variable])}
+            for env_variable in os.environ
+            if env_variable.startswith("LOCUST_") and os.environ[env_variable]
+        ]
+        deploy_endpoint = f"{LAMBDA_URL}/{options.kube_cluster_name}"
+        payload = {
             "locust_args": [
-                {
-                    "name": "LOCUST_LOCUSTFILE",
-                    "value": locustfile,
-                },
-                {"name": "LOCUST_REQUIREMENTS_URL", "value": requirements},
+                {"name": "LOCUST_LOCUSTFILE", "value": locustfile_url},
+                {"name": "LOCUST_REQUIREMENTS_URL", "value": requirements_url},
                 {"name": "LOCUST_FLAGS", "value": " ".join(locust_options)},
                 *locust_env_variables,
             ]
-        },
-        params={"region_name": region_name, "namespace": namespace},
-    )
-
-    if response.status_code != 200:
-        if response.json().get("message"):
-            sys.stderr.write(f"{response.json().get('message')}\n")
+        }
+        headers = {
+            "Authorization": f"Bearer {cognito_client_id_token}",
+            "Content-Type": "application/json",
+            "AWS_ACCESS_KEY_ID": aws_access_key_id,
+            "AWS_SECRET_ACCESS_KEY": aws_secret_access_key,
+            "AWS_SESSION_TOKEN": aws_session_token if aws_session_token else "",
+        }
+        try:
+            response = requests.post(deploy_endpoint, json=payload, headers=headers)
+        except requests.exceptions.RequestException as e:
+            logger.error(f"HTTP request failed: {e}")
+            sys.exit(1)
+        if response.status_code == 200:
+            deployed_pods = response.json().get("pods", [])
+            logger.info("Load generators deployed successfully.")
         else:
-            sys.stderr.write("An unkown error occured during deployment. Please contact an administrator\n")
-
+            logger.error(
+                f"HTTP {response.status_code}/{response.reason} - Response: {response.text} - URL: {response.request.url}"
+            )
+            sys.exit(1)
+    except CredentialError as ce:
+        logger.error(f"Credential error: {ce}")
         sys.exit(1)
 
-    logging.info("Load generators deployed.")
-    return response.json()["pods"]
-
-
-def stream_pod_logs(
-    session,
-    deployed_pods,
-    cluster_name=DEFAULT_CLUSTER_NAME,
-    namespace=None,
-):
-    logging.info("Waiting for pods to be ready...")
-    client = session.client("logs")
-
-    log_group_name = f"/eks/{cluster_name}-{namespace}"
-    master_pod_name = [pod_name for pod_name in deployed_pods if "master" in pod_name][0]
-
-    log_stream = None
-    while log_stream is None:
+    try:
+        logger.info("Waiting for pods to be ready...")
+        log_group_name = f"/eks/{options.kube_cluster_name}-{options.kube_namespace}"
+        master_pod_name = next((pod for pod in deployed_pods if "master" in pod), None)
+        if not master_pod_name:
+            logger.error("Master pod not found among deployed pods.")
+            sys.exit(1)
+        log_stream: str | None = None
+        while log_stream is None:
+            try:
+                client = credential_manager.session.client("logs")
+                response = client.describe_log_streams(
+                    logGroupName=log_group_name,
+                    logStreamNamePrefix=f"from-fluent-bit-kube.var.log.containers.{master_pod_name}",
+                )
+                all_streams = response.get("logStreams", [])
+                if all_streams:
+                    log_stream = all_streams[0].get("logStreamName")
+                else:
+                    time.sleep(1)
+            except ClientError as e:
+                logger.error(f"Error describing log streams: {e}")
+                time.sleep(5)
+        logger.info("Pods are ready, switching to Locust logs.")
+
+        timestamp = int((datetime.now(UTC) - timedelta(minutes=5)).timestamp() * 1000)
+
+        while True:
+            try:
+                client = credential_manager.session.client("logs")
+                response = client.get_log_events(
+                    logGroupName=log_group_name,
+                    logStreamName=log_stream,
+                    startTime=timestamp,
+                    startFromHead=True,
+                )
+                for event in response.get("events", []):
+                    message = event.get("message", "")
+                    event_timestamp = event.get("timestamp", timestamp) + 1
+                    try:
+                        message_json = json.loads(message)
+                        if "log" in message_json:
+                            print(message_json["log"])
+                    except json.JSONDecodeError:
+                        print(message)
+                    timestamp = event_timestamp
+                time.sleep(5)
+            except ClientError as e:
+                error_code = e.response.get("Error", {}).get("Code", "")
+                if error_code == "ExpiredTokenException":
+                    logger.warning("AWS session token expired during log streaming. Refreshing credentials...")
+                time.sleep(5)
+    except KeyboardInterrupt:
+        logger.debug("Interrupted by user.")
+    except Exception as e:
+        logger.exception(e)
+        sys.exit(1)
+    finally:
         try:
-            response = client.describe_log_streams(
-                logGroupName=log_group_name,
-                logStreamNamePrefix=f"from-fluent-bit-kube.var.log.containers.{master_pod_name}",
+            logger.info("Tearing down Locust cloud...")
+            credential_manager.refresh_credentials()
+            refreshed_credentials = credential_manager.get_current_credentials()
+
+            headers = {
+                "AWS_ACCESS_KEY_ID": refreshed_credentials.get("access_key", ""),
+                "AWS_SECRET_ACCESS_KEY": refreshed_credentials.get("secret_key", ""),
+                "Authorization": f"Bearer {refreshed_credentials.get('cognito_client_id_token', '')}",
+            }
+
+            token = refreshed_credentials.get("token")
+            if token:
+                headers["AWS_SESSION_TOKEN"] = token
+
+            response = requests.delete(
+                f"{LAMBDA_URL}/{options.kube_cluster_name}",
+                headers=headers,
+                params={"namespace": options.kube_namespace} if options.kube_namespace else {},
            )
-            all_streams = response.get("logStreams")
-            if all_streams:
-                log_stream = all_streams[0].get("logStreamName")
+
+            if response.status_code != 200:
+                logger.error(
+                    f"HTTP {response.status_code}/{response.reason} - Response: {response.text} - URL: {response.request.url}"
+                )
             else:
-                time.sleep(1)
-        except ClientError:
-            # log group name does not exist yet
-            time.sleep(1)
-            continue
-
-    logging.info("Pods are ready, switching to Locust logs.")
-
-    timestamp = int((datetime.now() - timedelta(minutes=5)).timestamp())
-    while True:
-        response = client.get_log_events(
-            logGroupName=log_group_name,
-            logStreamName=log_stream,
-            startTime=timestamp,
-            startFromHead=True,
-        )
-
-        for event in response["events"]:
-            message = event["message"]
-            timestamp = event["timestamp"] + 1
+                logger.info("Cluster teardown initiated successfully.")
+        except Exception as e:
+            logger.error(f"Could not automatically tear down Locust Cloud: {e}")
 
-            try:
-                message = json.loads(message)
-                if "log" in message:
-                    print(message["log"])
-            except json.JSONDecodeError:
-                pass
-
-            time.sleep(5)
-
-
-def teardown_cluster(
-    aws_access_key_id,
-    aws_secret_access_key,
-    aws_session_token,
-    cognito_client_id_token,
-    region_name=None,
-    cluster_name=DEFAULT_CLUSTER_NAME,
-    namespace=None,
-):
-    response = requests.delete(
-        f"{LAMBDA}/{cluster_name}",
-        headers={
-            "AWS_ACCESS_KEY_ID": aws_access_key_id,
-            "AWS_SECRET_ACCESS_KEY": aws_secret_access_key,
-            "AWS_SESSION_TOKEN": aws_session_token,
-            "Authorization": f"Bearer {cognito_client_id_token}",
-        },
-        params={"region_name": region_name, "namespace": namespace},
-    )
-
-    if response.status_code != 200:
-        logging.error(
-            f"HTTP {response.status_code}/{response.reason} - Response: {response.text} - URL: {response.request.url}"
-        )
-        sys.exit(1)
+        try:
+            logger.info(f"Cleaning up S3 bucket: {s3_bucket}")
+            s3 = credential_manager.session.resource("s3")
+            bucket = s3.Bucket(s3_bucket)
+            bucket.objects.all().delete()
+            logger.info(f"S3 bucket {s3_bucket} cleaned up successfully.")
+        except ClientError as e:
+            logger.error(f"Failed to clean up S3 bucket {s3_bucket}: {e}")
+            sys.exit(1)
 
 
-def teardown_s3(session, s3_bucket):
-    s3 = session.resource("s3")
-    bucket = s3.Bucket(s3_bucket)
-    bucket.objects.delete()
+if __name__ == "__main__":
+    main()
locust_cloud/constants.py ADDED
@@ -0,0 +1,5 @@
+DEFAULT_REGION_NAME = "eu-north-1"
+DEFAULT_CLUSTER_NAME = "locust"
+DEFAULT_NAMESPACE = "default"
+
+LAMBDA_URL = "https://deployer.locust.cloud/1"
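The new cloud.py also imports CredentialError and CredentialManager from locust_cloud/credential_manager.py, a module added in this release but not shown in this diff. The sketch below is only an inference from the call sites above (constructor keywords, .session, get_current_credentials(), refresh_credentials()) and from the /auth/login endpoint used by the old code; the real implementation shipped in the wheel will differ.

    # Hypothetical sketch of the interface cloud.py relies on; not the actual module.
    import boto3
    import requests


    class CredentialError(Exception):
        """Raised when authentication against the deployer API fails."""


    class CredentialManager:
        def __init__(self, lambda_url, region_name=None, access_key=None,
                     secret_key=None, username=None, password=None):
            self.lambda_url = lambda_url
            self.region_name = region_name
            self.username = username
            self.password = password
            # Direct AWS keys are used as-is. How the key-based path obtains its
            # "cognito_client_id_token" is not visible in this diff and is omitted here.
            self.credentials = {"access_key": access_key, "secret_key": secret_key}
            self.refresh_credentials()

        def refresh_credentials(self):
            if self.username and self.password:
                # The old cloud.py logged in via {LAMBDA}/auth/login; assume the
                # manager does something similar and maps the response fields.
                response = requests.post(
                    f"{self.lambda_url}/auth/login",
                    json={"username": self.username, "password": self.password},
                )
                if response.status_code != 200:
                    raise CredentialError(response.text)
                data = response.json()
                self.credentials = {
                    "access_key": data.get("aws_access_key_id"),
                    "secret_key": data.get("aws_secret_access_key"),
                    "token": data.get("aws_session_token"),
                    "cognito_client_id_token": data.get("cognito_client_id_token"),
                }
            # A boto3 session is (re)built so cloud.py can call .session.client("s3"),
            # .session.client("logs") and .session.resource("s3").
            self.session = boto3.session.Session(
                region_name=self.region_name,
                aws_access_key_id=self.credentials.get("access_key"),
                aws_secret_access_key=self.credentials.get("secret_key"),
                aws_session_token=self.credentials.get("token"),
            )

        def get_current_credentials(self):
            return self.credentials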