outerbounds 0.3.174__py3-none-any.whl → 0.3.175rc0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,293 @@
1
+ import json
2
+ import os
3
+ from outerbounds._vendor import yaml
4
+ from typing import Dict, Any
5
+ from .cli_to_config import build_config_from_options
6
+
7
+ CODE_PACKAGE_PREFIX = "mf.obp-apps"
8
+
9
+ CAPSULE_DEBUG = os.environ.get("OUTERBOUNDS_CAPSULE_DEBUG", False)
10
+
11
+
12
+ class classproperty(property):
13
+ def __get__(self, owner_self, owner_cls):
14
+ return self.fget(owner_cls)
15
+
16
+
17
+ class AppConfigError(Exception):
18
+ """Exception raised when app configuration is invalid."""
19
+
20
+ pass
21
+
22
+
23
+ class AuthType:
24
+ BROWSER = "Browser"
25
+ API = "API"
26
+
27
+ @classmethod
28
+ def enums(cls):
29
+ return [cls.BROWSER, cls.API]
30
+
31
+ @classproperty
32
+ def default(cls):
33
+ return cls.BROWSER
34
+
35
+
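The classproperty helper above exists so that AuthType.default can be read directly off the class, without instantiating it. A minimal sketch of how these definitions behave:

    AuthType.enums()    # -> ["Browser", "API"]
    AuthType.default    # -> "Browser"  (resolved through classproperty, no instance needed)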
36
+ class AppConfig:
37
+ """Class representing an Outerbounds App configuration."""
38
+
39
+ def __init__(self, config_dict: Dict[str, Any]):
40
+ """Initialize configuration from a dictionary."""
41
+ self.config = config_dict or {}
42
+ self.schema = self._load_schema()
43
+ self._final_state = {}
44
+
45
+ def set_state(self, key, value):
46
+ self._final_state[key] = value
47
+ return self
48
+
49
+ def get_state(self, key, default=None):
50
+ return self._final_state.get(key, self.config.get(key, default))
51
+
52
+ def dump_state(self):
53
+ x = {k: v for k, v in self.config.items()}
54
+ for k, v in self._final_state.items():
55
+ x[k] = v
56
+ return x
57
+
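set_state layers deploy-time values on top of the user-supplied config: get_state prefers the final state and falls back to the original config, and dump_state merges the two with the final state winning. A small illustrative sketch, assuming the packaged config_schema.yaml is available (the constructor loads it) and using made-up keys:

    cfg = AppConfig({"name": "myapp", "port": 8000})
    cfg.set_state("image", "python:3.11").set_state("port", 8080)
    cfg.get_state("name")   # -> "myapp"  (falls back to the original config)
    cfg.get_state("port")   # -> 8080     (final state wins)
    cfg.dump_state()        # -> {"name": "myapp", "port": 8080, "image": "python:3.11"}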
58
+ @staticmethod
59
+ def _load_schema():
60
+ """Load the configuration schema from the YAML file."""
61
+ schema_path = os.path.join(os.path.dirname(__file__), "config_schema.yaml")
62
+ with open(schema_path, "r") as f:
63
+ return yaml.safe_load(f)
64
+
65
+ def get(self, key: str, default: Any = None) -> Any:
66
+ """Get a configuration value by key."""
67
+ return self.config.get(key, default)
68
+
69
+ def validate(self) -> None:
70
+ """Validate the configuration against the schema."""
71
+ self._validate_required_fields()
72
+ self._validate_field_types()
73
+ self._validate_field_constraints()
74
+
75
+ def set_deploy_defaults(self, packaging_directory: str) -> None:
76
+ """Set default values for fields that are not provided."""
77
+ if not self.config.get("auth"):
78
+ self.config["auth"] = {}
79
+ if not self.config["auth"].get("public"):
80
+ self.config["auth"]["public"] = True
81
+ if not self.config["auth"].get("type"):
82
+ self.config["auth"]["type"] = AuthType.BROWSER
83
+
84
+ if not self.config.get("health_check"):
85
+ self.config["health_check"] = {}
86
+ if not self.config["health_check"].get("enabled"):
87
+ self.config["health_check"]["enabled"] = False
88
+
89
+ if not self.config.get("resources"):
90
+ self.config["resources"] = {}
91
+ if not self.config["resources"].get("cpu"):
92
+ self.config["resources"]["cpu"] = 1
93
+ if not self.config["resources"].get("memory"):
94
+ self.config["resources"]["memory"] = "4096Mi"
95
+ if not self.config["resources"].get("disk"):
96
+ self.config["resources"]["disk"] = "20Gi"
97
+
98
+ if not self.config.get("replicas", None):
99
+ self.config["replicas"] = {
100
+ "min": 1,
101
+ "max": 1,
102
+ }
103
+ else:
104
+ max_is_set = self.config["replicas"].get("max", None) is not None
105
+ min_is_set = self.config["replicas"].get("min", None) is not None
106
+ if max_is_set and not min_is_set:
107
+ # If users want to set 0 replicas for min,
108
+ # then they need to explicitly set min to 0.
109
+ self.config["replicas"]["min"] = 1  # At least set 1 replica
110
+ if min_is_set and not max_is_set:
111
+ # In situations where max is not set explicitly, we can
112
+ # set max to min.
113
+ self.config["replicas"]["max"] = self.config["replicas"].get("min")
114
+
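To make the defaulting above concrete, here is a hedged sketch (again assuming the packaged schema file is present; the name key is illustrative, and the packaging directory argument is not used by the defaults shown here):

    cfg = AppConfig({"name": "myapp"})
    cfg.set_deploy_defaults(".")
    cfg.config["resources"]   # -> {"cpu": 1, "memory": "4096Mi", "disk": "20Gi"}
    cfg.config["replicas"]    # -> {"min": 1, "max": 1}

    cfg = AppConfig({"name": "myapp", "replicas": {"max": 5}})
    cfg.set_deploy_defaults(".")
    cfg.config["replicas"]    # -> {"min": 1, "max": 5}; min=0 must be requested explicitly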
115
+ def _validate_required_fields(self) -> None:
116
+ """Validate that all required fields are present."""
117
+ required_fields = self.schema.get("required", [])
118
+ for field in required_fields:
119
+ if field not in self.config:
120
+ raise AppConfigError(
121
+ f"Required field '{field}' is missing from the configuration."
122
+ )
123
+
124
+ def _validate_field_types(self) -> None:
125
+ """Validate that fields have correct types."""
126
+ properties = self.schema.get("properties", {})
127
+
128
+ for field, value in self.config.items():
129
+ if field not in properties:
130
+ raise AppConfigError(f"Unknown field '{field}' in configuration.")
131
+
132
+ field_schema = properties[field]
133
+ field_type = field_schema.get("type")
134
+
135
+ if field_type == "string" and not isinstance(value, str):
136
+ raise AppConfigError(f"Field '{field}' must be a string.")
137
+
138
+ elif field_type == "integer" and not isinstance(value, int):
139
+ raise AppConfigError(f"Field '{field}' must be an integer.")
140
+
141
+ elif field_type == "boolean" and not isinstance(value, bool):
142
+ raise AppConfigError(f"Field '{field}' must be a boolean.")
143
+
144
+ elif field_type == "array" and not isinstance(value, list):
145
+ raise AppConfigError(f"Field '{field}' must be an array.")
146
+
147
+ elif field_type == "object" and not isinstance(value, dict):
148
+ raise AppConfigError(f"Field '{field}' must be an object.")
149
+
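The validators above are driven entirely by config_schema.yaml, which ships with the package but is not part of this diff. A hypothetical fragment, shown as the dictionary yaml.safe_load would return, limited to the keys the code actually reads (required, properties, type, the numeric/length constraints, and allow_union used further below):

    schema = {
        "required": ["name"],  # hypothetical; the real required list lives in config_schema.yaml
        "properties": {
            "name": {"type": "string", "maxLength": 20},
            "port": {"type": "integer", "minimum": 1, "maximum": 65535},
            "tags": {"type": "array", "allow_union": True},
        },
    }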
150
+ def _validate_field_constraints(self) -> None:
151
+ """Validate field-specific constraints."""
152
+ properties = self.schema.get("properties", {})
153
+
154
+ # Validate name
155
+ if "name" in self.config:
156
+ name = self.config["name"]
157
+ max_length = properties["name"].get("maxLength", 20)
158
+ if len(name) > max_length:
159
+ raise AppConfigError(
160
+ f"App name '{name}' exceeds maximum length of {max_length} characters."
161
+ )
162
+
163
+ # Validate port
164
+ if "port" in self.config:
165
+ port = self.config["port"]
166
+ min_port = properties["port"].get("minimum", 1)
167
+ max_port = properties["port"].get("maximum", 65535)
168
+ if port < min_port or port > max_port:
169
+ raise AppConfigError(
170
+ f"Port number {port} is outside valid range ({min_port}-{max_port})."
171
+ )
172
+
173
+ # Validate dependencies (only one type allowed)
174
+ if "dependencies" in self.config:
175
+ deps = self.config["dependencies"]
176
+ if not isinstance(deps, dict):
177
+ raise AppConfigError("Dependencies must be an object.")
178
+
179
+ valid_dep_types = [
180
+ "from_requirements_file",
181
+ "from_pyproject_toml",
182
+ ]
183
+
184
+ found_types = [dep_type for dep_type in valid_dep_types if dep_type in deps]
185
+
186
+ if len(found_types) > 1:
187
+ raise AppConfigError(
188
+ f"You can only specify one mode of specifying dependencies. You have specified : {found_types} . Please only set one."
189
+ )
190
+
191
+ # Validate that each tag has exactly one key
192
+ if "tags" in self.config:
193
+ tags = self.config["tags"]
194
+ for tag in tags:
195
+ if not isinstance(tag, dict):
196
+ raise AppConfigError(
197
+ "Each tag must be a dictionary. %s is of type %s"
198
+ % (str(tag), type(tag))
199
+ )
200
+ if len(tag.keys()) != 1:
201
+ raise AppConfigError(
202
+ "Each tag must have exactly one key-value pair. Tag %s has %d key-value pairs."
203
+ % (str(tag), len(tag.keys()))
204
+ )
205
+ if "replicas" in self.config:
206
+ replicas = self.config["replicas"]
207
+ if not isinstance(replicas, dict):
208
+ raise AppConfigError("Replicas must be an object.")
209
+ max_is_set = self.config["replicas"].get("max", None) is not None
210
+ if max_is_set:
211
+ if replicas.get("max") == 0:
212
+ raise AppConfigError("Max replicas must be greater than 0.")
213
+
214
+ if replicas.get("min", 1) > replicas.get("max"):
215
+ raise AppConfigError(
216
+ "Min replicas must be less than max replicas. %s > %s"
217
+ % (replicas.get("min", 1), replicas.get("max", 1))
218
+ )
219
+
220
+ def to_dict(self) -> Dict[str, Any]:
221
+ """Return the configuration as a dictionary."""
222
+ return self.config
223
+
224
+ def to_yaml(self) -> str:
225
+ """Return the configuration as a YAML string."""
226
+ return yaml.dump(self.config, default_flow_style=False)
227
+
228
+ def to_json(self) -> str:
229
+ """Return the configuration as a JSON string."""
230
+ return json.dumps(self.config, indent=2)
231
+
232
+ @classmethod
233
+ def from_file(cls, file_path: str) -> "AppConfig":
234
+ """Create a configuration from a file."""
235
+ if not os.path.exists(file_path):
236
+ raise AppConfigError(f"Configuration file '{file_path}' does not exist.")
237
+
238
+ with open(file_path, "r") as f:
239
+ try:
240
+ config_dict = yaml.safe_load(f)
241
+ except Exception as e:
242
+ raise AppConfigError(f"Failed to parse configuration file: {e}")
243
+
244
+ return cls(config_dict)
245
+
246
+ def update_from_cli_options(self, options):
247
+ """
248
+ Update configuration from CLI options using the same logic as build_config_from_options.
249
+ This ensures consistent handling of CLI options whether they come from a config file
250
+ or direct CLI input.
251
+ """
252
+ cli_config = build_config_from_options(options)
253
+
254
+ # Process each field using allow_union property
255
+ for key, value in cli_config.items():
256
+ if key in self.schema.get("properties", {}):
257
+ self._update_field(key, value)
258
+
259
+ return self
260
+
261
+ def _update_field(self, field_name, new_value):
262
+ """Update a field based on its allow_union property."""
263
+ properties = self.schema.get("properties", {})
264
+
265
+ # Skip if field doesn't exist in schema
266
+ if field_name not in properties:
267
+ return
268
+
269
+ field_schema = properties[field_name]
270
+ allow_union = field_schema.get("allow_union", False)
271
+
272
+ # If field doesn't exist in config, just set it
273
+ if field_name not in self.config:
274
+ self.config[field_name] = new_value
275
+ return
276
+
277
+ # If allow_union is True, merge values based on type
278
+ if allow_union:
279
+ current_value = self.config[field_name]
280
+
281
+ if isinstance(current_value, list) and isinstance(new_value, list):
282
+ # For lists, append new items
283
+ self.config[field_name].extend(new_value)
284
+ elif isinstance(current_value, dict) and isinstance(new_value, dict):
285
+ # For dicts, update with new values
286
+ self.config[field_name].update(new_value)
287
+ else:
288
+ # For other types, replace with new value
289
+ self.config[field_name] = new_value
290
+ else:
291
+ raise AppConfigError(
292
+ f"Field '{field_name}' does not allow union. Current value: {self.config[field_name]}, new value: {new_value}"
293
+ )
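A short sketch of how update_from_cli_options and _update_field combine a config file with CLI input. Here options stands in for the parsed CLI options, and the allow_union settings are hypothetical examples (the real values live in config_schema.yaml):

    cfg = AppConfig.from_file("app.yaml")    # say it contains tags: [{"team": "ml"}] and port: 8000
    cfg.update_from_cli_options(options)     # say the CLI adds another tag and a different port
    # tags (allow_union: true, both lists)   -> existing list extended with the CLI tag
    # port (allow_union: false, already set) -> AppConfigError instead of a silent override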
File without changes
@@ -0,0 +1,373 @@
1
+ import json
2
+ import os
3
+ import pathlib
4
+ import requests
5
+ import time
6
+ import shlex
7
+ from typing import Optional
8
+ from .utils import TODOException, safe_requests_wrapper
9
+ from .app_config import AppConfig, CAPSULE_DEBUG, AuthType
10
+ from . import experimental
11
+
12
+
13
+ class CapsuleStateMachine:
14
+ """
15
+ Since capsules are a relatively new concept, we infer state transitions from the conditions and the
16
+ availability of certain fields in the status dictionary.
17
+ """
18
+
19
+ CONDITIONS = ["Ready", "DeploymentReplicasAvailable", "IngressObjectReady"]
20
+
21
+ def __init__(self, capsule_id: str):
22
+ self._capsule_id = capsule_id
23
+ self._status_trail = []
24
+
25
+ def is_completely_new_capsule(self):
26
+ # This is a heuristic and not fully tested.
27
+ # If we create a completely new capsule then the status
28
+ # field might be a completely empty dictionary.
29
+ assert (
30
+ len(self._status_trail) > 0
31
+ ), "status_trail cannot be none to infer if its a new capsule"
32
+ return self._empty_status(self._status_trail[0].get("status"))
33
+
34
+ def get_status_trail(self):
35
+ return self._status_trail
36
+
37
+ @staticmethod
38
+ def _empty_status(status):
39
+ if json.dumps(status) == "{}":
40
+ return True
41
+ return False
42
+
43
+ @staticmethod
44
+ def _parse_conditions(conditions):
45
+ curr_conditions = {}
46
+ for condition in conditions:
47
+ curr_conditions[condition["type"]] = condition["status"]
48
+ return curr_conditions
49
+
50
+ def add_status(self, status: dict):
51
+ assert isinstance(status, dict), "TODO: Make this check somewhere else"
52
+ self._status_trail.append({"timestamp": time.time(), "status": status})
53
+
54
+ @staticmethod
55
+ def _condition_change_emoji(previous_condition_status, current_condition_status):
56
+ if previous_condition_status == current_condition_status:
57
+ if previous_condition_status == "True":
58
+ return "✅"
59
+ else:
60
+ return "❌"
61
+ if previous_condition_status == "True" and current_condition_status == "False":
62
+ return "🔴 --> 🟢"
63
+ if previous_condition_status == "False" and current_condition_status == "True":
64
+ return "🚀"
65
+ return "🟡"
66
+
67
+ @property
68
+ def current_status(self):
69
+ return self._status_trail[-1].get("status")
70
+
71
+ @property
72
+ def out_of_cluster_url(self):
73
+ url = self.current_status.get("accessInfo", {}).get("outOfClusterURL", None)
74
+ if url is not None:
75
+ return f"https://{url}"
76
+ return None
77
+
78
+ @property
79
+ def in_cluster_url(self):
80
+ url = self.current_status.get("accessInfo", {}).get("inClusterURL", None)
81
+ if url is not None:
82
+ return f"https://{url}"
83
+ return None
84
+
85
+ @property
86
+ def ready_to_serve_traffic(self):
87
+ if self.current_status.get("readyToServeTraffic", False):
88
+ return any(
89
+ i is not None for i in [self.out_of_cluster_url, self.in_cluster_url]
90
+ )
91
+ return False
92
+
93
+ @property
94
+ def available_replicas(self):
95
+ return self.current_status.get("availableReplicas", 0)
96
+
97
+ def report_current_status(self, logger):
98
+ if len(self._status_trail) < 2:
99
+ return
100
+ previous_status, current_status = self._status_trail[-2].get(
101
+ "status"
102
+ ), self._status_trail[-1].get("status")
103
+ if self._empty_status(current_status):
104
+ return
105
+
106
+ if self._empty_status(previous_status):
107
+ logger("💊 %s Deployment has started ... 🚀" % self._capsule_id)
108
+ return
109
+
110
+ def check_for_debug(self, state_dir: str):
111
+ if CAPSULE_DEBUG:
112
+ debug_path = os.path.join(
113
+ state_dir, f"debug_capsule_{self._capsule_id}.json"
114
+ )
115
+ with open(debug_path, "w") as f:
116
+ json.dump(self._status_trail, f, indent=4)
117
+
118
+
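A minimal sketch of how the state machine is driven: statuses are appended as they are polled and the convenience properties read the most recent one. The capsule id and status payload below are made-up examples using only the fields the class looks for:

    sm = CapsuleStateMachine("c-123456")
    sm.add_status({})   # a brand-new capsule typically reports an empty status first
    sm.add_status(
        {
            "readyToServeTraffic": True,
            "availableReplicas": 1,
            "accessInfo": {"outOfClusterURL": "myapp.example.outerbounds.xyz"},
        }
    )
    sm.is_completely_new_capsule()   # -> True (the first recorded status was {})
    sm.ready_to_serve_traffic        # -> True
    sm.out_of_cluster_url            # -> "https://myapp.example.outerbounds.xyz"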
119
+ class CapsuleInput:
120
+ @classmethod
121
+ def construct_exec_command(cls, commands: list[str]):
122
+ commands = ["set -eEuo pipefail"] + commands
123
+ command_string = "\n".join(commands)
124
+ # First construct a base64-encoded string of the quoted command.
125
+ # One of the reasons we don't directly pass the command string to the backend with a `\n` join
126
+ # is because the backend controller doesn't play nice when the command is a multi-line string.
127
+ # So we encode it to a base64 string and then decode it back to a command string at runtime to provide to
128
+ # `bash -c`. The ideal thing to have done is to run "bash -c {shlex.quote(command_string)}" and call it a day
129
+ # but the backend controller yields the following error:
130
+ # `error parsing template: error converting YAML to JSON: yaml: line 111: mapping values are not allowed in this context`
131
+ # So we go to great lengths to ensure the command is provided in base64 to avoid any issues with the backend controller.
132
+ import base64
133
+
134
+ encoded_command = base64.b64encode(command_string.encode()).decode()
135
+ decode_cmd = f"echo {encoded_command} | base64 -d > ./_ob_app_run.sh"
136
+ return (
137
+ f"bash -c '{decode_cmd} && cat ./_ob_app_run.sh && bash ./_ob_app_run.sh'"
138
+ )
139
+
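Concretely, for a hypothetical command list the method above produces a single-line bash -c entrypoint that rehydrates the original multi-line script at runtime:

    entrypoint = CapsuleInput.construct_exec_command(
        ["pip install -r requirements.txt", "python app.py"]
    )
    # The base64 payload decodes back to:
    #   set -eEuo pipefail
    #   pip install -r requirements.txt
    #   python app.py
    # and the returned entrypoint has the form:
    #   bash -c 'echo <base64> | base64 -d > ./_ob_app_run.sh && cat ./_ob_app_run.sh && bash ./_ob_app_run.sh'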
140
+ @classmethod
141
+ def _marshal_environment_variables(cls, app_config: AppConfig):
142
+ envs = app_config.get_state("environment", {}).copy()
143
+ _return = []
144
+ for k, v in envs.items():
145
+ _v = v
146
+ if isinstance(v, dict):
147
+ _v = json.dumps(v)
148
+ elif isinstance(v, list):
149
+ _v = json.dumps(v)
150
+ else:
151
+ _v = str(v)
152
+ _return.append(
153
+ {
154
+ "name": k,
155
+ "value": _v,
156
+ }
157
+ )
158
+ return _return
159
+
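For reference, a sketch of what the marshaling above produces for a hypothetical environment block: dicts and lists are JSON-encoded, everything else is stringified.

    cfg = AppConfig({"name": "myapp"})
    cfg.set_state("environment", {"DEBUG": True, "WORKERS": 2, "FLAGS": {"beta": True}})
    CapsuleInput._marshal_environment_variables(cfg)
    # -> [{"name": "DEBUG", "value": "True"},
    #     {"name": "WORKERS", "value": "2"},
    #     {"name": "FLAGS", "value": '{"beta": true}'}]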
160
+ @classmethod
161
+ def from_app_config(cls, app_config: AppConfig):
162
+ gpu_resource = app_config.get_state("resources").get("gpu")
163
+ resources = {}
164
+ shared_memory = app_config.get_state("resources").get("shared_memory")
165
+ if gpu_resource:
166
+ resources["gpu"] = gpu_resource
167
+ if shared_memory:
168
+ resources["sharedMemory"] = shared_memory
169
+
170
+ _scheduling_config = {}
171
+ if app_config.get_state("compute_pools", None):
172
+ _scheduling_config["computePools"] = [
173
+ {"name": x} for x in app_config.get_state("compute_pools")
174
+ ]
175
+
176
+ return {
177
+ "perimeter": app_config.get_state("perimeter"),
178
+ # "environmentPath": "environments/python3.9",
179
+ "codePackagePath": app_config.get_state("code_package_url"),
180
+ "image": app_config.get_state("image"),
181
+ "resourceIntegrations": [
182
+ {"name": x} for x in app_config.get_state("secrets", [])
183
+ ],
184
+ "resourceConfig": {
185
+ "cpu": str(app_config.get_state("resources").get("cpu")),
186
+ "memory": str(app_config.get_state("resources").get("memory")),
187
+ "ephemeralStorage": str(app_config.get_state("resources").get("disk")),
188
+ **resources,
189
+ },
190
+ "autoscalingConfig": {
191
+ "minReplicas": app_config.get_state("replicas", {}).get("min", 1),
192
+ "maxReplicas": app_config.get_state("replicas", {}).get("max", 1),
193
+ },
194
+ **_scheduling_config,
195
+ "containerStartupConfig": {
196
+ "entrypoint": self.construct_exec_command(
197
+ app_config.get_state("commands")
198
+ )
199
+ },
200
+ "environmentVariables": self._marshal_environment_variables(app_config),
201
+ # "assets": [{"name": "startup-script.sh"}],
202
+ "authConfig": {
203
+ "authType": app_config.get_state("auth").get("type"),
204
+ "publicToDeployment": app_config.get_state("auth").get("public"),
205
+ },
206
+ "tags": [
207
+ dict(key=k, value=v)
208
+ for tag in app_config.get_state("tags", [])
209
+ for k, v in tag.items()
210
+ ],
211
+ "port": app_config.get_state("port"),
212
+ "displayName": app_config.get_state("name"),
213
+ }
214
+
215
+
216
+ def create_capsule(capsule_input: dict, api_url: str, request_headers: dict):
217
+ _data = json.dumps(capsule_input)
218
+ response = safe_requests_wrapper(
219
+ requests.post,
220
+ api_url,
221
+ data=_data,
222
+ headers=request_headers,
223
+ conn_error_retries=2,
224
+ retryable_status_codes=[409], # todo : verify me
225
+ )
226
+
227
+ if response.status_code > 400:
228
+ raise TODOException(
229
+ f"Failed to create capsule: {response.status_code} {response.text}"
230
+ )
231
+ return response.json()
232
+
233
+
234
+ def list_capsules(api_url: str, request_headers: dict):
235
+ response = safe_requests_wrapper(
236
+ requests.get,
237
+ api_url,
238
+ headers=request_headers,
239
+ retryable_status_codes=[409], # todo : verify me
240
+ conn_error_retries=3,
241
+ )
242
+ if response.status_code > 400:
243
+ raise TODOException(
244
+ f"Failed to list capsules: {response.status_code} {response.text}"
245
+ )
246
+ return response.json()
247
+
248
+
249
+ def get_capsule(capsule_id: str, api_url: str, request_headers: dict):
250
+ # params = {"instance_id": capsule_id}
251
+ url = os.path.join(api_url, capsule_id)
252
+ response = safe_requests_wrapper(
253
+ requests.get,
254
+ url,
255
+ headers=request_headers,
256
+ retryable_status_codes=[409, 404], # todo : verify me
257
+ conn_error_retries=3,
258
+ )
259
+ if response.status_code > 400:
260
+ raise TODOException(
261
+ f"Failed to get capsule: {response.status_code} {response.text}"
262
+ )
263
+ return response.json()
264
+
265
+
266
+ def delete_capsule(capsule_id: str, api_url: str, request_headers: dict):
267
+ response = safe_requests_wrapper(
268
+ requests.delete,
269
+ os.path.join(api_url, capsule_id),
270
+ headers=request_headers,
271
+ retryable_status_codes=[409], # todo : verify me
272
+ )
273
+ if response.status_code > 400:
274
+ raise TODOException(
275
+ f"Failed to delete capsule: {response.status_code} {response.text}"
276
+ )
277
+
278
+ return response.json()
279
+
280
+
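These module-level helpers all take the perimeter-scoped capsules URL plus JSON request headers; the Capsule class below assembles both. A hedged sketch of direct usage with an illustrative base URL and capsule id:

    api_url = "https://api.outerbounds.example/v1/perimeters/default/capsules"
    headers = {"Content-Type": "application/json", "Connection": "keep-alive"}
    capsule = get_capsule("c-123456", api_url, headers)   # GET <api_url>/c-123456
    status = capsule.get("status", {})                    # the payload the state machine consumes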
281
+ class Capsule:
282
+
283
+ status: CapsuleStateMachine
284
+
285
+ identifier = None
286
+
287
+ # TODO: The current default timeout of 5 minutes is very large. Ideally the deployment should finish in under a minute.
288
+ def __init__(
289
+ self,
290
+ app_config: AppConfig,
291
+ base_url: str,
292
+ create_timeout: int = 60 * 5,
293
+ debug_dir: Optional[str] = None,
294
+ ):
295
+ self._app_config = app_config
296
+ self._base_url = os.path.join(
297
+ base_url,
298
+ "v1",
299
+ "perimeters",
300
+ app_config.get_state("perimeter"),
301
+ "capsules",
302
+ )
303
+ self._create_timeout = create_timeout
304
+ self._debug_dir = debug_dir
305
+ from metaflow.metaflow_config import SERVICE_HEADERS
306
+
307
+ self._request_headers = {
308
+ **{"Content-Type": "application/json", "Connection": "keep-alive"},
309
+ **(SERVICE_HEADERS or {}),
310
+ }
311
+
312
+ @property
313
+ def capsule_type(self):
314
+ auth_type = self._app_config.get_state("auth", {}).get("type", AuthType.default)
315
+ if auth_type == AuthType.BROWSER:
316
+ return "App"
317
+ elif auth_type == AuthType.API:
318
+ return "Endpoint"
319
+ else:
320
+ raise TODOException(f"Unknown auth type: {auth_type}")
321
+
322
+ @property
323
+ def name(self):
324
+ return self._app_config.get_state("name")
325
+
326
+ def create_input(self):
327
+ return experimental.capsule_input_overrides(
328
+ self._app_config, CapsuleInput.from_app_config(self._app_config)
329
+ )
330
+
331
+ def create(self):
332
+ capsule_response = create_capsule(
333
+ self.create_input(), self._base_url, self._request_headers
334
+ )
335
+ self.identifier = capsule_response.get("id")
336
+ return self.identifier
337
+
338
+ def get(self):
339
+ # TODO: [FIX ME]: This needs to work via reverse lookup too.
340
+ return get_capsule(self.identifier, self._base_url, self._request_headers)
341
+
342
+ def wait_for_terminal_state(self, logger=print):
343
+ state_machine = CapsuleStateMachine(self.identifier)
344
+ logger(
345
+ "💊 Waiting for %s %s to be ready to serve traffic"
346
+ % (self.capsule_type.lower(), self.identifier)
347
+ )
348
+ for i in range(self._create_timeout):
349
+ capsule_response = self.get()
350
+ state_machine.add_status(capsule_response.get("status", {}))
351
+ time.sleep(1)
352
+ state_machine.report_current_status(logger)
353
+ if state_machine.ready_to_serve_traffic:
354
+ logger(
355
+ "💊 %s %s is ready to serve traffic on the URL: %s"
356
+ % (
357
+ self.capsule_type,
358
+ self.identifier,
359
+ state_machine.out_of_cluster_url,
360
+ ),
361
+ )
362
+ self.status = state_machine
363
+ break
364
+ if self._debug_dir:
365
+ state_machine.check_for_debug(self._debug_dir)
366
+
367
+ return capsule_response
368
+
369
+ def list(self):
370
+ return list_capsules(self._base_url, self._request_headers)
371
+
372
+ def delete(self):
373
+ return delete_capsule(self.identifier, self._base_url, self._request_headers)
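Taken together, a hedged sketch of the intended deployment flow. The file name, base URL, and perimeter are illustrative, and the config is assumed to carry everything from_app_config reads (image, commands, port, and so on):

    config = AppConfig.from_file("app_config.yaml")
    config.validate()
    config.set_deploy_defaults(".")
    config.set_state("perimeter", "default")

    capsule = Capsule(config, base_url="https://api.outerbounds.example", create_timeout=300)
    capsule.create()                   # POSTs the CapsuleInput payload and stores the capsule id
    capsule.wait_for_terminal_state()  # polls get() once a second until it is ready to serve traffic
    capsule.delete()                   # tears the capsule down again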