outerbounds 0.3.173rc0__py3-none-any.whl → 0.3.174__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- outerbounds/command_groups/apps_cli.py +5 -1
- {outerbounds-0.3.173rc0.dist-info → outerbounds-0.3.174.dist-info}/METADATA +3 -3
- {outerbounds-0.3.173rc0.dist-info → outerbounds-0.3.174.dist-info}/RECORD +5 -19
- outerbounds/apps/__init__.py +0 -0
- outerbounds/apps/app_cli.py +0 -519
- outerbounds/apps/app_config.py +0 -308
- outerbounds/apps/artifacts.py +0 -0
- outerbounds/apps/capsule.py +0 -382
- outerbounds/apps/code_package/__init__.py +0 -3
- outerbounds/apps/code_package/code_packager.py +0 -612
- outerbounds/apps/code_package/examples.py +0 -125
- outerbounds/apps/config_schema.yaml +0 -194
- outerbounds/apps/dependencies.py +0 -115
- outerbounds/apps/deployer.py +0 -0
- outerbounds/apps/secrets.py +0 -164
- outerbounds/apps/utils.py +0 -228
- outerbounds/apps/validations.py +0 -34
- {outerbounds-0.3.173rc0.dist-info → outerbounds-0.3.174.dist-info}/WHEEL +0 -0
- {outerbounds-0.3.173rc0.dist-info → outerbounds-0.3.174.dist-info}/entry_points.txt +0 -0
outerbounds/apps/app_config.py
DELETED
@@ -1,308 +0,0 @@
|
|
1
|
-
import json
|
2
|
-
import os
|
3
|
-
from outerbounds._vendor import yaml
|
4
|
-
from typing import Dict, Any
|
5
|
-
|
6
|
-
CODE_PACKAGE_PREFIX = "mf.obp-apps"
|
7
|
-
|
8
|
-
CAPSULE_DEBUG = os.environ.get("OUTERBOUNDS_CAPSULE_DEBUG", False)
|
9
|
-
|
10
|
-
|
11
|
-
def build_config_from_options(options):
    """Translate raw CLI option values into an app-configuration dictionary.

    Only options that actually carry a value make it into the result, so an
    empty options mapping yields an empty config.
    """
    config = {}

    # Scalar fields are copied over only when they hold a truthy value.
    for field in ("name", "port", "image", "compute_pools"):
        value = options.get(field)
        if value:
            config[field] = value

    # Tuple-valued CLI options are normalized to plain lists.
    for list_field in ("tags", "secrets"):
        if options.get(list_field):
            config[list_field] = list(options[list_field])

    # Each item in "envs" is a mapping of env-var assignments; later items
    # override earlier ones on key collision.
    if options.get("envs"):
        merged_env = {}
        for mapping in options["envs"]:
            merged_env.update(mapping)
        config["environment"] = merged_env

    # Dependencies are mutually exclusive: the first matching source wins,
    # mirroring the priority order of the original if/elif chain.
    dependency_sources = (
        ("dep_from_task", "from_task"),
        ("dep_from_run", "from_run"),
        ("dep_from_requirements", "from_requirements_file"),
        ("dep_from_pyproject", "from_pyproject_toml"),
    )
    for option_key, config_key in dependency_sources:
        if options.get(option_key):
            config["dependencies"] = {config_key: options[option_key]}
            break

    # TODO: [FIX ME]: Get better CLI abstraction for pypi/conda dependencies

    resources = {
        key: options[key]
        for key in ("cpu", "memory", "gpu", "storage")
        if options.get(key)
    }
    if resources:
        config["resources"] = resources

    # Health-check flags: boolean/numeric options are tested against None so
    # that explicit False/0 values are still recorded.
    health_check = {}
    if options.get("health_check_enabled") is not None:
        health_check["enabled"] = options["health_check_enabled"]
    if options.get("health_check_path"):
        health_check["path"] = options["health_check_path"]
    if options.get("health_check_initial_delay") is not None:
        health_check["initial_delay_seconds"] = options["health_check_initial_delay"]
    if options.get("health_check_period") is not None:
        health_check["period_seconds"] = options["health_check_period"]
    if health_check:
        config["health_check"] = health_check

    # Code-package options.
    if options.get("package_src_path") or options.get("package_suffixes"):
        package = {}
        if options.get("package_src_path"):
            package["src_path"] = options["package_src_path"]
        if options.get("package_suffixes"):
            package["suffixes"] = options["package_suffixes"]
        config["package"] = package

    # Auth options.
    if options.get("auth_type") or options.get("auth_public"):
        auth = {}
        if options.get("auth_type"):
            auth["type"] = options["auth_type"]
        if options.get("auth_public"):
            auth["public"] = options["auth_public"]
        config["auth"] = auth

    return config
|
89
|
-
|
90
|
-
|
91
|
-
class AppConfigError(Exception):
    """Raised when an app configuration is invalid or cannot be loaded."""
|
95
|
-
|
96
|
-
|
97
|
-
class AppConfig:
    """Class representing an Outerbounds App configuration.

    Wraps the raw config dict (from a YAML file or CLI options), validates it
    against the schema shipped next to this module (``config_schema.yaml``),
    and tracks a separate "final state" overlay of values resolved at deploy
    time (read via :meth:`get_state`).
    """

    def __init__(self, config_dict: Dict[str, Any]):
        """Initialize configuration from a dictionary (``None`` becomes ``{}``)."""
        self.config = config_dict or {}
        self.schema = self._load_schema()
        # Deploy-time resolved values; layered on top of the raw config by
        # get_state/dump_state.
        self._final_state = {}

    def set_state(self, key, value):
        """Record a deploy-time value; returns self so calls can be chained."""
        self._final_state[key] = value
        return self

    def get_state(self, key, default=None):
        """Look up `key` in the deploy-time state first, then the raw config."""
        return self._final_state.get(key, self.config.get(key, default))

    def dump_state(self):
        """Return the raw config merged with the deploy-time state (state wins)."""
        x = {k: v for k, v in self.config.items()}
        for k, v in self._final_state.items():
            x[k] = v
        return x

    @staticmethod
    def _load_schema():
        """Load the configuration schema from the YAML file next to this module."""
        schema_path = os.path.join(os.path.dirname(__file__), "config_schema.yaml")
        with open(schema_path, "r") as f:
            return yaml.safe_load(f)

    def get(self, key: str, default: Any = None) -> Any:
        """Get a raw configuration value by key (ignores deploy-time state)."""
        return self.config.get(key, default)

    def validate(self) -> None:
        """Validate the configuration against the schema.

        Raises:
            AppConfigError: if a required field is missing, a field has the
                wrong type, or a field-specific constraint is violated.
        """
        self._validate_required_fields()
        self._validate_field_types()
        self._validate_field_constraints()

    def set_deploy_defaults(self, packaging_directory: str) -> None:
        """Set default values for fields that are not provided.

        Note: `packaging_directory` is currently unused but kept for interface
        stability.

        BUGFIX: this previously used truthiness checks (``if not ...get(...)``)
        before assigning each default, which silently overwrote an explicit
        falsy value — e.g. ``auth.public: false`` became ``True``. Defaults are
        now applied with ``setdefault`` so they only fill in *absent* keys.
        """
        # A falsy container (None from a bare `auth:` in YAML, or {}) is still
        # replaced wholesale, preserving the original behavior for that case.
        if not self.config.get("auth"):
            self.config["auth"] = {}
        auth = self.config["auth"]
        auth.setdefault("public", True)
        auth.setdefault("type", "SSO")

        if not self.config.get("health_check"):
            self.config["health_check"] = {}
        self.config["health_check"].setdefault("enabled", False)

        if not self.config.get("resources"):
            self.config["resources"] = {}
        resources = self.config["resources"]
        resources.setdefault("cpu", 1)
        resources.setdefault("memory", "4096Mi")
        resources.setdefault("disk", "20Gi")

    def _validate_required_fields(self) -> None:
        """Validate that all fields listed under the schema's `required` exist."""
        required_fields = self.schema.get("required", [])
        for field in required_fields:
            if field not in self.config:
                raise AppConfigError(
                    f"Required field '{field}' is missing from the configuration."
                )

    def _validate_field_types(self) -> None:
        """Validate that fields have the types declared in the schema."""
        properties = self.schema.get("properties", {})

        for field, value in self.config.items():
            if field not in properties:
                raise AppConfigError(f"Unknown field '{field}' in configuration.")

            field_schema = properties[field]
            field_type = field_schema.get("type")

            if field_type == "string" and not isinstance(value, str):
                raise AppConfigError(f"Field '{field}' must be a string.")

            # NOTE: bool is a subclass of int in Python, so booleans pass the
            # integer check here; kept as-is to avoid changing behavior.
            elif field_type == "integer" and not isinstance(value, int):
                raise AppConfigError(f"Field '{field}' must be an integer.")

            elif field_type == "boolean" and not isinstance(value, bool):
                raise AppConfigError(f"Field '{field}' must be a boolean.")

            elif field_type == "array" and not isinstance(value, list):
                raise AppConfigError(f"Field '{field}' must be an array.")

            elif field_type == "object" and not isinstance(value, dict):
                raise AppConfigError(f"Field '{field}' must be an object.")

    def _validate_field_constraints(self) -> None:
        """Validate field-specific constraints (name length, port range, deps)."""
        properties = self.schema.get("properties", {})

        # Validate name length against the schema's maxLength (default 20).
        if "name" in self.config:
            name = self.config["name"]
            max_length = properties["name"].get("maxLength", 20)
            if len(name) > max_length:
                raise AppConfigError(
                    f"App name '{name}' exceeds maximum length of {max_length} characters."
                )

        # Validate port against the schema's minimum/maximum bounds.
        if "port" in self.config:
            port = self.config["port"]
            min_port = properties["port"].get("minimum", 1)
            max_port = properties["port"].get("maximum", 65535)
            if port < min_port or port > max_port:
                raise AppConfigError(
                    f"Port number {port} is outside valid range ({min_port}-{max_port})."
                )

        # Dependencies: only one source of dependencies may be specified.
        if "dependencies" in self.config:
            deps = self.config["dependencies"]
            if not isinstance(deps, dict):
                raise AppConfigError("Dependencies must be an object.")

            valid_dep_types = [
                "from_requirements_file",
                "from_pyproject_toml",
            ]

            found_types = [dep_type for dep_type in valid_dep_types if dep_type in deps]

            if len(found_types) > 1:
                raise AppConfigError(
                    f"You can only specify one mode of specifying dependencies. You have specified : {found_types} . Please only set one."
                )

    def to_dict(self) -> Dict[str, Any]:
        """Return the raw configuration as a dictionary."""
        return self.config

    def to_yaml(self) -> str:
        """Return the configuration as a YAML string."""
        return yaml.dump(self.config, default_flow_style=False)

    def to_json(self) -> str:
        """Return the configuration as a JSON string."""
        return json.dumps(self.config, indent=2)

    @classmethod
    def from_file(cls, file_path: str) -> "AppConfig":
        """Create a configuration from a YAML file.

        Raises:
            AppConfigError: if the file does not exist or fails to parse.
        """
        if not os.path.exists(file_path):
            raise AppConfigError(f"Configuration file '{file_path}' does not exist.")

        with open(file_path, "r") as f:
            try:
                config_dict = yaml.safe_load(f)
            except Exception as e:
                raise AppConfigError(f"Failed to parse configuration file: {e}")

        return cls(config_dict)

    def update_from_cli_options(self, options):
        """
        Update configuration from CLI options using the same logic as build_config_from_options.
        This ensures consistent handling of CLI options whether they come from a config file
        or direct CLI input.
        """
        cli_config = build_config_from_options(options)

        # Process each field using the schema's allow_union property.
        for key, value in cli_config.items():
            if key in self.schema.get("properties", {}):
                self._update_field(key, value)

        return self

    def _update_field(self, field_name, new_value):
        """Update a field based on its `allow_union` schema property.

        Fields that allow union are merged (lists extended, dicts updated);
        fields that don't raise if the config already holds a value.
        """
        properties = self.schema.get("properties", {})

        # Skip if field doesn't exist in schema.
        if field_name not in properties:
            return

        field_schema = properties[field_name]
        allow_union = field_schema.get("allow_union", False)

        # If field doesn't exist in config, just set it.
        if field_name not in self.config:
            self.config[field_name] = new_value
            return

        # If allow_union is True, merge values based on type.
        if allow_union:
            current_value = self.config[field_name]

            if isinstance(current_value, list) and isinstance(new_value, list):
                # For lists, append new items.
                self.config[field_name].extend(new_value)
            elif isinstance(current_value, dict) and isinstance(new_value, dict):
                # For dicts, update with new values.
                self.config[field_name].update(new_value)
            else:
                # For other types, replace with new value.
                self.config[field_name] = new_value
        else:
            raise AppConfigError(
                f"Field '{field_name}' does not allow union. Current value: {self.config[field_name]}, new value: {new_value}"
            )
|
outerbounds/apps/artifacts.py
DELETED
File without changes
|
outerbounds/apps/capsule.py
DELETED
@@ -1,382 +0,0 @@
|
|
1
|
-
import json
|
2
|
-
import os
|
3
|
-
import pathlib
|
4
|
-
import requests
|
5
|
-
import time
|
6
|
-
import shlex
|
7
|
-
from typing import Optional
|
8
|
-
from .utils import TODOException, safe_requests_wrapper
|
9
|
-
from .app_config import AppConfig, CAPSULE_DEBUG
|
10
|
-
|
11
|
-
|
12
|
-
class CapsuleStateMachine:
    """
    Since capsules are a kinda newer concept, we will treat the state transitions based on the conditions and the
    availability of certain fields in the status dictionary.

    Instances accumulate timestamped status snapshots (one per poll) and expose
    derived properties (URLs, readiness) computed from the most recent one.
    """

    CONDITIONS = ["Ready", "DeploymentReplicasAvailable", "IngressObjectReady"]

    def __init__(self, capsule_id: str):
        self._capsule_id = capsule_id
        self._status_trail = []  # list of {"timestamp": float, "status": dict}

    def is_completely_new_capsule(self):
        # This is a heuristic. Not a fully tested.
        # If we create a completely new capsule then the status
        # field might be a completely empty dictionary.
        assert (
            len(self._status_trail) > 0
        ), "status_trail cannot be none to infer if its a new capsule"
        return self._empty_status(self._status_trail[0].get("status"))

    def get_status_trail(self):
        """Return all recorded status snapshots, oldest first."""
        return self._status_trail

    @staticmethod
    def _empty_status(status):
        # An empty dict serializes to "{}"; anything else has content.
        if json.dumps(status) == "{}":
            return True
        return False

    @staticmethod
    def _parse_conditions(conditions):
        """Flatten a k8s-style conditions list into {type: status} pairs."""
        curr_conditons = {}
        for condition in conditions:
            curr_conditons[condition["type"]] = condition["status"]
        return curr_conditons

    def add_status(self, status: dict):
        """Append a status snapshot with the wall-clock time it was observed."""
        assert type(status) == dict, "TODO: Make this check somewhere else"
        self._status_trail.append({"timestamp": time.time(), "status": status})

    @staticmethod
    def _condition_change_emoji(previous_condition_status, current_condition_status):
        """Emoji describing the transition of one condition between two polls."""
        if previous_condition_status == current_condition_status:
            # Steady state: healthy stays ✅, unhealthy stays ❌.
            if previous_condition_status == "True":
                return "✅"
            else:
                return "❌"
        if previous_condition_status == "True" and current_condition_status == "False":
            # BUGFIX: this regression case previously rendered "🔴 --> 🟢",
            # i.e. the arrow direction was swapped with the recovery case below.
            return "🟢 --> 🔴"
        if previous_condition_status == "False" and current_condition_status == "True":
            return "🚀"
        return "🟡"

    @property
    def current_status(self):
        return self._status_trail[-1].get("status")

    @property
    def out_of_cluster_url(self):
        url = self.current_status.get("accessInfo", {}).get("outOfClusterURL", None)
        if url is not None:
            return f"https://{url}"
        return None

    @property
    def in_cluster_url(self):
        url = self.current_status.get("accessInfo", {}).get("inClusterURL", None)
        if url is not None:
            return f"https://{url}"
        return None

    @property
    def ready_to_serve_traffic(self):
        # Ready only when the backend flags readiness AND at least one URL
        # (in- or out-of-cluster) has been provisioned.
        if self.current_status.get("readyToServeTraffic", False):
            return any(
                i is not None for i in [self.out_of_cluster_url, self.in_cluster_url]
            )
        return False

    @property
    def available_replicas(self):
        return self.current_status.get("availableReplicas", 0)

    def report_current_status(self, logger):
        """Log a message on the first transition from an empty to a real status."""
        if len(self._status_trail) < 2:
            return
        previous_status, current_status = self._status_trail[-2].get(
            "status"
        ), self._status_trail[-1].get("status")
        if self._empty_status(current_status):
            return

        if self._empty_status(previous_status):
            logger("💊 %s Deployment has started ... 🚀" % self._capsule_id)
            return

    def check_for_debug(self, state_dir: str):
        """When CAPSULE_DEBUG is set, dump the full status trail to `state_dir`."""
        if CAPSULE_DEBUG:
            debug_path = os.path.join(
                state_dir, f"debug_capsule_{self._capsule_id}.json"
            )
            with open(debug_path, "w") as f:
                json.dump(self._status_trail, f, indent=4)
|
-
|
117
|
-
|
118
|
-
class CapsuleInput:
    """Builds the JSON payload for capsule creation from an AppConfig."""

    @classmethod
    def construct_exec_command(cls, commands: list[str]):
        """Wrap shell commands into a single base64-armored `bash -c` invocation."""
        script = "\n".join(["set -eEuo pipefail"] + commands)
        # The command script is shipped base64-encoded rather than as a raw
        # multi-line string: the backend controller rejects multi-line command
        # strings with a YAML-to-JSON conversion error ("mapping values are not
        # allowed in this context"), so the runtime decodes the script into a
        # file and executes it with bash instead.
        import base64

        encoded_command = base64.b64encode(script.encode()).decode()
        decode_cmd = f"echo {encoded_command} | base64 -d > ./_ob_app_run.sh"
        return (
            f"bash -c '{decode_cmd} && cat ./_ob_app_run.sh && bash ./_ob_app_run.sh'"
        )

    @classmethod
    def _marshal_environment_variables(cls, app_config: AppConfig):
        """Convert the app's environment mapping into name/value pair dicts."""
        marshalled = []
        for name, raw_value in app_config.get_state("environment", {}).copy().items():
            # Containers are JSON-encoded; everything else is stringified.
            if isinstance(raw_value, (dict, list)):
                value = json.dumps(raw_value)
            else:
                value = str(raw_value)
            marshalled.append(
                {
                    "name": name,
                    "value": value,
                }
            )
        return marshalled

    @classmethod
    def from_app_config(cls, app_config: AppConfig):
        """Assemble the capsule-creation request body from `app_config` state."""
        resource_state = app_config.get_state("resources")

        extra_resources = {}
        if resource_state.get("gpu"):
            extra_resources["gpu"] = resource_state.get("gpu")
        if resource_state.get("shared_memory"):
            extra_resources["sharedMemory"] = resource_state.get("shared_memory")

        scheduling_config = {}
        if app_config.get_state("compute_pools", None):
            scheduling_config["computePools"] = [
                {"name": pool} for pool in app_config.get_state("compute_pools")
            ]

        return {
            "perimeter": app_config.get_state("perimeter"),
            "codePackagePath": app_config.get_state("code_package_url"),
            "image": app_config.get_state("image"),
            "resourceIntegrations": [
                {"name": secret} for secret in app_config.get_state("secrets", [])
            ],
            "resourceConfig": {
                "cpu": str(resource_state.get("cpu")),
                "memory": str(resource_state.get("memory")),
                "ephemeralStorage": str(resource_state.get("disk")),
                **extra_resources,
            },
            "autoscalingConfig": {  # TODO [FIX ME]: Make this configurable from top level
                "minReplicas": 1,
                "maxReplicas": 1,
            },
            **scheduling_config,
            "containerStartupConfig": {
                "entrypoint": cls.construct_exec_command(
                    app_config.get_state("commands")
                )
            },
            "environmentVariables": cls._marshal_environment_variables(app_config),
            "authConfig": {
                "authType": app_config.get_state("auth").get("type"),
                "publicToDeployment": app_config.get_state("auth").get("public"),
            },
            "tags": app_config.get_state("tags", []),
            "port": app_config.get_state("port"),
            "displayName": app_config.get_state("name"),
        }
209
|
-
|
210
|
-
|
211
|
-
def create_capsule(app_config: AppConfig, api_url: str, request_headers: dict):
    """POST a new capsule built from `app_config` to `api_url`.

    Returns the decoded JSON response body.
    Raises TODOException on any HTTP error response.
    """
    _data = json.dumps(CapsuleInput.from_app_config(app_config))
    response = safe_requests_wrapper(
        requests.post,
        api_url,
        data=_data,
        headers=request_headers,
        conn_error_retries=2,
        retryable_status_codes=[409],  # todo : verify me
    )

    # BUGFIX: was `> 400`, which silently treated HTTP 400 (Bad Request)
    # itself as a success; all 4xx/5xx codes must raise.
    if response.status_code >= 400:
        raise TODOException(
            f"Failed to create capsule: {response.status_code} {response.text}"
        )
    return response.json()
227
|
-
|
228
|
-
|
229
|
-
def list_capsules(api_url: str, request_headers: dict):
    """GET the list of capsules from `api_url`.

    Returns the decoded JSON response body.
    Raises TODOException on any HTTP error response.
    """
    response = safe_requests_wrapper(
        requests.get,
        api_url,
        headers=request_headers,
        retryable_status_codes=[409],  # todo : verify me
        conn_error_retries=3,
    )
    # BUGFIX: was `> 400`, which treated HTTP 400 itself as a success.
    if response.status_code >= 400:
        raise TODOException(
            f"Failed to list capsules: {response.status_code} {response.text}"
        )
    return response.json()
242
|
-
|
243
|
-
|
244
|
-
def get_capsule(capsule_id: str, api_url: str, request_headers: dict):
    """GET a single capsule by id (fetched from `api_url`/`capsule_id`).

    Returns the decoded JSON response body.
    Raises TODOException on any HTTP error response.
    """
    url = os.path.join(api_url, capsule_id)
    response = safe_requests_wrapper(
        requests.get,
        url,
        headers=request_headers,
        # 404 is retried here — presumably to ride out eventual consistency
        # right after creation; TODO confirm against the backend's behavior.
        retryable_status_codes=[409, 404],  # todo : verify me
        conn_error_retries=3,
    )
    # BUGFIX: was `> 400`, which treated HTTP 400 itself as a success.
    if response.status_code >= 400:
        raise TODOException(
            f"Failed to get capsule: {response.status_code} {response.text}"
        )
    return response.json()
259
|
-
|
260
|
-
|
261
|
-
def delete_capsule(capsule_id: str, api_url: str, request_headers: dict):
    """DELETE a capsule by id.

    Returns the decoded JSON response body.
    Raises TODOException on any HTTP error response.
    """
    response = safe_requests_wrapper(
        requests.delete,
        os.path.join(api_url, capsule_id),
        headers=request_headers,
        retryable_status_codes=[409],  # todo : verify me
    )
    # BUGFIX: was `> 400`, which treated HTTP 400 itself as a success.
    if response.status_code >= 400:
        raise TODOException(
            f"Failed to delete capsule: {response.status_code} {response.text}"
        )

    return response.json()
274
|
-
|
275
|
-
|
276
|
-
class StatusTrail:
    """Accumulates timestamped status snapshots for a single capsule."""

    def __init__(self, capsule_id: str):
        self._capsule_id = capsule_id
        self._status_trail = []

    def add_status(self, status: dict):
        """Record `status` together with the wall-clock time it was observed."""
        entry = {"timestamp": time.time(), "status": status}
        self._status_trail.append(entry)

    def get_status_trail(self):
        """Return every recorded entry, oldest first."""
        return self._status_trail
286
|
-
|
287
|
-
|
288
|
-
class Capsule:
    """High-level handle for one capsule: builds the API base URL, creates the
    capsule, and polls it until it is ready to serve traffic."""

    # Populated by wait_for_terminal_state once the capsule is ready.
    status: CapsuleStateMachine

    # Backend-assigned capsule id; set by create().
    identifier = None

    # TODO: Current default timeout is very large of 5 minutes. Ideally we should have finished the deployed in less than 1 minutes.
    def __init__(
        self,
        app_config: AppConfig,
        base_url: str,
        create_timeout: int = 60 * 5,
        debug_dir: Optional[str] = None,
    ):
        self._app_config = app_config
        # Perimeter-scoped capsules endpoint: <base>/v1/perimeters/<perimeter>/capsules
        self._base_url = os.path.join(
            base_url,
            "v1",
            "perimeters",
            app_config.get_state("perimeter"),
            "capsules",
        )
        self._create_timeout = create_timeout
        self._debug_dir = debug_dir
        # Imported lazily so module import does not require a configured metaflow.
        from metaflow.metaflow_config import SERVICE_HEADERS

        self._request_headers = {
            **{"Content-Type": "application/json", "Connection": "keep-alive"},
            **(SERVICE_HEADERS or {}),
        }

    @property
    def capsule_type(self):
        """Human-facing flavor of the capsule, derived from the auth type:
        SSO-authenticated capsules are "App"s, API-authenticated ones "Endpoint"s."""
        auth_type = self._app_config.get_state("auth", {}).get("type", "SSO")
        if auth_type == "SSO":
            return "App"
        elif auth_type == "API":
            return "Endpoint"
        else:
            raise TODOException(f"Unknown auth type: {auth_type}")

    @property
    def name(self):
        return self._app_config.get_state("name")

    def create_input(self):
        """Return the JSON-able creation payload without sending it."""
        return CapsuleInput.from_app_config(self._app_config)

    def create(self):
        """Create the capsule on the backend; stores and returns its id."""
        capsule_response = create_capsule(
            self._app_config, self._base_url, self._request_headers
        )
        self.identifier = capsule_response.get("id")
        return self.identifier

    def get(self):
        # TODO: [FIX ME]: This need to work in the reverse lookup way too.
        return get_capsule(self.identifier, self._base_url, self._request_headers)

    def wait_for_terminal_state(self, logger=print):
        """Poll the capsule roughly once per second until it is ready to serve
        traffic, logging progress via `logger`.

        NOTE(review): `_create_timeout` is used as an iteration count; with the
        ~1s sleep per loop it approximates a timeout in seconds. If the loop
        exhausts without readiness, the last response is returned without any
        timeout error — presumably intentional best-effort; confirm.
        """
        state_machine = CapsuleStateMachine(self.identifier)
        for i in range(self._create_timeout):
            capsule_response = self.get()
            state_machine.add_status(capsule_response.get("status", {}))
            time.sleep(1)
            # First iteration only: announce whether this is a fresh create
            # (empty initial status) or an update of an existing capsule.
            if state_machine.is_completely_new_capsule() and i == 0:
                logger(
                    "🔧 🛠️ Creating new %s with id %s"
                    % (self.capsule_type.lower(), self.identifier)
                )
            elif not state_machine.is_completely_new_capsule() and i == 0:
                logger("🔧 🛠️ Updating %s %s" % (self.capsule_type, self.identifier))

            state_machine.report_current_status(logger)
            if state_machine.ready_to_serve_traffic:
                logger(
                    "💊 %s %s is ready to serve traffic on the URL: %s"
                    % (
                        self.capsule_type,
                        self.identifier,
                        state_machine.out_of_cluster_url,
                    ),
                )
                # Expose the final state machine to callers before exiting.
                self.status = state_machine
                break
            # Optionally dump the status trail each iteration for debugging.
            if self._debug_dir:
                state_machine.check_for_debug(self._debug_dir)

        return capsule_response

    def list(self):
        """List all capsules in this perimeter."""
        return list_capsules(self._base_url, self._request_headers)

    def delete(self):
        """Delete this capsule (requires `identifier` to be set)."""
        return delete_capsule(self.identifier, self._base_url, self._request_headers)