outerbounds 0.3.176rc6__py3-none-any.whl → 0.3.178__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- outerbounds/command_groups/apps_cli.py +6 -2
- outerbounds/command_groups/cli.py +0 -2
- {outerbounds-0.3.176rc6.dist-info → outerbounds-0.3.178.dist-info}/METADATA +4 -4
- {outerbounds-0.3.176rc6.dist-info → outerbounds-0.3.178.dist-info}/RECORD +6 -23
- outerbounds/apps/__init__.py +0 -0
- outerbounds/apps/app_cli.py +0 -697
- outerbounds/apps/app_config.py +0 -293
- outerbounds/apps/artifacts.py +0 -0
- outerbounds/apps/capsule.py +0 -478
- outerbounds/apps/cli_to_config.py +0 -91
- outerbounds/apps/code_package/__init__.py +0 -3
- outerbounds/apps/code_package/code_packager.py +0 -610
- outerbounds/apps/code_package/examples.py +0 -125
- outerbounds/apps/config_schema.yaml +0 -259
- outerbounds/apps/dependencies.py +0 -115
- outerbounds/apps/deployer.py +0 -0
- outerbounds/apps/experimental/__init__.py +0 -103
- outerbounds/apps/secrets.py +0 -164
- outerbounds/apps/utils.py +0 -254
- outerbounds/apps/validations.py +0 -34
- outerbounds/command_groups/flowprojects_cli.py +0 -137
- {outerbounds-0.3.176rc6.dist-info → outerbounds-0.3.178.dist-info}/WHEEL +0 -0
- {outerbounds-0.3.176rc6.dist-info → outerbounds-0.3.178.dist-info}/entry_points.txt +0 -0
outerbounds/apps/capsule.py
DELETED
@@ -1,478 +0,0 @@
import json
import os
import pathlib
import requests
import time
import shlex
from typing import Optional
from .utils import TODOException, safe_requests_wrapper
from .app_config import AppConfig, CAPSULE_DEBUG, AuthType
from . import experimental


class CapsuleStateMachine:
    """
    Since capsules are a kinda newer concept, we will treat the state transitions based on the conditions and the
    availability of certain fields in the status dictionary.
    """

    CONDITIONS = ["Ready", "DeploymentReplicasAvailable", "IngressObjectReady"]

    def __init__(self, capsule_id: str):
        self._capsule_id = capsule_id
        self._status_trail = []

    def is_completely_new_capsule(self):
        # This is a heuristic. Not a fully tested.
        # If we create a completely new capsule then the status
        # field might be a completely empty dictionary.
        assert (
            len(self._status_trail) > 0
        ), "status_trail cannot be none to infer if its a new capsule"
        return self._empty_status(self._status_trail[0].get("status"))

    def get_status_trail(self):
        return self._status_trail

    @staticmethod
    def _empty_status(status):
        if json.dumps(status) == "{}":
            return True
        return False

    @staticmethod
    def _parse_conditions(conditions):
        curr_conditons = {}
        for condition in conditions:
            curr_conditons[condition["type"]] = condition["status"]
        return curr_conditons

    def add_status(self, status: dict):
        assert type(status) == dict, "TODO: Make this check somewhere else"
        self._status_trail.append({"timestamp": time.time(), "status": status})

    @staticmethod
    def _condition_change_emoji(previous_condition_status, current_condition_status):
        if previous_condition_status == current_condition_status:
            if previous_condition_status == "True":
                return "✅"
            else:
                return "❌"
        if previous_condition_status == "True" and current_condition_status == "False":
            return "🔴 --> 🟢"
        if previous_condition_status == "False" and current_condition_status == "True":
            return "🚀"
        return "🟡"

    @property
    def current_status(self):
        return self._status_trail[-1].get("status")

    @property
    def out_of_cluster_url(self):
        access_info = self.current_status.get("accessInfo", {}) or {}
        url = access_info.get("outOfClusterURL", None)
        if url is not None:
            return f"https://{url}"
        return None

    @property
    def in_cluster_url(self):
        access_info = self.current_status.get("accessInfo", {}) or {}
        url = access_info.get("inClusterURL", None)
        if url is not None:
            return f"https://{url}"
        return None

    @property
    def ready_to_serve_traffic(self):
        if self.current_status.get("readyToServeTraffic", False):
            return any(
                i is not None for i in [self.out_of_cluster_url, self.in_cluster_url]
            )
        return False

    @property
    def available_replicas(self):
        return self.current_status.get("availableReplicas", 0)

    def report_current_status(self, logger):
        if len(self._status_trail) < 2:
            return
        previous_status, current_status = self._status_trail[-2].get(
            "status"
        ), self._status_trail[-1].get("status")
        if self._empty_status(current_status):
            return

        if self._empty_status(previous_status):
            logger("💊 %s Deployment has started ... 🚀" % self._capsule_id)
            return

    def check_for_debug(self, state_dir: str):
        if CAPSULE_DEBUG:
            debug_path = os.path.join(
                state_dir, f"debug_capsule_{self._capsule_id}.json"
            )
            with open(debug_path, "w") as f:
                json.dump(self._status_trail, f, indent=4)


class CapsuleInput:
    @classmethod
    def construct_exec_command(cls, commands: list[str]):
        commands = ["set -eEuo pipefail"] + commands
        command_string = "\n".join(commands)
        # First constuct a base64 encoded string of the quoted command
        # One of the reasons we don't directly pass the command string to the backend with a `\n` join
        # is because the backend controller doesnt play nice when the command can be a multi-line string.
        # So we encode it to a base64 string and then decode it back to a command string at runtime to provide to
        # `bash -c`. The ideal thing to have done is to run "bash -c {shlex.quote(command_string)}" and call it a day
        # but the backend controller yields the following error:
        # `error parsing template: error converting YAML to JSON: yaml: line 111: mapping values are not allowed in this context`
        # So we go to great length to ensure the command is provided in base64 to avoid any issues with the backend controller.
        import base64

        encoded_command = base64.b64encode(command_string.encode()).decode()
        decode_cmd = f"echo {encoded_command} | base64 -d > ./_ob_app_run.sh"
        return (
            f"bash -c '{decode_cmd} && cat ./_ob_app_run.sh && bash ./_ob_app_run.sh'"
        )

    @classmethod
    def _marshal_environment_variables(cls, app_config: AppConfig):
        envs = app_config.get_state("environment", {}).copy()
        _return = []
        for k, v in envs.items():
            _v = v
            if isinstance(v, dict):
                _v = json.dumps(v)
            elif isinstance(v, list):
                _v = json.dumps(v)
            else:
                _v = str(v)
            _return.append(
                {
                    "name": k,
                    "value": _v,
                }
            )
        return _return

    @classmethod
    def from_app_config(self, app_config: AppConfig):
        gpu_resource = app_config.get_state("resources").get("gpu")
        resources = {}
        shared_memory = app_config.get_state("resources").get("shared_memory")
        if gpu_resource:
            resources["gpu"] = gpu_resource
        if shared_memory:
            resources["sharedMemory"] = shared_memory

        _scheduling_config = {}
        if app_config.get_state("compute_pools", None):
            _scheduling_config["schedulingConfig"] = {
                "computePools": [
                    {"name": x} for x in app_config.get_state("compute_pools")
                ]
            }
        _description = app_config.get_state("description")
        _app_type = app_config.get_state("app_type")
        _final_info = {}
        if _description:
            _final_info["description"] = _description
        if _app_type:
            _final_info["endpointType"] = _app_type
        return {
            "perimeter": app_config.get_state("perimeter"),
            **_final_info,
            "codePackagePath": app_config.get_state("code_package_url"),
            "image": app_config.get_state("image"),
            "resourceIntegrations": [
                {"name": x} for x in app_config.get_state("secrets", [])
            ],
            "resourceConfig": {
                "cpu": str(app_config.get_state("resources").get("cpu")),
                "memory": str(app_config.get_state("resources").get("memory")),
                "ephemeralStorage": str(app_config.get_state("resources").get("disk")),
                **resources,
            },
            "autoscalingConfig": {
                "minReplicas": app_config.get_state("replicas", {}).get("min", 1),
                "maxReplicas": app_config.get_state("replicas", {}).get("max", 1),
            },
            **_scheduling_config,
            "containerStartupConfig": {
                "entrypoint": self.construct_exec_command(
                    app_config.get_state("commands")
                )
            },
            "environmentVariables": self._marshal_environment_variables(app_config),
            # "assets": [{"name": "startup-script.sh"}],
            "authConfig": {
                "authType": app_config.get_state("auth").get("type"),
                "publicToDeployment": app_config.get_state("auth").get("public"),
            },
            "tags": [
                dict(key=k, value=v)
                for tag in app_config.get_state("tags", [])
                for k, v in tag.items()
            ],
            "port": app_config.get_state("port"),
            "displayName": app_config.get_state("name"),
        }


def create_capsule(capsule_input: dict, api_url: str, request_headers: dict):
    _data = json.dumps(capsule_input)
    response = safe_requests_wrapper(
        requests.post,
        api_url,
        data=_data,
        headers=request_headers,
        conn_error_retries=2,
        retryable_status_codes=[409],  # todo : verify me
    )

    if response.status_code >= 400:
        raise TODOException(
            f"Failed to create capsule: {response.status_code} {response.text}"
        )
    return response.json()


def list_capsules(api_url: str, request_headers: dict):
    response = safe_requests_wrapper(
        requests.get,
        api_url,
        headers=request_headers,
        retryable_status_codes=[409],  # todo : verify me
        conn_error_retries=3,
    )
    if response.status_code >= 400:
        raise TODOException(
            f"Failed to list capsules: {response.status_code} {response.text}"
        )
    return response.json()


def get_capsule(capsule_id: str, api_url: str, request_headers: dict):
    # params = {"instance_id": capsule_id}
    url = os.path.join(api_url, capsule_id)
    response = safe_requests_wrapper(
        requests.get,
        url,
        headers=request_headers,
        retryable_status_codes=[409, 404],  # todo : verify me
        conn_error_retries=3,
    )
    if response.status_code >= 400:
        raise TODOException(
            f"Failed to get capsule: {response.status_code} {response.text}"
        )
    return response.json()


def delete_capsule(capsule_id: str, api_url: str, request_headers: dict):
    _url = os.path.join(api_url, capsule_id)
    response = safe_requests_wrapper(
        requests.delete,
        _url,
        headers=request_headers,
        retryable_status_codes=[409],  # todo : verify me
    )
    if response.status_code >= 400:
        raise TODOException(
            f"Failed to delete capsule: {response.status_code} {response.text}"
        )

    if response.status_code == 200:
        return True
    return False


def list_capsules(api_url: str, request_headers: dict):
    response = safe_requests_wrapper(
        requests.get,
        api_url,
        headers=request_headers,
    )
    if response.status_code >= 400:
        raise TODOException(
            f"Failed to list capsules: {response.status_code} {response.text}"
        )
    return response.json()


def list_and_filter_capsules(
    api_url, perimeter, project, branch, name, tags, auth_type, capsule_id
):
    capsules = Capsule.list(api_url, perimeter)

    def _tags_match(tags, key, value):
        for t in tags:
            if t["key"] == key and t["value"] == value:
                return True
        return False

    def _all_tags_match(tags, tags_to_match):
        return all([_tags_match(tags, t["key"], t["value"]) for t in tags_to_match])

    def _filter_capsules(capsules, project, branch, name, tags, auth_type, capsule_id):
        _filtered_capsules = []
        for capsule in capsules:
            set_tags = capsule.get("spec", {}).get("tags", [])
            display_name = capsule.get("spec", {}).get("displayName", None)
            set_id = capsule.get("id", None)
            set_auth_type = (
                capsule.get("spec", {}).get("authConfig", {}).get("authType", None)
            )

            if auth_type and set_auth_type != auth_type:
                continue
            if project and not _tags_match(set_tags, "project", project):
                continue
            if branch and not _tags_match(set_tags, "branch", branch):
                continue
            if name and display_name != name:
                continue
            if tags and not _all_tags_match(set_tags, tags):
                continue
            if capsule_id and set_id != capsule_id:
                continue

            _filtered_capsules.append(capsule)
        return _filtered_capsules

    return _filter_capsules(
        capsules, project, branch, name, tags, auth_type, capsule_id
    )


class Capsule:

    status: CapsuleStateMachine

    identifier = None

    @classmethod
    def list(cls, base_url: str, perimeter: str):
        base_url = cls._create_base_url(base_url, perimeter)
        from metaflow.metaflow_config import SERVICE_HEADERS

        request_headers = {
            **{"Content-Type": "application/json", "Connection": "keep-alive"},
            **(SERVICE_HEADERS or {}),
        }
        _capsules = list_capsules(base_url, request_headers)
        if "capsules" not in _capsules:
            raise TODOException(f"Failed to list capsules")
        return _capsules.get("capsules", []) or []

    @classmethod
    def delete(cls, identifier: str, base_url: str, perimeter: str):
        base_url = cls._create_base_url(base_url, perimeter)
        from metaflow.metaflow_config import SERVICE_HEADERS

        request_headers = {
            **{"Content-Type": "application/json", "Connection": "keep-alive"},
            **(SERVICE_HEADERS or {}),
        }
        return delete_capsule(identifier, base_url, request_headers)

    @classmethod
    def _create_base_url(
        cls,
        base_url: str,
        perimeter: str,
    ):
        return os.path.join(
            base_url,
            "v1",
            "perimeters",
            perimeter,
            "capsules",
        )

    # TODO: Current default timeout is very large of 5 minutes. Ideally we should have finished the deployed in less than 1 minutes.
    def __init__(
        self,
        app_config: AppConfig,
        base_url: str,
        create_timeout: int = 60 * 5,
        debug_dir: Optional[str] = None,
    ):
        self._app_config = app_config
        self._base_url = self._create_base_url(
            base_url,
            app_config.get_state("perimeter"),
        )
        self._create_timeout = create_timeout
        self._debug_dir = debug_dir
        from metaflow.metaflow_config import SERVICE_HEADERS

        self._request_headers = {
            **{"Content-Type": "application/json", "Connection": "keep-alive"},
            **(SERVICE_HEADERS or {}),
        }

    @property
    def capsule_type(self):
        auth_type = self._app_config.get_state("auth", {}).get("type", AuthType.default)
        if auth_type == AuthType.BROWSER:
            return "App"
        elif auth_type == AuthType.API:
            return "Endpoint"
        else:
            raise TODOException(f"Unknown auth type: {auth_type}")

    @property
    def name(self):
        return self._app_config.get_state("name")

    def create_input(self):
        return experimental.capsule_input_overrides(
            self._app_config, CapsuleInput.from_app_config(self._app_config)
        )

    def create(self):
        capsule_response = create_capsule(
            self.create_input(), self._base_url, self._request_headers
        )
        self.identifier = capsule_response.get("id")
        return self.identifier

    def get(self):
        # TODO: [FIX ME]: This need to work in the reverse lookup way too.
        return get_capsule(self.identifier, self._base_url, self._request_headers)

    def wait_for_terminal_state(self, logger=print):
        state_machine = CapsuleStateMachine(self.identifier)
        logger(
            "💊 Waiting for %s %s to be ready to serve traffic"
            % (self.capsule_type.lower(), self.identifier)
        )
        self.status = state_machine
        for i in range(self._create_timeout):
            capsule_response = self.get()
            state_machine.add_status(capsule_response.get("status", {}))
            time.sleep(1)
            state_machine.report_current_status(logger)
            if state_machine.ready_to_serve_traffic:
                logger(
                    "💊 %s %s is ready to serve traffic on the URL: %s"
                    % (
                        self.capsule_type,
                        self.identifier,
                        state_machine.out_of_cluster_url,
                    ),
                )
                break
            if self._debug_dir:
                state_machine.check_for_debug(self._debug_dir)

        if not self.status.ready_to_serve_traffic:
            raise TODOException(
                f"Capsule {self.identifier} failed to be ready to serve traffic"
            )
        return capsule_response
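The base64 round-trip in CapsuleInput.construct_exec_command is the least obvious part of the removed module. As a reference, here is a minimal standalone sketch of the same transformation; the command list is invented for illustration and is not taken from the package.

import base64

# Illustrative command list; not part of the outerbounds package.
commands = ["pip install -r requirements.txt", "python app.py"]
command_string = "\n".join(["set -eEuo pipefail"] + commands)

# Encode the multi-line script so the backend only ever sees a single-line entrypoint,
# then decode and execute it at container start.
encoded = base64.b64encode(command_string.encode()).decode()
decode_cmd = f"echo {encoded} | base64 -d > ./_ob_app_run.sh"
entrypoint = f"bash -c '{decode_cmd} && cat ./_ob_app_run.sh && bash ./_ob_app_run.sh'"
print(entrypoint)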
outerbounds/apps/cli_to_config.py
DELETED
@@ -1,91 +0,0 @@
from . import experimental


def build_config_from_options(options):
    """Build an app configuration from CLI options."""
    config = {}

    # Set basic fields
    for key in ["name", "port", "image", "compute_pools", "description", "app_type"]:
        if options.get(key):
            config[key] = options[key]

    # Handle list fields
    if options.get("tags"):
        config["tags"] = list(options["tags"])
    if options.get("secrets"):
        config["secrets"] = list(options["secrets"])

    # Build env dict from key-value pairs
    if options.get("envs"):
        env_dict = {}
        for env_item in options["envs"]:
            env_dict.update(env_item)
        config["environment"] = env_dict

    # Handle dependencies (only one type allowed)
    deps = {}
    if options.get("dep_from_task"):
        deps["from_task"] = options["dep_from_task"]
    elif options.get("dep_from_run"):
        deps["from_run"] = options["dep_from_run"]
    elif options.get("dep_from_requirements"):
        deps["from_requirements_file"] = options["dep_from_requirements"]
    elif options.get("dep_from_pyproject"):
        deps["from_pyproject_toml"] = options["dep_from_pyproject"]

    # TODO: [FIX ME]: Get better CLI abstraction for pypi/conda dependencies

    if deps:
        config["dependencies"] = deps

    # Handle resources
    resources = {}
    for key in ["cpu", "memory", "gpu", "storage"]:
        if options.get(key):
            resources[key] = options[key]

    if resources:
        config["resources"] = resources

    # Handle health check options
    health_check = {}
    if options.get("health_check_enabled") is not None:
        health_check["enabled"] = options["health_check_enabled"]
    if options.get("health_check_path"):
        health_check["path"] = options["health_check_path"]
    if options.get("health_check_initial_delay") is not None:
        health_check["initial_delay_seconds"] = options["health_check_initial_delay"]
    if options.get("health_check_period") is not None:
        health_check["period_seconds"] = options["health_check_period"]

    if health_check:
        config["health_check"] = health_check

    # Handle package options
    if options.get("package_src_path") or options.get("package_suffixes"):
        config["package"] = {}
        if options.get("package_src_path"):
            config["package"]["src_path"] = options["package_src_path"]
        if options.get("package_suffixes"):
            config["package"]["suffixes"] = options["package_suffixes"]

    # Handle auth options
    if options.get("auth_type") or options.get("auth_public"):
        config["auth"] = {}
        if options.get("auth_type"):
            config["auth"]["type"] = options["auth_type"]
        if options.get("auth_public"):
            config["auth"]["public"] = options["auth_public"]

    replicas = {}
    if options.get("min_replicas"):
        replicas["min"] = options["min_replicas"]
    if options.get("max_replicas"):
        replicas["max"] = options["max_replicas"]
    if len(replicas) > 0:
        config["replicas"] = replicas

    config.update(experimental.build_config_from_options(options))

    return config
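For context on the mapping above, here is a rough sketch of how a hypothetical CLI options dict would have been translated. All values are invented, and the experimental overrides applied at the end of build_config_from_options are ignored.

# Hypothetical inputs; keys mirror the option names handled above.
options = {
    "name": "my-app",
    "port": 8000,
    "tags": [{"project": "demo"}],
    "envs": [{"LOG_LEVEL": "debug"}],
    "cpu": "500m",
    "min_replicas": 1,
    "max_replicas": 2,
}

# build_config_from_options(options) would have produced approximately:
# {
#     "name": "my-app",
#     "port": 8000,
#     "tags": [{"project": "demo"}],
#     "environment": {"LOG_LEVEL": "debug"},
#     "resources": {"cpu": "500m"},
#     "replicas": {"min": 1, "max": 2},
# }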