truefoundry 0.5.3rc4__py3-none-any.whl → 0.5.3rc5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of truefoundry might be problematic.
- truefoundry/__init__.py +10 -1
- truefoundry/autodeploy/cli.py +2 -2
- truefoundry/cli/__main__.py +0 -4
- truefoundry/cli/util.py +12 -3
- truefoundry/common/auth_service_client.py +7 -4
- truefoundry/common/constants.py +3 -1
- truefoundry/common/credential_provider.py +7 -8
- truefoundry/common/exceptions.py +11 -7
- truefoundry/common/request_utils.py +96 -14
- truefoundry/common/servicefoundry_client.py +31 -29
- truefoundry/common/session.py +93 -0
- truefoundry/common/storage_provider_utils.py +331 -0
- truefoundry/common/utils.py +9 -9
- truefoundry/common/warnings.py +21 -0
- truefoundry/deploy/builder/builders/tfy_python_buildpack/dockerfile_template.py +8 -20
- truefoundry/deploy/cli/commands/deploy_command.py +4 -4
- truefoundry/deploy/lib/clients/servicefoundry_client.py +13 -14
- truefoundry/deploy/lib/dao/application.py +2 -2
- truefoundry/deploy/lib/dao/workspace.py +1 -1
- truefoundry/deploy/lib/session.py +1 -1
- truefoundry/deploy/v2/lib/deploy.py +2 -2
- truefoundry/deploy/v2/lib/deploy_workflow.py +1 -1
- truefoundry/deploy/v2/lib/patched_models.py +70 -4
- truefoundry/deploy/v2/lib/source.py +2 -1
- truefoundry/gateway/cli/cli.py +1 -22
- truefoundry/gateway/lib/entities.py +3 -8
- truefoundry/gateway/lib/models.py +0 -38
- truefoundry/ml/artifact/truefoundry_artifact_repo.py +33 -297
- truefoundry/ml/clients/servicefoundry_client.py +36 -15
- truefoundry/ml/exceptions.py +2 -1
- truefoundry/ml/log_types/artifacts/artifact.py +3 -2
- truefoundry/ml/log_types/artifacts/model.py +6 -5
- truefoundry/ml/log_types/artifacts/utils.py +2 -2
- truefoundry/ml/mlfoundry_api.py +6 -38
- truefoundry/ml/mlfoundry_run.py +6 -15
- truefoundry/ml/model_framework.py +2 -1
- truefoundry/ml/session.py +69 -97
- truefoundry/workflow/remote_filesystem/tfy_signed_url_client.py +42 -9
- truefoundry/workflow/remote_filesystem/tfy_signed_url_fs.py +126 -7
- {truefoundry-0.5.3rc4.dist-info → truefoundry-0.5.3rc5.dist-info}/METADATA +1 -1
- {truefoundry-0.5.3rc4.dist-info → truefoundry-0.5.3rc5.dist-info}/RECORD +43 -44
- truefoundry/deploy/lib/auth/servicefoundry_session.py +0 -61
- truefoundry/gateway/lib/client.py +0 -51
- truefoundry/ml/clients/entities.py +0 -8
- truefoundry/ml/clients/utils.py +0 -122
- {truefoundry-0.5.3rc4.dist-info → truefoundry-0.5.3rc5.dist-info}/WHEEL +0 -0
- {truefoundry-0.5.3rc4.dist-info → truefoundry-0.5.3rc5.dist-info}/entry_points.txt +0 -0
truefoundry/common/storage_provider_utils.py ADDED
@@ -0,0 +1,331 @@
+import logging
+import math
+import mmap
+import os
+from concurrent.futures import FIRST_EXCEPTION, Future, ThreadPoolExecutor, wait
+from enum import Enum
+from threading import Event
+from typing import List, NamedTuple, Optional
+
+import requests
+from rich.progress import Progress
+from tqdm.utils import CallbackIOWrapper
+
+from truefoundry.common.constants import ENV_VARS
+from truefoundry.common.exceptions import HttpRequestException
+from truefoundry.common.request_utils import (
+    augmented_raise_for_status,
+    cloud_storage_http_request,
+)
+from truefoundry.pydantic_v1 import BaseModel
+
+logger = logging.getLogger("truefoundry")
+
+
+class MultiPartUploadStorageProvider(str, Enum):
+    S3_COMPATIBLE = "S3_COMPATIBLE"
+    AZURE_BLOB = "AZURE_BLOB"
+
+
+class SignedURL(BaseModel):
+    signed_url: str
+    path: Optional[str] = None
+
+
+class MultiPartUpload(BaseModel):
+    storage_provider: MultiPartUploadStorageProvider
+    part_signed_urls: List[SignedURL]
+    s3_compatible_upload_id: Optional[str] = None
+    azure_blob_block_ids: Optional[List[str]] = None
+    finalize_signed_url: SignedURL
+
+
+class _FileMultiPartInfo(NamedTuple):
+    num_parts: int
+    part_size: int
+    file_size: int
+
+
+class _PartNumberEtag(NamedTuple):
+    part_number: int
+    etag: str
+
+
+_MIN_BYTES_REQUIRED_FOR_MULTIPART = 100 * 1024 * 1024
+# GCP/S3 Maximum number of parts per upload 10,000
+# Maximum number of blocks in a block blob 50,000 blocks
+# TODO: This number is artificially limited now. Later
+# we will ask for parts signed URI in batches rather than in a single
+# API Calls:
+# Create Multipart Upload (Returns maximum number of parts, size limit of
+# a single part, upload id for s3 etc )
+# Get me signed uris for first 500 parts
+# Upload 500 parts
+# Get me signed uris for the next 500 parts
+# Upload 500 parts
+# ...
+# Finalize the Multipart upload using the finalize signed url returned
+# by Create Multipart Upload or get a new one.
+_MAX_NUM_PARTS_FOR_MULTIPART = 1000
+# Azure Maximum size of a block in a block blob 4000 MiB
+# GCP/S3 Maximum size of an individual part in a multipart upload 5 GiB
+_MAX_PART_SIZE_BYTES_FOR_MULTIPART = 4 * 1024 * 1024 * 1000
+
+
+class _CallbackIOWrapperForMultiPartUpload(CallbackIOWrapper):
+    def __init__(self, callback, stream, method, length: int):
+        self.wrapper_setattr("_length", length)
+        super().__init__(callback, stream, method)
+
+    def __len__(self):
+        return self.wrapper_getattr("_length")
+
+
+def _align_part_size_with_mmap_allocation_granularity(part_size: int) -> int:
+    modulo = part_size % mmap.ALLOCATIONGRANULARITY
+    if modulo == 0:
+        return part_size
+
+    part_size += mmap.ALLOCATIONGRANULARITY - modulo
+    return part_size
+
+
+# Can not be less than 5 * 1024 * 1024
+_PART_SIZE_BYTES_FOR_MULTIPART = _align_part_size_with_mmap_allocation_granularity(
+    10 * 1024 * 1024
+)
+
+
+def decide_file_parts(
+    file_path: str,
+    multipart_upload_allowed: bool = not ENV_VARS.TFY_ARTIFACTS_DISABLE_MULTIPART_UPLOAD,
+    min_file_size_bytes_for_multipart: int = _MIN_BYTES_REQUIRED_FOR_MULTIPART,
+) -> _FileMultiPartInfo:
+    file_size = os.path.getsize(file_path)
+    if not multipart_upload_allowed or file_size < min_file_size_bytes_for_multipart:
+        return _FileMultiPartInfo(1, part_size=file_size, file_size=file_size)
+
+    ideal_num_parts = math.ceil(file_size / _PART_SIZE_BYTES_FOR_MULTIPART)
+    if ideal_num_parts <= _MAX_NUM_PARTS_FOR_MULTIPART:
+        return _FileMultiPartInfo(
+            ideal_num_parts,
+            part_size=_PART_SIZE_BYTES_FOR_MULTIPART,
+            file_size=file_size,
+        )
+
+    part_size_when_using_max_parts = math.ceil(file_size / _MAX_NUM_PARTS_FOR_MULTIPART)
+    part_size_when_using_max_parts = _align_part_size_with_mmap_allocation_granularity(
+        part_size_when_using_max_parts
+    )
+    if part_size_when_using_max_parts > _MAX_PART_SIZE_BYTES_FOR_MULTIPART:
+        raise ValueError(
+            f"file {file_path!r} is too big for upload. Multipart chunk"
+            f" size {part_size_when_using_max_parts} is higher"
+            f" than {_MAX_PART_SIZE_BYTES_FOR_MULTIPART}"
+        )
+    num_parts = math.ceil(file_size / part_size_when_using_max_parts)
+    return _FileMultiPartInfo(
+        num_parts, part_size=part_size_when_using_max_parts, file_size=file_size
+    )
+
+
+def _get_s3_compatible_completion_body(multi_parts: List[_PartNumberEtag]) -> str:
+    body = "<CompleteMultipartUpload>\n"
+    for part in multi_parts:
+        body += " <Part>\n"
+        body += f" <PartNumber>{part.part_number}</PartNumber>\n"
+        body += f" <ETag>{part.etag}</ETag>\n"
+        body += " </Part>\n"
+    body += "</CompleteMultipartUpload>"
+    return body
+
+
+def _get_azure_blob_completion_body(block_ids: List[str]) -> str:
+    body = "<BlockList>\n"
+    for block_id in block_ids:
+        body += f"<Uncommitted>{block_id}</Uncommitted> "
+    body += "</BlockList>"
+    return body
+
+
+def _file_part_upload(
+    url: str,
+    file_path: str,
+    seek: int,
+    length: int,
+    file_size: int,
+    abort_event: Optional[Event] = None,
+    method: str = "put",
+    exception_class=HttpRequestException,
+):
+    def callback(*_, **__):
+        if abort_event and abort_event.is_set():
+            raise Exception("aborting upload")
+
+    with open(file_path, "rb") as file:
+        with mmap.mmap(
+            file.fileno(),
+            length=min(file_size - seek, length),
+            offset=seek,
+            access=mmap.ACCESS_READ,
+        ) as mapped_file:
+            wrapped_file = _CallbackIOWrapperForMultiPartUpload(
+                callback, mapped_file, "read", len(mapped_file)
+            )
+            with cloud_storage_http_request(
+                method=method,
+                url=url,
+                data=wrapped_file,
+                exception_class=exception_class,
+            ) as response:
+                augmented_raise_for_status(response, exception_class=exception_class)
+                return response
+
+
+def s3_compatible_multipart_upload(
+    multipart_upload: MultiPartUpload,
+    local_file: str,
+    multipart_info: _FileMultiPartInfo,
+    executor: ThreadPoolExecutor,
+    progress_bar: Optional[Progress] = None,
+    abort_event: Optional[Event] = None,
+    exception_class=HttpRequestException,
+):
+    abort_event = abort_event or Event()
+    parts = []
+
+    if progress_bar:
+        multi_part_upload_progress = progress_bar.add_task(
+            f"[green]Uploading {local_file}:", start=True
+        )
+
+    def upload(part_number: int, seek: int) -> None:
+        logger.debug(
+            "Uploading part %d/%d of %s",
+            part_number,
+            multipart_info.num_parts,
+            local_file,
+        )
+        response = _file_part_upload(
+            url=multipart_upload.part_signed_urls[part_number].signed_url,
+            file_path=local_file,
+            seek=seek,
+            length=multipart_info.part_size,
+            file_size=multipart_info.file_size,
+            abort_event=abort_event,
+            exception_class=exception_class,
+        )
+        logger.debug(
+            "Uploaded part %d/%d of %s",
+            part_number,
+            multipart_info.num_parts,
+            local_file,
+        )
+        if progress_bar:
+            progress_bar.update(
+                multi_part_upload_progress,
+                advance=multipart_info.part_size,
+                total=multipart_info.file_size,
+            )
+        etag = response.headers["ETag"]
+        parts.append(_PartNumberEtag(etag=etag, part_number=part_number + 1))
+
+    futures: List[Future] = []
+    for part_number, seek in enumerate(
+        range(0, multipart_info.file_size, multipart_info.part_size)
+    ):
+        future = executor.submit(upload, part_number=part_number, seek=seek)
+        futures.append(future)
+
+    done, not_done = wait(futures, return_when=FIRST_EXCEPTION)
+    if len(not_done) > 0:
+        abort_event.set()
+    for future in not_done:
+        future.cancel()
+    for future in done:
+        if future.exception() is not None:
+            raise future.exception()
+
+    logger.debug("Finalizing multipart upload of %s", local_file)
+    parts = sorted(parts, key=lambda part: part.part_number)
+    response = requests.post(
+        multipart_upload.finalize_signed_url.signed_url,
+        data=_get_s3_compatible_completion_body(parts),
+        timeout=2 * 60,
+    )
+    response.raise_for_status()
+    logger.debug("Multipart upload of %s completed", local_file)
+
+
+def azure_multi_part_upload(
+    multipart_upload: MultiPartUpload,
+    local_file: str,
+    multipart_info: _FileMultiPartInfo,
+    executor: ThreadPoolExecutor,
+    progress_bar: Optional[Progress] = None,
+    abort_event: Optional[Event] = None,
+    exception_class=HttpRequestException,
+):
+    abort_event = abort_event or Event()
+
+    if progress_bar:
+        multi_part_upload_progress = progress_bar.add_task(
+            f"[green]Uploading {local_file}:", start=True
+        )
+
+    def upload(part_number: int, seek: int):
+        logger.debug(
+            "Uploading part %d/%d of %s",
+            part_number,
+            multipart_info.num_parts,
+            local_file,
+        )
+        _file_part_upload(
+            url=multipart_upload.part_signed_urls[part_number].signed_url,
+            file_path=local_file,
+            seek=seek,
+            length=multipart_info.part_size,
+            file_size=multipart_info.file_size,
+            abort_event=abort_event,
+            exception_class=exception_class,
+        )
+        if progress_bar:
+            progress_bar.update(
+                multi_part_upload_progress,
+                advance=multipart_info.part_size,
+                total=multipart_info.file_size,
+            )
+        logger.debug(
+            "Uploaded part %d/%d of %s",
+            part_number,
+            multipart_info.num_parts,
+            local_file,
+        )
+
+    futures: List[Future] = []
+    for part_number, seek in enumerate(
+        range(0, multipart_info.file_size, multipart_info.part_size)
+    ):
+        future = executor.submit(upload, part_number=part_number, seek=seek)
+        futures.append(future)
+
+    done, not_done = wait(futures, return_when=FIRST_EXCEPTION)
+    if len(not_done) > 0:
+        abort_event.set()
+    for future in not_done:
+        future.cancel()
+    for future in done:
+        if future.exception() is not None:
+            raise future.exception()
+
+    logger.debug("Finalizing multipart upload of %s", local_file)
+    if multipart_upload.azure_blob_block_ids:
+        response = requests.put(
+            multipart_upload.finalize_signed_url.signed_url,
+            data=_get_azure_blob_completion_body(
+                block_ids=multipart_upload.azure_blob_block_ids
+            ),
+            timeout=2 * 60,
+        )
+        response.raise_for_status()
+    logger.debug("Multipart upload of %s completed", local_file)
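The new module decides whether a file should be uploaded in one shot or in mmap-aligned parts, fans the parts out over a thread pool, and collects ETags (S3/GCS) or relies on pre-assigned block IDs (Azure) before finalizing. A minimal driver sketch, assuming the `MultiPartUpload` payload has already been fetched from the TrueFoundry backend — the signed URLs and upload id below are hypothetical placeholders:

    from concurrent.futures import ThreadPoolExecutor

    from truefoundry.common.storage_provider_utils import (
        MultiPartUpload,
        MultiPartUploadStorageProvider,
        SignedURL,
        decide_file_parts,
        s3_compatible_multipart_upload,
    )

    local_file = "model.tar.gz"
    info = decide_file_parts(local_file)  # 1 part => plain PUT; >1 => multipart

    if info.num_parts > 1:
        upload = MultiPartUpload(
            storage_provider=MultiPartUploadStorageProvider.S3_COMPATIBLE,
            part_signed_urls=[
                SignedURL(signed_url=f"https://bucket.example/part-{i}")  # placeholder
                for i in range(info.num_parts)
            ],
            s3_compatible_upload_id="example-upload-id",  # placeholder
            finalize_signed_url=SignedURL(signed_url="https://bucket.example/finalize"),
        )
        with ThreadPoolExecutor(max_workers=4) as executor:
            s3_compatible_multipart_upload(
                multipart_upload=upload,
                local_file=local_file,
                multipart_info=info,
                executor=executor,
            )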
truefoundry/common/utils.py CHANGED
@@ -37,25 +37,25 @@ class _TFYServersConfig(BaseSettings):
     mlfoundry_server_url: str
 
     @classmethod
-    def
-
+    def from_tfy_host(cls, tfy_host: str) -> "_TFYServersConfig":
+        tfy_host = tfy_host.strip("/")
         return cls(
-            tenant_host=urlparse(
-            servicefoundry_server_url=urljoin(
-            mlfoundry_server_url=urljoin(
+            tenant_host=urlparse(tfy_host).netloc,
+            servicefoundry_server_url=urljoin(tfy_host, API_SERVER_RELATIVE_PATH),
+            mlfoundry_server_url=urljoin(tfy_host, MLFOUNDRY_SERVER_RELATIVE_PATH),
         )
 
 
 _tfy_servers_config = None
 
 
-def get_tfy_servers_config(
+def get_tfy_servers_config(tfy_host: str) -> _TFYServersConfig:
     global _tfy_servers_config
     if _tfy_servers_config is None:
         if ENV_VARS.TFY_CLI_LOCAL_DEV_MODE:
             _tfy_servers_config = _TFYServersConfig()  # type: ignore[call-arg]
         else:
-            _tfy_servers_config = _TFYServersConfig.
+            _tfy_servers_config = _TFYServersConfig.from_tfy_host(tfy_host)
     return _tfy_servers_config
 
 
@@ -106,11 +106,11 @@ def validate_tfy_host(tfy_host: str) -> None:
 
 
 def resolve_tfy_host(tfy_host: Optional[str] = None) -> str:
-
+    tfy_host = tfy_host or ENV_VARS.TFY_HOST
+    if not tfy_host:
         raise ValueError(
             f"Either `host` should be provided using `--host <value>`, or `{TFY_HOST_ENV_KEY}` env must be set"
         )
-    tfy_host = tfy_host or ENV_VARS.TFY_HOST
     tfy_host = tfy_host.strip("/")
     validate_tfy_host(tfy_host)
     return tfy_host
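The reshuffled `resolve_tfy_host` now applies the `TFY_HOST` environment fallback before the emptiness check, so the error only fires when neither the argument nor the environment variable is set. A behavioural sketch (host values illustrative; `ENV_VARS` is read by the SDK's settings object):

    from truefoundry.common.utils import resolve_tfy_host

    # An explicit argument wins; trailing slashes are stripped before validation
    host = resolve_tfy_host("https://acme.truefoundry.example/")
    assert host == "https://acme.truefoundry.example"

    # With no argument and no TFY_HOST in the environment, the guard raises
    try:
        resolve_tfy_host()
    except ValueError as err:
        print(err)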
truefoundry/common/warnings.py ADDED
@@ -0,0 +1,21 @@
+import warnings
+
+
+class TrueFoundryDeprecationWarning(DeprecationWarning):
+    pass
+
+
+def surface_truefoundry_deprecation_warnings() -> None:
+    """Unmute TrueFoundry deprecation warnings."""
+    warnings.filterwarnings(
+        "default",
+        category=TrueFoundryDeprecationWarning,
+    )
+
+
+def suppress_truefoundry_deprecation_warnings() -> None:
+    """Mute TrueFoundry deprecation warnings."""
+    warnings.filterwarnings(
+        "ignore",
+        category=TrueFoundryDeprecationWarning,
+    )
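The dedicated warning category lets callers tune TrueFoundry deprecation noise without touching the global `DeprecationWarning` filter. A small sketch of the intended usage — `old_api` here is a hypothetical deprecated helper, not part of the package:

    import warnings

    from truefoundry.common.warnings import (
        TrueFoundryDeprecationWarning,
        suppress_truefoundry_deprecation_warnings,
        surface_truefoundry_deprecation_warnings,
    )

    def old_api() -> None:  # hypothetical deprecated helper
        warnings.warn(
            "old_api() is deprecated; use new_api() instead",
            category=TrueFoundryDeprecationWarning,
            stacklevel=2,
        )

    surface_truefoundry_deprecation_warnings()
    old_api()  # warning is emitted ("default" filter shows it once per location)

    suppress_truefoundry_deprecation_warnings()
    old_api()  # now muted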
truefoundry/deploy/builder/builders/tfy_python_buildpack/dockerfile_template.py CHANGED
@@ -1,4 +1,3 @@
-import os
 import shlex
 from typing import Dict, List, Optional
 
@@ -12,7 +11,10 @@ from truefoundry.deploy.builder.constants import (
     UV_CONF_BUILDKIT_SECRET_MOUNT,
     UV_CONF_SECRET_MOUNT_AS_ENV,
 )
-from truefoundry.deploy.v2.lib.patched_models import
+from truefoundry.deploy.v2.lib.patched_models import (
+    CUDAVersion,
+    _resolve_requirements_path,
+)
 
 # TODO (chiragjn): Switch to a non-root user inside the container
 
@@ -80,23 +82,6 @@ CUDA_VERSION_TO_IMAGE_TAG: Dict[str, str] = {
 }
 
 
-def resolve_requirements_txt_path(build_configuration: PythonBuild) -> Optional[str]:
-    if build_configuration.requirements_path:
-        return build_configuration.requirements_path
-
-    # TODO: what if there is a requirements.txt but user does not wants us to use it.
-    possible_requirements_txt_path = os.path.join(
-        build_configuration.build_context_path, "requirements.txt"
-    )
-
-    if os.path.isfile(possible_requirements_txt_path):
-        return os.path.relpath(
-            possible_requirements_txt_path, start=build_configuration.build_context_path
-        )
-
-    return None
-
-
 def generate_apt_install_command(apt_packages: Optional[List[str]]) -> Optional[str]:
     packages_list = None
     if apt_packages:
@@ -182,7 +167,10 @@ def generate_dockerfile_content(
     mount_python_package_manager_conf_secret: bool = False,
 ) -> str:
     # TODO (chiragjn): Handle recursive references to other requirements files e.g. `-r requirements-gpu.txt`
-    requirements_path =
+    requirements_path = _resolve_requirements_path(
+        build_context_path=build_configuration.build_context_path,
+        requirements_path=build_configuration.requirements_path,
+    )
     requirements_destination_path = (
         "/tmp/requirements.txt" if requirements_path else None
    )
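The local `resolve_requirements_txt_path` helper is removed in favour of `_resolve_requirements_path` from `patched_models`. Its body is not part of this diff, but judging by the removed function and the new call site, it presumably behaves roughly like this sketch (signature assumed from the call site above):

    import os
    from typing import Optional

    def _resolve_requirements_path(
        build_context_path: str,
        requirements_path: Optional[str],
    ) -> Optional[str]:
        # An explicitly configured requirements path wins
        if requirements_path:
            return requirements_path
        # Otherwise fall back to a requirements.txt sitting in the build context
        candidate = os.path.join(build_context_path, "requirements.txt")
        if os.path.isfile(candidate):
            return os.path.relpath(candidate, start=build_context_path)
        return None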
truefoundry/deploy/cli/commands/deploy_command.py CHANGED
@@ -61,11 +61,11 @@ def deploy_command(
 ):
     if ctx.invoked_subcommand is not None:
         return
-    from truefoundry.
+    from truefoundry.common.session import Session
     from truefoundry.deploy.v2.lib.deployable_patched_models import Application
 
     try:
-        _ =
+        _ = Session.new()
     except Exception as e:
         raise ClickException(message=str(e)) from e
 
@@ -126,10 +126,10 @@ def deploy_command(
 )
 @handle_exception_wrapper
 def deploy_workflow_command(name: str, file: str, workspace_fqn: str):
-    from truefoundry.
+    from truefoundry.common.session import Session
 
     try:
-        _ =
+        _ = Session.new()
     except Exception as e:
         raise ClickException(message=str(e)) from e
 
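Both commands now validate credentials up front: `Session.new()` (from the new `truefoundry/common/session.py`) is constructed purely for its side effect of ensuring the user is logged in, and any failure surfaces as a `ClickException` before the deploy spec is even parsed. The guard in isolation, as a sketch (`ensure_logged_in` is a hypothetical wrapper name):

    from click import ClickException

    from truefoundry.common.session import Session

    def ensure_logged_in() -> None:
        try:
            _ = Session.new()  # raises if no valid credentials are available
        except Exception as e:
            raise ClickException(message=str(e)) from e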
truefoundry/deploy/lib/clients/servicefoundry_client.py CHANGED
@@ -23,9 +23,9 @@ from truefoundry.common.servicefoundry_client import (
     check_min_cli_version,
     session_with_retries,
 )
+from truefoundry.common.session import Session
 from truefoundry.deploy.auto_gen import models as auto_gen_models
 from truefoundry.deploy.io.output_callback import OutputCallBack
-from truefoundry.deploy.lib.auth.servicefoundry_session import ServiceFoundrySession
 from truefoundry.deploy.lib.model.entity import (
     Application,
     CreateDockerRepositoryResponse,
@@ -78,17 +78,16 @@ def _upload_packaged_code(metadata, package_file):
 
 
 class ServiceFoundryServiceClient(BaseServiceFoundryServiceClient):
-
-
-        self._session: Optional[ServiceFoundrySession] = None
+    def __init__(self, init_session: bool = True, tfy_host: Optional[str] = None):
+        self._session: Optional[Session] = None
         if init_session:
-            if
-                logger.warning("Passed
-            self._session =
-
-        elif not
-            raise Exception("Neither session, not
-        super().__init__(
+            if tfy_host:
+                logger.warning(f"Passed tfy_host {tfy_host!r} will be ignored")
+            self._session = Session.new()
+            tfy_host = self._session.tfy_host
+        elif not tfy_host:
+            raise Exception("Neither session, not tfy_host provided")
+        super().__init__(tfy_host=tfy_host)
 
     def _get_header(self):
         if not self._session:
@@ -250,11 +249,11 @@ class ServiceFoundryServiceClient(BaseServiceFoundryServiceClient):
         }
         logger.debug(json.dumps(data))
         url = f"{self._api_server_url}/{VERSION_PREFIX}/deployment"
-
+        response = session_with_retries().post(
             url, json=data, headers=self._get_header()
         )
-
-        return Deployment.parse_obj(
+        response_data = request_handling(response)
+        return Deployment.parse_obj(response_data["deployment"])
 
     def _get_log_print_line(self, log: dict):
         timestamp = int(log["time"]) / 1e6
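The rewritten constructor makes the client's two modes explicit: with `init_session=True` (the default) the host is derived from the authenticated `Session` and any passed `tfy_host` is ignored with a warning; with `init_session=False` a `tfy_host` is mandatory. Roughly (host illustrative):

    from truefoundry.deploy.lib.clients.servicefoundry_client import (
        ServiceFoundryServiceClient,
    )

    # Authenticated: host comes from the stored session
    client = ServiceFoundryServiceClient()

    # Session-less: the host must be supplied explicitly
    anon = ServiceFoundryServiceClient(
        init_session=False,
        tfy_host="https://acme.truefoundry.example",  # illustrative
    )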
truefoundry/deploy/lib/dao/application.py CHANGED
@@ -180,7 +180,7 @@ def trigger_job(
         params=params if params else None,
     )
     jobRunName = result.jobRunName
-    previous_runs_url = f"{client.
+    previous_runs_url = f"{client.tfy_host.strip('/')}/deployments/{application_info.id}?tab=previousRuns"
     logger.info(
         f"{message}.\n"
         f"You can check the status of your job run at {previous_runs_url} with jobRunName: {jobRunName}"
@@ -259,6 +259,6 @@ def trigger_workflow(application_fqn: str, inputs: Optional[Dict[str, Any]] = None
     )
     logger.info(f"Started Execution for Workflow: {application_info.name}")
     executions_page = (
-        f"{client.
+        f"{client.tfy_host.strip('/')}/deployments/{application_info.id}?tab=executions"
     )
     logger.info(f"You can check the executions at {executions_page}")
truefoundry/deploy/lib/dao/workspace.py CHANGED
@@ -32,7 +32,7 @@ def create_workspace(
         resources=workspace_resources,
     )
 
-    url = f"{client.
+    url = f"{client.tfy_host.strip('/')}/workspaces"
     logger.info(
         "You can find your workspace: '%s' on the dashboard: %s", workspace.name, url
     )
truefoundry/deploy/lib/session.py CHANGED
@@ -68,7 +68,7 @@ def login(
     if api_key:
         token = Token(access_token=api_key, refresh_token=None)
     else:
-        auth_service = AuthServiceClient.
+        auth_service = AuthServiceClient.from_tfy_host(tfy_host=host)
         # interactive login
         token = _login_with_device_code(base_url=host, auth_service=auth_service)
 
truefoundry/deploy/v2/lib/deploy.py CHANGED
@@ -90,7 +90,7 @@ def _log_application_dashboard_url(deployment: Deployment, log_message: str):
     # TODO: is there any simpler way to get this? :cry
     client = ServiceFoundryServiceClient()
 
-    url = f"{client.
+    url = f"{client.tfy_host.strip('/')}/applications/{application_id}?tab=deployments"
     logger.info(log_message, url)
 
 
@@ -272,7 +272,7 @@ def deploy_component(
         if not last_status_printed or DeploymentTransitionStatus.is_failure_state(
             last_status_printed
         ):
-            deployment_tab_url = f"{client.
+            deployment_tab_url = f"{client.tfy_host.strip('/')}/applications/{response.applicationId}?tab=deployments"
             message = f"Deployment Failed. Please refer to the logs for additional details - {deployment_tab_url}"
             sys.exit(message)
     except KeyboardInterrupt:
truefoundry/deploy/v2/lib/deploy_workflow.py CHANGED
@@ -297,5 +297,5 @@ def deploy_workflow(
         workflow.name,
         deployment.fqn,
     )
-    deployment_url = f"{client.
+    deployment_url = f"{client.tfy_host.strip('/')}/applications/{deployment.applicationId}?tab=deployments"
     logger.info("You can find the application on the dashboard:- '%s'", deployment_url)