gpustack-runtime 0.1.39__py3-none-any.whl → 0.1.39.post1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- gpustack_runtime/__main__.py +0 -2
- gpustack_runtime/_version.py +2 -2
- gpustack_runtime/_version_appendix.py +1 -1
- gpustack_runtime/cmds/__init__.py +0 -2
- gpustack_runtime/cmds/deployer.py +17 -248
- gpustack_runtime/deployer/docker.py +38 -103
- gpustack_runtime/deployer/podman.py +38 -103
- gpustack_runtime/envs.py +0 -25
- {gpustack_runtime-0.1.39.dist-info → gpustack_runtime-0.1.39.post1.dist-info}/METADATA +2 -2
- {gpustack_runtime-0.1.39.dist-info → gpustack_runtime-0.1.39.post1.dist-info}/RECORD +13 -13
- {gpustack_runtime-0.1.39.dist-info → gpustack_runtime-0.1.39.post1.dist-info}/WHEEL +0 -0
- {gpustack_runtime-0.1.39.dist-info → gpustack_runtime-0.1.39.post1.dist-info}/entry_points.txt +0 -0
- {gpustack_runtime-0.1.39.dist-info → gpustack_runtime-0.1.39.post1.dist-info}/licenses/LICENSE +0 -0
gpustack_runtime/__main__.py
CHANGED
@@ -12,7 +12,6 @@ from . import deployer, detector
 from ._version import commit_id, version
 from .cmds import (
     CopyImagesSubCommand,
-    CreateRunnerWorkloadSubCommand,
     CreateWorkloadSubCommand,
     DeleteWorkloadsSubCommand,
     DeleteWorkloadSubCommand,
@@ -59,7 +58,6 @@ def main():
     subcommand_parser = parser.add_subparsers(
         help="gpustack-runtime command helpers",
     )
-    CreateRunnerWorkloadSubCommand.register(subcommand_parser)
     CreateWorkloadSubCommand.register(subcommand_parser)
     DeleteWorkloadSubCommand.register(subcommand_parser)
     DeleteWorkloadsSubCommand.register(subcommand_parser)

gpustack_runtime/_version.py
CHANGED
@@ -27,8 +27,8 @@ version_tuple: VERSION_TUPLE
 __commit_id__: COMMIT_ID
 commit_id: COMMIT_ID
 
-__version__ = version = '0.1.39'
-__version_tuple__ = version_tuple = (0, 1, 39)
+__version__ = version = '0.1.39.post1'
+__version_tuple__ = version_tuple = (0, 1, 39, 'post1')
 try:
     from ._version_appendix import git_commit
     __commit_id__ = commit_id = git_commit

gpustack_runtime/_version_appendix.py
CHANGED
@@ -1 +1 @@
-git_commit = "
+git_commit = "dea7237"

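Note: 0.1.39.post1 is a PEP 440 post-release, so it sorts after 0.1.39 and still satisfies a ">=0.1.39" specifier. A minimal check, assuming the third-party "packaging" library is available:

    from packaging.specifiers import SpecifierSet
    from packaging.version import Version

    # A post-release sorts after its base release under PEP 440 ordering rules.
    assert Version("0.1.39.post1") > Version("0.1.39")
    # An inclusive ">=" specifier therefore admits the post-release as well.
    assert Version("0.1.39.post1") in SpecifierSet(">=0.1.39")
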
gpustack_runtime/cmds/__init__.py
CHANGED
@@ -1,7 +1,6 @@
 from __future__ import annotations
 
 from .deployer import (
-    CreateRunnerWorkloadSubCommand,
     CreateWorkloadSubCommand,
     DeleteWorkloadsSubCommand,
     DeleteWorkloadSubCommand,
@@ -23,7 +22,6 @@ from .images import (
 
 __all__ = [
     "CopyImagesSubCommand",
-    "CreateRunnerWorkloadSubCommand",
     "CreateWorkloadSubCommand",
     "DeleteWorkloadSubCommand",
     "DeleteWorkloadsSubCommand",

gpustack_runtime/cmds/deployer.py
CHANGED
@@ -92,238 +92,6 @@ _IGNORE_SENSITIVE_ENVS_SUFFIX = (
 )
 
 
-class CreateRunnerWorkloadSubCommand(SubCommand):
-    """
-    Command to create a runner workload deployment.
-    """
-
-    backend: str
-    device: str
-    command_script: str | None
-    port: int
-    host_network: bool
-    check: bool
-    namespace: str
-    service: str
-    version: str
-    name: str
-    volume: str
-    extra_args: list[str]
-
-    @staticmethod
-    def register(parser: _SubParsersAction):
-        deploy_parser = parser.add_parser(
-            "create-runner",
-            help="Create a runner workload deployment",
-        )
-
-        deploy_parser.add_argument(
-            "--backend",
-            type=str,
-            help="Backend to use (default: detect from current environment)",
-            choices=supported_backends(),
-        )
-
-        deploy_parser.add_argument(
-            "--device",
-            type=str,
-            help="Device to use, multiple devices join by comma (default: all devices)",
-            default="all",
-        )
-
-        deploy_parser.add_argument(
-            "--command-script-file",
-            type=str,
-            help="Path of command script for the workload",
-        )
-
-        deploy_parser.add_argument(
-            "--port",
-            type=int,
-            help="Port to expose",
-        )
-
-        deploy_parser.add_argument(
-            "--host-network",
-            action="store_true",
-            help="Use host network (default: False)",
-            default=False,
-        )
-
-        deploy_parser.add_argument(
-            "--check",
-            action="store_true",
-            help="Enable health check, needs --port (default: False)",
-            default=False,
-        )
-
-        deploy_parser.add_argument(
-            "--namespace",
-            type=str,
-            help="Namespace of the runner",
-        )
-
-        deploy_parser.add_argument(
-            "service",
-            type=str,
-            help="Service of the runner",
-        )
-
-        deploy_parser.add_argument(
-            "version",
-            type=str,
-            help="Version of the runner",
-        )
-
-        deploy_parser.add_argument(
-            "volume",
-            type=str,
-            help="Volume to mount",
-        )
-
-        deploy_parser.add_argument(
-            "extra_args",
-            nargs=REMAINDER,
-            help="Extra arguments for the runner",
-        )
-
-        deploy_parser.set_defaults(func=CreateRunnerWorkloadSubCommand)
-
-    def __init__(self, args: Namespace):
-        self.backend = args.backend
-        self.device = args.device
-        self.command_script = None
-        self.port = args.port
-        self.host_network = args.host_network
-        self.check = args.check
-        self.namespace = args.namespace
-        self.service = args.service
-        self.version = args.version
-        self.name = f"{args.service}-{args.version}".lower().replace(".", "-")
-        self.volume = args.volume
-        self.extra_args = args.extra_args
-
-        if not self.name or not self.volume:
-            msg = "The name and volume arguments are required."
-            raise ValueError(msg)
-
-        if args.command_script_file:
-            command_script_file = Path(args.command_script_file)
-            if not command_script_file.is_file():
-                msg = f"The command script file '{command_script_file}' does not exist."
-                raise ValueError(msg)
-            self.command_script = command_script_file.read_text(
-                encoding="utf-8",
-            ).strip()
-
-    def run(self):
-        env = [
-            ContainerEnv(
-                name=name,
-                value=value,
-            )
-            for name, value in os.environ.items()
-            if not name.startswith(_IGNORE_ENVS_PREFIX)
-            and not name.endswith(_IGNORE_ENVS_SUFFIX)
-        ]
-        if self.backend:
-            resources = ContainerResources(
-                **{
-                    v: self.device
-                    for k, v in envs.GPUSTACK_RUNTIME_DETECT_BACKEND_MAP_RESOURCE_KEY.items()
-                    if k == self.backend
-                },
-            )
-        else:
-            resources = ContainerResources(
-                **{
-                    envs.GPUSTACK_RUNTIME_DEPLOY_AUTOMAP_RESOURCE_KEY: self.device,
-                },
-            )
-        mounts = [
-            ContainerMount(
-                path=self.volume,
-            ),
-        ]
-        execution = ContainerExecution(
-            command_script=self.command_script,
-            args=self.extra_args,
-        )
-        ports = (
-            [
-                ContainerPort(
-                    internal=self.port,
-                ),
-            ]
-            if self.port
-            else None
-        )
-        checks = (
-            [
-                ContainerCheck(
-                    delay=60,
-                    interval=10,
-                    timeout=5,
-                    retries=6,
-                    tcp=ContainerCheckTCP(port=self.port),
-                    teardown=True,
-                ),
-            ]
-            if self.check and self.port
-            else None
-        )
-        plan = WorkloadPlan(
-            name=self.name,
-            namespace=self.namespace,
-            host_network=self.host_network,
-            containers=[
-                Container(
-                    restart_policy=(
-                        ContainerRestartPolicyEnum.NEVER
-                        if not self.check
-                        else ContainerRestartPolicyEnum.ALWAYS
-                    ),
-                    image=f"gpustack/runner:{self.backend if self.backend else 'Host'}X.Y-{self.service}{self.version}",
-                    name=self.name,
-                    envs=env,
-                    resources=resources,
-                    mounts=mounts,
-                    execution=execution,
-                    ports=ports,
-                    checks=checks,
-                ),
-            ],
-        )
-        create_workload(plan)
-        print(f"Created workload '{self.name}'.")
-
-        while True:
-            st = get_workload(
-                name=self.name,
-                namespace=self.namespace,
-            )
-            if st and st.state not in (
-                WorkloadStatusStateEnum.PENDING,
-                WorkloadStatusStateEnum.INITIALIZING,
-            ):
-                break
-            time.sleep(1)
-
-        print("\033[2J\033[H", end="")
-
-        async def stream_logs():
-            logs_result = await async_logs_workload(
-                name=self.name,
-                namespace=self.namespace,
-                tail=-1,
-                follow=True,
-            )
-            async for line in logs_result:
-                print(line.decode("utf-8").rstrip())
-
-        asyncio.run(stream_logs())
-
-
 class CreateWorkloadSubCommand(SubCommand):
     """
     Command to create a workload deployment.
@@ -358,8 +126,7 @@ class CreateWorkloadSubCommand(SubCommand):
         deploy_parser.add_argument(
             "--device",
             type=str,
-            help="Device to use, multiple devices join by comma (default: all devices)",
-            default="all",
+            help="Device to use, multiple devices join by comma, all for all devices",
         )
 
         deploy_parser.add_argument(
@@ -456,20 +223,22 @@ class CreateWorkloadSubCommand(SubCommand):
             if not name.startswith(_IGNORE_ENVS_PREFIX)
             and not name.endswith(_IGNORE_ENVS_SUFFIX)
         ]
-        if self.backend:
-            resources = ContainerResources(
-                **{
-                    v: self.device
-                    for k, v in envs.GPUSTACK_RUNTIME_DETECT_BACKEND_MAP_RESOURCE_KEY.items()
-                    if k == self.backend
-                },
-            )
-        else:
-            resources = ContainerResources(
-                **{
-                    envs.GPUSTACK_RUNTIME_DEPLOY_AUTOMAP_RESOURCE_KEY: self.device,
-                },
-            )
+        resources = None
+        if self.device:
+            if self.backend:
+                resources = ContainerResources(
+                    **{
+                        v: self.device
+                        for k, v in envs.GPUSTACK_RUNTIME_DETECT_BACKEND_MAP_RESOURCE_KEY.items()
+                        if k == self.backend
+                    },
+                )
+            else:
+                resources = ContainerResources(
+                    **{
+                        envs.GPUSTACK_RUNTIME_DEPLOY_AUTOMAP_RESOURCE_KEY: self.device,
+                    },
+                )
         mounts = [
             ContainerMount(
                 path=self.volume,

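Note: the hunk above changes CreateWorkloadSubCommand.run() so that container resources are only built when a --device value is given; with no device, resources stays None. A self-contained sketch of that selection logic; the mapping values below are illustrative stand-ins, not the real entries behind envs.GPUSTACK_RUNTIME_DETECT_BACKEND_MAP_RESOURCE_KEY and envs.GPUSTACK_RUNTIME_DEPLOY_AUTOMAP_RESOURCE_KEY:

    # Hypothetical stand-in values, for illustration only.
    BACKEND_MAP_RESOURCE_KEY = {"cuda": "nvidia.com/gpu"}
    AUTOMAP_RESOURCE_KEY = "auto"

    def pick_resources(backend: str | None, device: str | None) -> dict[str, str] | None:
        if not device:
            # 0.1.39.post1 behavior: no device requested, no resources attached.
            return None
        if backend:
            # Map the chosen backend to its resource key, e.g. devices "0,1" for CUDA.
            return {v: device for k, v in BACKEND_MAP_RESOURCE_KEY.items() if k == backend}
        # No explicit backend: fall back to the auto-mapping resource key.
        return {AUTOMAP_RESOURCE_KEY: device}

    print(pick_resources("cuda", "0,1"))  # {'nvidia.com/gpu': '0,1'}
    print(pick_resources(None, None))     # None
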
gpustack_runtime/deployer/docker.py
CHANGED
@@ -1,12 +1,14 @@
 from __future__ import annotations
 
 import contextlib
+import io
 import json
 import logging
 import operator
 import os
 import socket
 import sys
+import tarfile
 from dataclasses import dataclass, field
 from functools import lru_cache, reduce
 from math import ceil
@@ -303,10 +305,6 @@ class DockerDeployer(Deployer):
     """
     Client for interacting with the Docker daemon.
     """
-    _container_ephemeral_files_dir: Path | None = None
-    """
-    Directory for ephemeral files inside containers, internal use only.
-    """
     _mutate_create_options: Callable[[dict[str, Any]], dict[str, Any]] | None = None
     """
     Function to handle mirrored deployment, internal use only.
@@ -383,48 +381,6 @@ class DockerDeployer(Deployer):
 
         return wrapper
 
-    @staticmethod
-    def _create_ephemeral_files(
-        workload: DockerWorkloadPlan,
-    ) -> dict[tuple[int, str], str]:
-        """
-        Create ephemeral files as local file for the workload.
-
-        Returns:
-            A mapping from (container index, configured path) to actual filename.
-
-        Raises:
-            OperationError:
-                If the ephemeral files fail to create.
-
-        """
-        # Map (container index, configured path) to actual filename.
-        ephemeral_filename_mapping: dict[tuple[int, str], str] = {}
-        ephemeral_files: list[tuple[str, str, int]] = []
-        for ci, c in enumerate(workload.containers):
-            for fi, f in enumerate(c.files or []):
-                if f.content is not None:
-                    fn = f"{workload.name}-{ci}-{fi}"
-                    ephemeral_filename_mapping[(ci, f.path)] = fn
-                    ephemeral_files.append((fn, f.content, f.mode))
-        if not ephemeral_filename_mapping:
-            return ephemeral_filename_mapping
-
-        # Create ephemeral files directory if not exists.
-        try:
-            for fn, fc, fm in ephemeral_files:
-                fp = envs.GPUSTACK_RUNTIME_DOCKER_EPHEMERAL_FILES_DIR.joinpath(fn)
-                with fp.open("w", encoding="utf-8") as f:
-                    f.write(fc)
-                    f.flush()
-                fp.chmod(fm)
-                logger.debug("Created local file %s with mode %s", fp, oct(fm))
-        except OSError as e:
-            msg = "Failed to create ephemeral files"
-            raise OperationError(msg) from e
-
-        return ephemeral_filename_mapping
-
     def _create_ephemeral_volumes(self, workload: DockerWorkloadPlan) -> dict[str, str]:
         """
         Create ephemeral volumes for the workload.
@@ -717,12 +673,10 @@ class DockerDeployer(Deployer):
         else:
             return d_container
 
+    @staticmethod
     def _append_container_mounts(
-        self,
         create_options: dict[str, Any],
         c: Container,
-        ci: int,
-        ephemeral_filename_mapping: dict[tuple[int, str] : str],
         ephemeral_volume_name_mapping: dict[str, str],
     ):
         """
@@ -738,17 +692,7 @@ class DockerDeployer(Deployer):
                 target="",
             )
 
-            if f.content is not None:
-                # Ephemeral file, use from local ephemeral files directory.
-                if (ci, f.path) not in ephemeral_filename_mapping:
-                    continue
-                fn = ephemeral_filename_mapping[(ci, f.path)]
-                path = str(
-                    self._container_ephemeral_files_dir.joinpath(fn),
-                )
-                binding["Source"] = path
-                binding["Target"] = f"/{f.path.lstrip('/')}"
-            elif f.path:
+            if f.content is None and f.path:
                 # Host file, bind directly.
                 binding["Source"] = f.path
                 binding["Target"] = f.path
@@ -860,10 +804,39 @@ class DockerDeployer(Deployer):
 
         return healthcheck
 
+    @staticmethod
+    def _upload_ephemeral_files(
+        c: Container,
+        container: docker.models.containers.Container,
+    ):
+        if not c.files:
+            return
+
+        f_tar = io.BytesIO()
+        with tarfile.open(fileobj=f_tar, mode="w") as tar:
+            for f in c.files:
+                if f.content is None or not f.path:
+                    continue
+                fc_bytes = f.content.encode("utf-8")
+                info = tarfile.TarInfo(name=f.path.lstrip("/"))
+                info.size = len(fc_bytes)
+                info.mode = f.mode
+                tar.addfile(tarinfo=info, fileobj=io.BytesIO(fc_bytes))
+        if f_tar.getbuffer().nbytes == 0:
+            return
+
+        f_tar.seek(0)
+        uploaded = container.put_archive(
+            path="/",
+            data=f_tar.getvalue(),
+        )
+        if not uploaded:
+            msg = f"Failed to upload ephemeral files to container {container.name}"
+            raise OperationError(msg)
+
     def _create_containers(
         self,
         workload: DockerWorkloadPlan,
-        ephemeral_filename_mapping: dict[tuple[int, str] : str],
         ephemeral_volume_name_mapping: dict[str, str],
         pause_container: docker.models.containers.Container,
     ) -> (
@@ -1106,8 +1079,6 @@ class DockerDeployer(Deployer):
             self._append_container_mounts(
                 create_options,
                 c,
-                ci,
-                ephemeral_filename_mapping,
                 ephemeral_volume_name_mapping,
             )
 
@@ -1149,6 +1120,10 @@ class DockerDeployer(Deployer):
                 detach=detach,
                 **create_options,
             )
+
+            # Upload ephemeral files into the container.
+            self._upload_ephemeral_files(c, d_container)
+
         except docker.errors.APIError as e:
             msg = f"Failed to create container {container_name}{_detail_api_call_error(e)}"
             raise OperationError(msg) from e
@@ -1198,9 +1173,6 @@ class DockerDeployer(Deployer):
     def __init__(self):
         super().__init__(_NAME)
         self._client = self._get_client()
-        self._container_ephemeral_files_dir = (
-            envs.GPUSTACK_RUNTIME_DOCKER_EPHEMERAL_FILES_DIR
-        )
 
     def _prepare_create(self):
         """
@@ -1434,25 +1406,6 @@ class DockerDeployer(Deployer):
 
         self._mutate_create_options = mutate_create_options
 
-        # Extract ephemeral files dir mutation if any.
-        if mirrored_mounts:
-            e_target = str(envs.GPUSTACK_RUNTIME_DOCKER_EPHEMERAL_FILES_DIR)
-            b_source = ""
-            b_target = ""
-            for m in mirrored_mounts:
-                c_target = m.get("Destination", "///")
-                if (
-                    e_target == c_target or e_target.startswith(f"{c_target}/")
-                ) and len(c_target) >= len(b_target):
-                    b_source = m.get("Source")
-                    b_target = c_target
-            if b_source:
-                result = Path(b_source)
-                if e_target != b_target:
-                    b_subpath = e_target.removeprefix(b_target)
-                    result = result.joinpath(b_subpath.lstrip("/"))
-                self._container_ephemeral_files_dir = result
-
     def _find_self_container(
         self,
         self_container_id: str,
@@ -1536,12 +1489,6 @@ class DockerDeployer(Deployer):
         if logger.isEnabledFor(logging.DEBUG):
             logger.debug("Creating workload:\n%s", workload.to_yaml())
 
-        # Create ephemeral file if needed,
-        # (container index, configured path): <actual filename>
-        ephemeral_filename_mapping: dict[tuple[int, str] : str] = (
-            self._create_ephemeral_files(workload)
-        )
-
         # Create ephemeral volumes if needed,
         # <configured volume name>: <actual volume name>
         ephemeral_volume_name_mapping: dict[str, str] = self._create_ephemeral_volumes(
@@ -1554,7 +1501,6 @@ class DockerDeployer(Deployer):
         # Create init/run containers.
         init_containers, run_containers = self._create_containers(
             workload,
-            ephemeral_filename_mapping,
             ephemeral_volume_name_mapping,
             pause_container,
         )
@@ -1696,17 +1642,6 @@ class DockerDeployer(Deployer):
             msg = f"Failed to delete volumes for workload {name}{_detail_api_call_error(e)}"
             raise OperationError(msg) from e
 
-        # Remove all ephemeral files for the workload.
-        try:
-            for fp in envs.GPUSTACK_RUNTIME_DOCKER_EPHEMERAL_FILES_DIR.glob(
-                f"{name}-*",
-            ):
-                if fp.is_file():
-                    fp.unlink(missing_ok=True)
-        except OSError as e:
-            msg = f"Failed to delete ephemeral files for workload {name}"
-            raise OperationError(msg) from e
-
         return workload
 
     @_supported

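Note: the net effect of the docker.py hunks above (mirrored for Podman in the next file) is that ephemeral file contents are no longer written into a host cache directory and bind-mounted; they are packed into an in-memory tar archive and pushed into the freshly created container via put_archive(). A standalone sketch of that mechanism with the Docker SDK; the image, container name, and file path below are illustrative only:

    import io
    import tarfile

    import docker  # Docker SDK for Python; the wheel already requires docker>=7.1.0

    client = docker.from_env()
    container = client.containers.create("alpine:3", command="sleep 60", name="demo-upload")

    content = "#!/bin/sh\necho hello\n"
    data = content.encode("utf-8")

    buf = io.BytesIO()
    with tarfile.open(fileobj=buf, mode="w") as tar:
        info = tarfile.TarInfo(name="opt/demo/run.sh")  # member path, relative to extraction root
        info.size = len(data)
        info.mode = 0o755
        tar.addfile(tarinfo=info, fileobj=io.BytesIO(data))

    # put_archive() extracts the tar stream inside the container; with path="/"
    # the member above lands at /opt/demo/run.sh. It returns True on success.
    if not container.put_archive(path="/", data=buf.getvalue()):
        raise RuntimeError("failed to upload files into the container")
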
gpustack_runtime/deployer/podman.py
CHANGED
@@ -1,12 +1,14 @@
 from __future__ import annotations
 
 import contextlib
+import io
 import json
 import logging
 import operator
 import os
 import socket
 import sys
+import tarfile
 from dataclasses import dataclass, field
 from functools import lru_cache, reduce
 from math import ceil
@@ -306,10 +308,6 @@ class PodmanDeployer(Deployer):
     """
     Client for interacting with the Podman daemon.
     """
-    _container_ephemeral_files_dir: Path | None = None
-    """
-    Directory for ephemeral files inside containers, internal use only.
-    """
     _mutate_create_options: Callable[[dict[str, Any]], dict[str, Any]] | None = None
     """
     Function to handle mirrored deployment, internal use only.
@@ -386,48 +384,6 @@ class PodmanDeployer(Deployer):
 
         return wrapper
 
-    @staticmethod
-    def _create_ephemeral_files(
-        workload: PodmanWorkloadPlan,
-    ) -> dict[tuple[int, str], str]:
-        """
-        Create ephemeral files as local file for the workload.
-
-        Returns:
-            A mapping from (container index, configured path) to actual filename.
-
-        Raises:
-            OperationError:
-                If the ephemeral files fail to create.
-
-        """
-        # Map (container index, configured path) to actual filename.
-        ephemeral_filename_mapping: dict[tuple[int, str], str] = {}
-        ephemeral_files: list[tuple[str, str, int]] = []
-        for ci, c in enumerate(workload.containers):
-            for fi, f in enumerate(c.files or []):
-                if f.content is not None:
-                    fn = f"{workload.name}-{ci}-{fi}"
-                    ephemeral_filename_mapping[(ci, f.path)] = fn
-                    ephemeral_files.append((fn, f.content, f.mode))
-        if not ephemeral_filename_mapping:
-            return ephemeral_filename_mapping
-
-        # Create ephemeral files directory if not exists.
-        try:
-            for fn, fc, fm in ephemeral_files:
-                fp = envs.GPUSTACK_RUNTIME_PODMAN_EPHEMERAL_FILES_DIR.joinpath(fn)
-                with fp.open("w", encoding="utf-8") as f:
-                    f.write(fc)
-                    f.flush()
-                fp.chmod(fm)
-                logger.debug("Created local file %s with mode %s", fp, oct(fm))
-        except OSError as e:
-            msg = "Failed to create ephemeral files"
-            raise OperationError(msg) from e
-
-        return ephemeral_filename_mapping
-
     def _create_ephemeral_volumes(self, workload: PodmanWorkloadPlan) -> dict[str, str]:
         """
         Create ephemeral volumes for the workload.
@@ -715,12 +671,10 @@ class PodmanDeployer(Deployer):
         else:
             return d_container
 
+    @staticmethod
     def _append_container_mounts(
-        self,
         create_options: dict[str, Any],
         c: Container,
-        ci: int,
-        ephemeral_filename_mapping: dict[tuple[int, str] : str],
         ephemeral_volume_name_mapping: dict[str, str],
     ):
         """
@@ -736,17 +690,7 @@ class PodmanDeployer(Deployer):
                 "target": "",
             }
 
-            if f.content is not None:
-                # Ephemeral file, use from local ephemeral files directory.
-                if (ci, f.path) not in ephemeral_filename_mapping:
-                    continue
-                fn = ephemeral_filename_mapping[(ci, f.path)]
-                path = str(
-                    self._container_ephemeral_files_dir.joinpath(fn),
-                )
-                binding["source"] = path
-                binding["target"] = f"/{f.path.lstrip('/')}"
-            elif f.path:
+            if f.content is None and f.path:
                 # Host file, bind directly.
                 binding["source"] = f.path
                 binding["target"] = f.path
@@ -858,10 +802,39 @@ class PodmanDeployer(Deployer):
 
         return healthcheck
 
+    @staticmethod
+    def _upload_ephemeral_files(
+        c: Container,
+        container: podman.domain.containers.Container,
+    ):
+        if not c.files:
+            return
+
+        f_tar = io.BytesIO()
+        with tarfile.open(fileobj=f_tar, mode="w") as tar:
+            for f in c.files:
+                if f.content is None or not f.path:
+                    continue
+                fc_bytes = f.content.encode("utf-8")
+                info = tarfile.TarInfo(name=f.path.lstrip("/"))
+                info.size = len(fc_bytes)
+                info.mode = f.mode
+                tar.addfile(tarinfo=info, fileobj=io.BytesIO(fc_bytes))
+        if f_tar.getbuffer().nbytes == 0:
+            return
+
+        f_tar.seek(0)
+        uploaded = container.put_archive(
+            path="/",
+            data=f_tar.getvalue(),
+        )
+        if not uploaded:
+            msg = f"Failed to upload ephemeral files to container {container.name}"
+            raise OperationError(msg)
+
     def _create_containers(
         self,
         workload: PodmanWorkloadPlan,
-        ephemeral_filename_mapping: dict[tuple[int, str] : str],
         ephemeral_volume_name_mapping: dict[str, str],
         pause_container: podman.domain.containers.Container,
     ) -> (
@@ -1077,8 +1050,6 @@ class PodmanDeployer(Deployer):
             self._append_container_mounts(
                 create_options,
                 c,
-                ci,
-                ephemeral_filename_mapping,
                 ephemeral_volume_name_mapping,
            )
 
@@ -1125,6 +1096,10 @@ class PodmanDeployer(Deployer):
                 detach=detach,
                 **create_options,
             )
+
+            # Upload ephemeral files into the container.
+            self._upload_ephemeral_files(c, d_container)
+
         except podman.errors.APIError as e:
             msg = f"Failed to create container {container_name}{_detail_api_call_error(e)}"
             raise OperationError(msg) from e
@@ -1174,9 +1149,6 @@ class PodmanDeployer(Deployer):
     def __init__(self):
         super().__init__(_NAME)
         self._client = self._get_client()
-        self._container_ephemeral_files_dir = (
-            envs.GPUSTACK_RUNTIME_PODMAN_EPHEMERAL_FILES_DIR
-        )
 
     def _prepare_create(self):
         """
@@ -1370,25 +1342,6 @@ class PodmanDeployer(Deployer):
 
         self._mutate_create_options = mutate_create_options
 
-        # Extract ephemeral files dir mutation if any.
-        if mirrored_mounts:
-            e_target = str(envs.GPUSTACK_RUNTIME_PODMAN_EPHEMERAL_FILES_DIR)
-            b_source = ""
-            b_target = ""
-            for m in mirrored_mounts:
-                c_target = m.get("Destination", "///")
-                if (
-                    e_target == c_target or e_target.startswith(f"{c_target}/")
-                ) and len(c_target) >= len(b_target):
-                    b_source = m.get("Source")
-                    b_target = c_target
-            if b_source:
-                result = Path(b_source)
-                if e_target != b_target:
-                    b_subpath = e_target.removeprefix(b_target)
-                    result = result.joinpath(b_subpath.lstrip("/"))
-                self._container_ephemeral_files_dir = result
-
     def _find_self_container(
         self,
         self_container_id: str,
@@ -1472,12 +1425,6 @@ class PodmanDeployer(Deployer):
         if logger.isEnabledFor(logging.DEBUG):
             logger.debug("Creating workload:\n%s", workload.to_yaml())
 
-        # Create ephemeral file if needed,
-        # (container index, configured path): <actual filename>
-        ephemeral_filename_mapping: dict[tuple[int, str] : str] = (
-            self._create_ephemeral_files(workload)
-        )
-
         # Create ephemeral volumes if needed,
         # <configured volume name>: <actual volume name>
         ephemeral_volume_name_mapping: dict[str, str] = self._create_ephemeral_volumes(
@@ -1490,7 +1437,6 @@ class PodmanDeployer(Deployer):
         # Create init/run containers.
         init_containers, run_containers = self._create_containers(
             workload,
-            ephemeral_filename_mapping,
             ephemeral_volume_name_mapping,
             pause_container,
         )
@@ -1631,17 +1577,6 @@ class PodmanDeployer(Deployer):
             msg = f"Failed to delete volumes for workload {name}{_detail_api_call_error(e)}"
             raise OperationError(msg) from e
 
-        # Remove all ephemeral files for the workload.
-        try:
-            for fp in envs.GPUSTACK_RUNTIME_PODMAN_EPHEMERAL_FILES_DIR.glob(
-                f"{name}-*",
-            ):
-                if fp.is_file():
-                    fp.unlink(missing_ok=True)
-        except OSError as e:
-            msg = f"Failed to delete ephemeral files for workload {name}"
-            raise OperationError(msg) from e
-
         return workload
 
     @_supported

gpustack_runtime/envs.py
CHANGED
@@ -228,10 +228,6 @@ if TYPE_CHECKING:
     """
     Container image used for unhealthy restart container in Docker.
     """
-    GPUSTACK_RUNTIME_DOCKER_EPHEMERAL_FILES_DIR: Path | None = None
-    """
-    Directory for storing ephemeral files for Docker.
-    """
     GPUSTACK_RUNTIME_DOCKER_MUTE_ORIGINAL_HEALTHCHECK: bool = True
     """
     Mute the original healthcheck of the container in Docker.
@@ -297,11 +293,6 @@ if TYPE_CHECKING:
     Container image used for unhealthy restart container in Podman.
     Default is same as `GPUSTACK_RUNTIME_DOCKER_UNHEALTHY_RESTART_IMAGE`.
     """
-    GPUSTACK_RUNTIME_PODMAN_EPHEMERAL_FILES_DIR: Path | None = None
-    """
-    Directory for storing ephemeral files for Podman.
-    Default is same as `GPUSTACK_RUNTIME_DOCKER_EPHEMERAL_FILES_DIR`.
-    """
     GPUSTACK_RUNTIME_PODMAN_MUTE_ORIGINAL_HEALTHCHECK: bool = True
     """
     Mute the original healthcheck of the container in Podman.
@@ -526,12 +517,6 @@ variables: dict[str, Callable[[], Any]] = {
         "GPUSTACK_RUNTIME_DOCKER_UNHEALTHY_RESTART_IMAGE",
         "gpustack/runtime:health",
     ),
-    "GPUSTACK_RUNTIME_DOCKER_EPHEMERAL_FILES_DIR": lambda: mkdir_path(
-        getenv(
-            "GPUSTACK_RUNTIME_DOCKER_EPHEMERAL_FILES_DIR",
-            expand_path("~/.cache/gpustack-runtime"),
-        ),
-    ),
     "GPUSTACK_RUNTIME_DOCKER_MUTE_ORIGINAL_HEALTHCHECK": lambda: to_bool(
         getenv("GPUSTACK_RUNTIME_DOCKER_MUTE_ORIGINAL_HEALTHCHECK", "1"),
     ),
@@ -622,16 +607,6 @@ variables: dict[str, Callable[[], Any]] = {
         ],
         default="gpustack/runtime:health",
     ),
-    "GPUSTACK_RUNTIME_PODMAN_EPHEMERAL_FILES_DIR": lambda: mkdir_path(
-        getenvs(
-            keys=[
-                "GPUSTACK_RUNTIME_PODMAN_EPHEMERAL_FILES_DIR",
-                # Fallback to Docker's setting.
-                "GPUSTACK_RUNTIME_DOCKER_EPHEMERAL_FILES_DIR",
-            ],
-            default=expand_path("~/.cache/gpustack-runtime"),
-        ),
-    ),
     "GPUSTACK_RUNTIME_PODMAN_MUTE_ORIGINAL_HEALTHCHECK": lambda: to_bool(
         getenvs(
             keys=[

{gpustack_runtime-0.1.39.dist-info → gpustack_runtime-0.1.39.post1.dist-info}/METADATA
RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: gpustack-runtime
-Version: 0.1.39
+Version: 0.1.39.post1
 Summary: GPUStack Runtime is library for detecting GPU resources and launching GPU workloads.
 Project-URL: Homepage, https://github.com/gpustack/runtime
 Project-URL: Bug Tracker, https://github.com/gpustack/gpustack/issues
@@ -15,7 +15,7 @@ Classifier: Programming Language :: Python :: 3.13
 Requires-Python: >=3.10
 Requires-Dist: argcomplete>=3.6.3
 Requires-Dist: docker>=7.1.0
-Requires-Dist: gpustack-runner>=0.1.23.
+Requires-Dist: gpustack-runner>=0.1.23.post2
 Requires-Dist: kubernetes>=33.1.0
 Requires-Dist: nvidia-ml-py>=13.580.65
 Requires-Dist: podman==5.6.0

{gpustack_runtime-0.1.39.dist-info → gpustack_runtime-0.1.39.post1.dist-info}/RECORD
RENAMED
@@ -1,21 +1,21 @@
 gpustack_runtime/__init__.py,sha256=kKcK6DMIXOdpWhgMS7xlsNrBNvEmY0L8eyQtBIC3CU4,160
-gpustack_runtime/__main__.py,sha256=
-gpustack_runtime/_version.py,sha256=
+gpustack_runtime/__main__.py,sha256=ILEyXTfxn__jFvovxjxzRDIg7QJqQ2pQrP_2BCGQZRQ,3389
+gpustack_runtime/_version.py,sha256=nC-EU-83isDNnCdVSshJtz4OPykhWYA4jFlaXf9xxAY,792
 gpustack_runtime/_version.pyi,sha256=A42NoSgcqEXVy2OeNm4LXC9CbyonbooYrSUBlPm2lGY,156
-gpustack_runtime/envs.py,sha256=
+gpustack_runtime/envs.py,sha256=624z3eettjvXY3MW2KXFvPcWogp-gmjI5SoBnyfF9Vs,31852
 gpustack_runtime/logging.py,sha256=h_fvD5FV7GHCo00IUDLQmkPR-H6r66IX_WSwZwl0mCw,6869
-gpustack_runtime/cmds/__init__.py,sha256=
+gpustack_runtime/cmds/__init__.py,sha256=9licqBPf2qLsGmv_cL6-SSUPVYCLavcRvryFfr_ZHUk,1010
 gpustack_runtime/cmds/__types__.py,sha256=7C4kQM0EHPD8WpJpTo6kh9rEdkrYALcLQ-GAzMMsqV8,789
-gpustack_runtime/cmds/deployer.py,sha256=
+gpustack_runtime/cmds/deployer.py,sha256=GlQKEXlrC35M7jFmUGylVlfndIu3Y9FhYTo5SsDMjPo,23206
 gpustack_runtime/cmds/detector.py,sha256=xTuPkl0Psi6T_n7RvwCHBeubX4AsjXAhc_emrMufY58,8724
 gpustack_runtime/cmds/images.py,sha256=CjmD82JMdzftRiaeEYSKTGBXudb_qla41v-UzwZQtTo,514
 gpustack_runtime/deployer/__init__.py,sha256=OGAr302rnH8ijmqUPxaF8MjyrDcCdw2BF0wnq_-GKXc,10346
 gpustack_runtime/deployer/__patches__.py,sha256=4TCUdDzKBiOYmr5s0UkrEjBU0UjT7U36R0aQbSvrFRE,17823
 gpustack_runtime/deployer/__types__.py,sha256=UWovjd3n1avWwNm7U2z3Ipftpa9_r7ABUKu0PE1sVy4,60531
 gpustack_runtime/deployer/__utils__.py,sha256=aKoHSeodwEPuUW19bXZNNPG-5ACFn-o2EosmGxtkxS0,21054
-gpustack_runtime/deployer/docker.py,sha256=
+gpustack_runtime/deployer/docker.py,sha256=moxCOn_IkdQCiHxZKgOvGGOT3TbemJTq74vhgyBlHZY,77361
 gpustack_runtime/deployer/kuberentes.py,sha256=BSlSwsp0CK0xoSdQf9U4EnbbOei8pIk1QQi3p2lzHz4,79614
-gpustack_runtime/deployer/podman.py,sha256=
+gpustack_runtime/deployer/podman.py,sha256=GUxMTnaQuErZK55wvhK2MbgFjkYLrXD8ThXuWcM06zI,74114
 gpustack_runtime/detector/__init__.py,sha256=kLVxZ9iud2aLwqqAOanIYNb4QSHavUPhqay-FU5ndbU,5443
 gpustack_runtime/detector/__types__.py,sha256=nVh9OV8tZdHFjHPlYul3u2zMvnpa9KOL5GxlPJhEvGk,13163
 gpustack_runtime/detector/__utils__.py,sha256=Yj4GvGvuDuscDG5OpExYdv-1VhmEHBpRrBC9rTsB-kA,24584
@@ -41,9 +41,9 @@ gpustack_runtime/detector/pymxsml/mxsml_extension.py,sha256=zfWFRGa9eSup336t2lPh
 gpustack_runtime/detector/pymxsml/mxsml_mcm.py,sha256=a4rX7hJNJKTqLodKU9rYeDaAEKef6UNVThl1w-aiFsA,15363
 gpustack_runtime/detector/pyrocmcore/__init__.py,sha256=8XxKmwLX4-uoP7cfxjGoEmk9qlrGf0804pgyb74mJKs,2108
 gpustack_runtime/detector/pyrocmsmi/__init__.py,sha256=OmvfJepAtEnDKBMLrktMXlRbJEZttMDQ2R4ztD8kLGg,11806
-gpustack_runtime/_version_appendix.py,sha256=
-gpustack_runtime-0.1.39.dist-info/METADATA,sha256=
-gpustack_runtime-0.1.39.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
-gpustack_runtime-0.1.39.dist-info/entry_points.txt,sha256=bBO_61GxP6dIT74uZwbSDgW5Vt2pTePUS3CgjUJkUgg,68
-gpustack_runtime-0.1.39.dist-info/licenses/LICENSE,sha256=OiPibowBvB-NHV3TP_NOj18XNBlXcshXZFMpa3uvKVE,10362
-gpustack_runtime-0.1.39.dist-info/RECORD,,
+gpustack_runtime/_version_appendix.py,sha256=8YQIRWVgq2vdpyqBhE6QHm4WYQetOtYCK_sPA5OF-E0,23
+gpustack_runtime-0.1.39.post1.dist-info/METADATA,sha256=E4bWmJgRHV2dKcb87lEDMQYapW5xoFeFwmtmMnPPIyU,2131
+gpustack_runtime-0.1.39.post1.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+gpustack_runtime-0.1.39.post1.dist-info/entry_points.txt,sha256=bBO_61GxP6dIT74uZwbSDgW5Vt2pTePUS3CgjUJkUgg,68
+gpustack_runtime-0.1.39.post1.dist-info/licenses/LICENSE,sha256=OiPibowBvB-NHV3TP_NOj18XNBlXcshXZFMpa3uvKVE,10362
+gpustack_runtime-0.1.39.post1.dist-info/RECORD,,

{gpustack_runtime-0.1.39.dist-info → gpustack_runtime-0.1.39.post1.dist-info}/WHEEL
RENAMED
File without changes

{gpustack_runtime-0.1.39.dist-info → gpustack_runtime-0.1.39.post1.dist-info}/entry_points.txt
RENAMED
File without changes

{gpustack_runtime-0.1.39.dist-info → gpustack_runtime-0.1.39.post1.dist-info}/licenses/LICENSE
RENAMED
File without changes