dstack 0.19.29__py3-none-any.whl → 0.19.30__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of dstack might be problematic. Click here for more details.

@@ -22,7 +22,7 @@ from dstack._internal.cli.commands.server import ServerCommand
22
22
  from dstack._internal.cli.commands.stats import StatsCommand
23
23
  from dstack._internal.cli.commands.stop import StopCommand
24
24
  from dstack._internal.cli.commands.volume import VolumeCommand
25
- from dstack._internal.cli.utils.common import _colors, console
25
+ from dstack._internal.cli.utils.common import _colors, configure_logging, console
26
26
  from dstack._internal.cli.utils.updates import check_for_updates
27
27
  from dstack._internal.core.errors import ClientError, CLIError, ConfigurationError, SSHError
28
28
  from dstack._internal.core.services.ssh.client import get_ssh_client_info
@@ -39,6 +39,8 @@ def main():
39
39
  RichHelpFormatter.styles["argparse.groups"] = "bold grey74"
40
40
  RichHelpFormatter.styles["argparse.text"] = "grey74"
41
41
 
42
+ configure_logging()
43
+
42
44
  parser = argparse.ArgumentParser(
43
45
  description=(
44
46
  "Not sure where to start?"
@@ -159,12 +159,19 @@ class FleetConfigurator(ApplyEnvVarsConfiguratorMixin, BaseApplyConfigurator[Fle
159
159
  console.print(
160
160
  get_fleets_table(
161
161
  [fleet],
162
- verbose=_failed_provisioning(fleet),
162
+ verbose=_fleet_has_failed_instances(fleet),
163
163
  format_date=local_time,
164
164
  )
165
165
  )
166
- if _failed_provisioning(fleet):
167
- console.print("\n[error]Some instances failed. Check the table above for errors.[/]")
166
+ if _fleet_has_failed_instances(fleet):
167
+ if _fleet_retrying(fleet):
168
+ console.print(
169
+ "\n[error]Some instances failed. Provisioning will be retried in the background.[/]"
170
+ )
171
+ else:
172
+ console.print(
173
+ "\n[error]Some instances failed. Check the table above for errors.[/]"
174
+ )
168
175
  exit(1)
169
176
 
170
177
  def _apply_plan_on_old_server(self, plan: FleetPlan, command_args: argparse.Namespace):
@@ -253,11 +260,11 @@ class FleetConfigurator(ApplyEnvVarsConfiguratorMixin, BaseApplyConfigurator[Fle
253
260
  console.print(
254
261
  get_fleets_table(
255
262
  [fleet],
256
- verbose=_failed_provisioning(fleet),
263
+ verbose=_fleet_has_failed_instances(fleet),
257
264
  format_date=local_time,
258
265
  )
259
266
  )
260
- if _failed_provisioning(fleet):
267
+ if _fleet_has_failed_instances(fleet):
261
268
  console.print("\n[error]Some instances failed. Check the table above for errors.[/]")
262
269
  exit(1)
263
270
 
@@ -462,13 +469,20 @@ def _finished_provisioning(fleet: Fleet) -> bool:
462
469
  return True
463
470
 
464
471
 
465
- def _failed_provisioning(fleet: Fleet) -> bool:
472
+ def _fleet_has_failed_instances(fleet: Fleet) -> bool:
466
473
  for instance in fleet.instances:
467
474
  if instance.status == InstanceStatus.TERMINATED:
468
475
  return True
469
476
  return False
470
477
 
471
478
 
479
+ def _fleet_retrying(fleet: Fleet) -> bool:
480
+ if fleet.spec.configuration.nodes is None:
481
+ return False
482
+ active_instances = [i for i in fleet.instances if i.status.is_active()]
483
+ return len(active_instances) < fleet.spec.configuration.nodes.min
484
+
485
+
472
486
  def _apply_plan(api: Client, plan: FleetPlan) -> Fleet:
473
487
  try:
474
488
  return api.client.fleets.apply_plan(
@@ -9,7 +9,7 @@ from dstack._internal.core.models.runs import Requirements, RunSpec, get_policy_
9
9
  from dstack._internal.server.schemas.gpus import GpuGroup
10
10
 
11
11
 
12
- def print_gpu_json(gpu_response, run_spec, group_by_cli, api_project):
12
+ def print_gpu_json(gpus, run_spec, group_by_cli, api_project):
13
13
  """Print GPU information in JSON format."""
14
14
  req = Requirements(
15
15
  resources=run_spec.configuration.resources,
@@ -36,7 +36,7 @@ def print_gpu_json(gpu_response, run_spec, group_by_cli, api_project):
36
36
  "gpus": [],
37
37
  }
38
38
 
39
- for gpu_group in gpu_response.gpus:
39
+ for gpu_group in gpus:
40
40
  gpu_data = {
41
41
  "name": gpu_group.name,
42
42
  "memory_mib": gpu_group.memory_mib,
@@ -291,7 +291,11 @@ class AWSCompute(
291
291
  image_id, username = self._get_image_id_and_username(
292
292
  ec2_client=ec2_client,
293
293
  region=instance_offer.region,
294
- cuda=len(instance_offer.instance.resources.gpus) > 0,
294
+ gpu_name=(
295
+ instance_offer.instance.resources.gpus[0].name
296
+ if len(instance_offer.instance.resources.gpus) > 0
297
+ else None
298
+ ),
295
299
  instance_type=instance_offer.instance.name,
296
300
  image_config=self.config.os_images,
297
301
  )
@@ -897,11 +901,13 @@ class AWSCompute(
897
901
  self,
898
902
  ec2_client: botocore.client.BaseClient,
899
903
  region: str,
900
- cuda: bool,
904
+ gpu_name: Optional[str],
901
905
  instance_type: str,
902
906
  image_config: Optional[AWSOSImageConfig] = None,
903
907
  ) -> tuple:
904
- return hashkey(region, cuda, instance_type, image_config.json() if image_config else None)
908
+ return hashkey(
909
+ region, gpu_name, instance_type, image_config.json() if image_config else None
910
+ )
905
911
 
906
912
  @cachedmethod(
907
913
  cache=lambda self: self._get_image_id_and_username_cache,
@@ -912,13 +918,13 @@ class AWSCompute(
912
918
  self,
913
919
  ec2_client: botocore.client.BaseClient,
914
920
  region: str,
915
- cuda: bool,
921
+ gpu_name: Optional[str],
916
922
  instance_type: str,
917
923
  image_config: Optional[AWSOSImageConfig] = None,
918
924
  ) -> tuple[str, str]:
919
925
  return aws_resources.get_image_id_and_username(
920
926
  ec2_client=ec2_client,
921
- cuda=cuda,
927
+ gpu_name=gpu_name,
922
928
  instance_type=instance_type,
923
929
  image_config=image_config,
924
930
  )
@@ -6,6 +6,8 @@ import botocore.exceptions
6
6
 
7
7
  import dstack.version as version
8
8
  from dstack._internal.core.backends.aws.models import AWSOSImageConfig
9
+ from dstack._internal.core.backends.base.compute import requires_nvidia_proprietary_kernel_modules
10
+ from dstack._internal.core.consts import DSTACK_OS_IMAGE_WITH_PROPRIETARY_NVIDIA_KERNEL_MODULES
9
11
  from dstack._internal.core.errors import BackendError, ComputeError, ComputeResourceNotFoundError
10
12
  from dstack._internal.utils.logging import get_logger
11
13
 
@@ -17,14 +19,14 @@ DLAMI_OWNER_ACCOUNT_ID = "898082745236"
17
19
 
18
20
  def get_image_id_and_username(
19
21
  ec2_client: botocore.client.BaseClient,
20
- cuda: bool,
22
+ gpu_name: Optional[str],
21
23
  instance_type: str,
22
24
  image_config: Optional[AWSOSImageConfig] = None,
23
25
  ) -> tuple[str, str]:
24
26
  if image_config is not None:
25
- image = image_config.nvidia if cuda else image_config.cpu
27
+ image = image_config.nvidia if gpu_name else image_config.cpu
26
28
  if image is None:
27
- logger.warning("%s image not configured", "nvidia" if cuda else "cpu")
29
+ logger.warning("%s image not configured", "nvidia" if gpu_name else "cpu")
28
30
  raise ComputeResourceNotFoundError()
29
31
  image_name = image.name
30
32
  image_owner = image.owner
@@ -35,9 +37,12 @@ def get_image_id_and_username(
35
37
  image_owner = DLAMI_OWNER_ACCOUNT_ID
36
38
  username = "ubuntu"
37
39
  else:
38
- image_name = (
39
- f"dstack-{version.base_image}" if not cuda else f"dstack-cuda-{version.base_image}"
40
- )
40
+ if gpu_name is None:
41
+ image_name = f"dstack-{version.base_image}"
42
+ elif not requires_nvidia_proprietary_kernel_modules(gpu_name):
43
+ image_name = f"dstack-cuda-{version.base_image}"
44
+ else:
45
+ image_name = f"dstack-cuda-{DSTACK_OS_IMAGE_WITH_PROPRIETARY_NVIDIA_KERNEL_MODULES}"
41
46
  image_owner = DSTACK_ACCOUNT_ID
42
47
  username = "ubuntu"
43
48
  response = ec2_client.describe_images(
@@ -48,8 +48,10 @@ from dstack._internal.core.backends.base.compute import (
48
48
  get_gateway_user_data,
49
49
  get_user_data,
50
50
  merge_tags,
51
+ requires_nvidia_proprietary_kernel_modules,
51
52
  )
52
53
  from dstack._internal.core.backends.base.offers import get_catalog_offers, get_offers_disk_modifier
54
+ from dstack._internal.core.consts import DSTACK_OS_IMAGE_WITH_PROPRIETARY_NVIDIA_KERNEL_MODULES
53
55
  from dstack._internal.core.errors import ComputeError, NoCapacityError
54
56
  from dstack._internal.core.models.backends.base import BackendType
55
57
  from dstack._internal.core.models.gateways import (
@@ -372,6 +374,7 @@ def _parse_config_vpc_id(vpc_id: str) -> Tuple[str, str]:
372
374
  class VMImageVariant(enum.Enum):
373
375
  GRID = enum.auto()
374
376
  CUDA = enum.auto()
377
+ CUDA_WITH_PROPRIETARY_KERNEL_MODULES = enum.auto()
375
378
  STANDARD = enum.auto()
376
379
 
377
380
  @classmethod
@@ -379,18 +382,24 @@ class VMImageVariant(enum.Enum):
379
382
  if "_A10_v5" in instance.name:
380
383
  return cls.GRID
381
384
  elif len(instance.resources.gpus) > 0:
382
- return cls.CUDA
385
+ if not requires_nvidia_proprietary_kernel_modules(instance.resources.gpus[0].name):
386
+ return cls.CUDA
387
+ else:
388
+ return cls.CUDA_WITH_PROPRIETARY_KERNEL_MODULES
383
389
  else:
384
390
  return cls.STANDARD
385
391
 
386
392
  def get_image_name(self) -> str:
387
- name = "dstack-"
388
393
  if self is self.GRID:
389
- name += "grid-"
394
+ return f"dstack-grid-{version.base_image}"
390
395
  elif self is self.CUDA:
391
- name += "cuda-"
392
- name += version.base_image
393
- return name
396
+ return f"dstack-cuda-{version.base_image}"
397
+ elif self is self.CUDA_WITH_PROPRIETARY_KERNEL_MODULES:
398
+ return f"dstack-cuda-{DSTACK_OS_IMAGE_WITH_PROPRIETARY_NVIDIA_KERNEL_MODULES}"
399
+ elif self is self.STANDARD:
400
+ return f"dstack-{version.base_image}"
401
+ else:
402
+ raise ValueError(f"Unexpected image variant {self!r}")
394
403
 
395
404
 
396
405
  _SUPPORTED_VM_SERIES_PATTERNS = [
@@ -48,6 +48,22 @@ logger = get_logger(__name__)
48
48
  DSTACK_SHIM_BINARY_NAME = "dstack-shim"
49
49
  DSTACK_RUNNER_BINARY_NAME = "dstack-runner"
50
50
  DEFAULT_PRIVATE_SUBNETS = ("10.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16")
51
+ NVIDIA_GPUS_REQUIRING_PROPRIETARY_KERNEL_MODULES = frozenset(
52
+ # All NVIDIA architectures prior to Turing do not support Open Kernel Modules and require
53
+ # proprietary modules. This list is incomplete, update when necessary.
54
+ [
55
+ "v100",
56
+ "p100",
57
+ "p40",
58
+ "p4",
59
+ "m60",
60
+ "m40",
61
+ "m4",
62
+ "k80",
63
+ "k40",
64
+ "k20",
65
+ ]
66
+ )
51
67
 
52
68
  GoArchType = Literal["amd64", "arm64"]
53
69
 
@@ -969,3 +985,12 @@ def merge_tags(
969
985
  for k, v in resource_tags.items():
970
986
  res.setdefault(k, v)
971
987
  return res
988
+
989
+
990
+ def requires_nvidia_proprietary_kernel_modules(gpu_name: str) -> bool:
991
+ """
992
+ Returns:
993
+ Whether this NVIDIA GPU requires NVIDIA proprietary kernel modules
994
+ instead of open kernel modules.
995
+ """
996
+ return gpu_name.lower() in NVIDIA_GPUS_REQUIRING_PROPRIETARY_KERNEL_MODULES
@@ -31,6 +31,7 @@ from dstack._internal.core.backends.base.compute import (
31
31
  get_shim_commands,
32
32
  get_user_data,
33
33
  merge_tags,
34
+ requires_nvidia_proprietary_kernel_modules,
34
35
  )
35
36
  from dstack._internal.core.backends.base.offers import (
36
37
  get_catalog_offers,
@@ -38,6 +39,7 @@ from dstack._internal.core.backends.base.offers import (
38
39
  )
39
40
  from dstack._internal.core.backends.gcp.features import tcpx as tcpx_features
40
41
  from dstack._internal.core.backends.gcp.models import GCPConfig
42
+ from dstack._internal.core.consts import DSTACK_OS_IMAGE_WITH_PROPRIETARY_NVIDIA_KERNEL_MODULES
41
43
  from dstack._internal.core.errors import (
42
44
  ComputeError,
43
45
  ComputeResourceNotFoundError,
@@ -293,7 +295,11 @@ class GCPCompute(
293
295
 
294
296
  image = _get_image(
295
297
  instance_type_name=instance_offer.instance.name,
296
- cuda=len(instance_offer.instance.resources.gpus) > 0,
298
+ gpu_name=(
299
+ instance_offer.instance.resources.gpus[0].name
300
+ if len(instance_offer.instance.resources.gpus) > 0
301
+ else None
302
+ ),
297
303
  )
298
304
 
299
305
  for zone in zones:
@@ -904,7 +910,7 @@ class GCPImage:
904
910
  is_ufw_installed: bool
905
911
 
906
912
 
907
- def _get_image(instance_type_name: str, cuda: bool) -> GCPImage:
913
+ def _get_image(instance_type_name: str, gpu_name: Optional[str]) -> GCPImage:
908
914
  if instance_type_name == "a3-megagpu-8g":
909
915
  image_name = "dstack-a3mega-5"
910
916
  is_ufw_installed = False
@@ -913,8 +919,11 @@ def _get_image(instance_type_name: str, cuda: bool) -> GCPImage:
913
919
  id="projects/cos-cloud/global/images/cos-105-17412-535-78",
914
920
  is_ufw_installed=False,
915
921
  )
916
- elif cuda:
917
- image_name = f"dstack-cuda-{version.base_image}"
922
+ elif gpu_name is not None:
923
+ if not requires_nvidia_proprietary_kernel_modules(gpu_name):
924
+ image_name = f"dstack-cuda-{version.base_image}"
925
+ else:
926
+ image_name = f"dstack-cuda-{DSTACK_OS_IMAGE_WITH_PROPRIETARY_NVIDIA_KERNEL_MODULES}"
918
927
  is_ufw_installed = True
919
928
  else:
920
929
  image_name = f"dstack-{version.base_image}"
@@ -118,7 +118,11 @@ class OCICompute(
118
118
  availability_domain = instance_offer.availability_zones[0]
119
119
 
120
120
  listing, package = resources.get_marketplace_listing_and_package(
121
- cuda=len(instance_offer.instance.resources.gpus) > 0,
121
+ gpu_name=(
122
+ instance_offer.instance.resources.gpus[0].name
123
+ if len(instance_offer.instance.resources.gpus) > 0
124
+ else None
125
+ ),
122
126
  client=region.marketplace_client,
123
127
  )
124
128
  resources.accept_marketplace_listing_agreements(
@@ -23,7 +23,9 @@ import oci
23
23
  from oci.object_storage.models import CreatePreauthenticatedRequestDetails
24
24
 
25
25
  from dstack import version
26
+ from dstack._internal.core.backends.base.compute import requires_nvidia_proprietary_kernel_modules
26
27
  from dstack._internal.core.backends.oci.region import OCIRegionClient
28
+ from dstack._internal.core.consts import DSTACK_OS_IMAGE_WITH_PROPRIETARY_NVIDIA_KERNEL_MODULES
27
29
  from dstack._internal.core.errors import BackendError
28
30
  from dstack._internal.core.models.instances import InstanceOffer
29
31
  from dstack._internal.utils.common import batched
@@ -352,11 +354,14 @@ def terminate_instance_if_exists(client: oci.core.ComputeClient, instance_id: st
352
354
 
353
355
 
354
356
  def get_marketplace_listing_and_package(
355
- cuda: bool, client: oci.marketplace.MarketplaceClient
357
+ gpu_name: Optional[str], client: oci.marketplace.MarketplaceClient
356
358
  ) -> Tuple[oci.marketplace.models.Listing, oci.marketplace.models.ImageListingPackage]:
357
359
  listing_name = f"dstack-{version.base_image}"
358
- if cuda:
359
- listing_name = f"dstack-cuda-{version.base_image}"
360
+ if gpu_name is not None:
361
+ if not requires_nvidia_proprietary_kernel_modules(gpu_name):
362
+ listing_name = f"dstack-cuda-{version.base_image}"
363
+ else:
364
+ listing_name = f"dstack-cuda-{DSTACK_OS_IMAGE_WITH_PROPRIETARY_NVIDIA_KERNEL_MODULES}"
360
365
 
361
366
  listing_summaries = list_marketplace_listings(listing_name, client)
362
367
  if len(listing_summaries) != 1:
@@ -4,3 +4,5 @@ DSTACK_SHIM_HTTP_PORT = 10998
4
4
  DSTACK_RUNNER_HTTP_PORT = 10999
5
5
  # ssh server (runs alongside the runner inside a container) listen port
6
6
  DSTACK_RUNNER_SSH_PORT = 10022
7
+ # legacy AWS, Azure, GCP, and OCI image for older GPUs
8
+ DSTACK_OS_IMAGE_WITH_PROPRIETARY_NVIDIA_KERNEL_MODULES = "0.10"
@@ -36,24 +36,59 @@ def get_repo_creds_and_default_branch(
36
36
 
37
37
  # no auth
38
38
  with suppress(InvalidRepoCredentialsError):
39
- return _get_repo_creds_and_default_branch_https(url)
39
+ creds, default_branch = _get_repo_creds_and_default_branch_https(url)
40
+ logger.debug(
41
+ "Git repo %s is public. Using no auth. Default branch: %s", repo_url, default_branch
42
+ )
43
+ return creds, default_branch
40
44
 
41
45
  # ssh key provided by the user or pulled from the server
42
46
  if identity_file is not None or private_key is not None:
43
47
  if identity_file is not None:
44
48
  private_key = _read_private_key(identity_file)
45
- return _get_repo_creds_and_default_branch_ssh(url, identity_file, private_key)
49
+ creds, default_branch = _get_repo_creds_and_default_branch_ssh(
50
+ url, identity_file, private_key
51
+ )
52
+ logger.debug(
53
+ "Git repo %s is private. Using identity file: %s. Default branch: %s",
54
+ repo_url,
55
+ identity_file,
56
+ default_branch,
57
+ )
58
+ return creds, default_branch
46
59
  elif private_key is not None:
47
60
  with NamedTemporaryFile("w+", 0o600) as f:
48
61
  f.write(private_key)
49
62
  f.flush()
50
- return _get_repo_creds_and_default_branch_ssh(url, f.name, private_key)
63
+ creds, default_branch = _get_repo_creds_and_default_branch_ssh(
64
+ url, f.name, private_key
65
+ )
66
+ masked_key = "***" + private_key[-10:] if len(private_key) > 10 else "***MASKED***"
67
+ logger.debug(
68
+ "Git repo %s is private. Using private key: %s. Default branch: %s",
69
+ repo_url,
70
+ masked_key,
71
+ default_branch,
72
+ )
73
+ return creds, default_branch
51
74
  else:
52
75
  assert False, "should not reach here"
53
76
 
54
77
  # oauth token provided by the user or pulled from the server
55
78
  if oauth_token is not None:
56
- return _get_repo_creds_and_default_branch_https(url, oauth_token)
79
+ creds, default_branch = _get_repo_creds_and_default_branch_https(url, oauth_token)
80
+ masked_token = (
81
+ len(oauth_token[:-4]) * "*" + oauth_token[-4:]
82
+ if len(oauth_token) > 4
83
+ else "***MASKED***"
84
+ )
85
+ logger.debug(
86
+ "Git repo %s is private. Using provided OAuth token: %s. Default branch: %s",
87
+ repo_url,
88
+ masked_token,
89
+ default_branch,
90
+ )
91
+ return creds, default_branch
57
92
 
58
93
  # key from ssh config
59
94
  identities = get_host_config(url.original_host).get("identityfile")
@@ -61,7 +96,16 @@ def get_repo_creds_and_default_branch(
61
96
  _identity_file = identities[0]
62
97
  with suppress(InvalidRepoCredentialsError):
63
98
  _private_key = _read_private_key(_identity_file)
64
- return _get_repo_creds_and_default_branch_ssh(url, _identity_file, _private_key)
99
+ creds, default_branch = _get_repo_creds_and_default_branch_ssh(
100
+ url, _identity_file, _private_key
101
+ )
102
+ logger.debug(
103
+ "Git repo %s is private. Using SSH config identity file: %s. Default branch: %s",
104
+ repo_url,
105
+ _identity_file,
106
+ default_branch,
107
+ )
108
+ return creds, default_branch
65
109
 
66
110
  # token from gh config
67
111
  if os.path.exists(gh_config_path):
@@ -70,13 +114,35 @@ def get_repo_creds_and_default_branch(
70
114
  _oauth_token = gh_hosts.get(url.host, {}).get("oauth_token")
71
115
  if _oauth_token is not None:
72
116
  with suppress(InvalidRepoCredentialsError):
73
- return _get_repo_creds_and_default_branch_https(url, _oauth_token)
117
+ creds, default_branch = _get_repo_creds_and_default_branch_https(url, _oauth_token)
118
+ masked_token = (
119
+ len(_oauth_token[:-4]) * "*" + _oauth_token[-4:]
120
+ if len(_oauth_token) > 4
121
+ else "***MASKED***"
122
+ )
123
+ logger.debug(
124
+ "Git repo %s is private. Using GitHub config token: %s from %s. Default branch: %s",
125
+ repo_url,
126
+ masked_token,
127
+ gh_config_path,
128
+ default_branch,
129
+ )
130
+ return creds, default_branch
74
131
 
75
132
  # default user key
76
133
  if os.path.exists(default_ssh_key):
77
134
  with suppress(InvalidRepoCredentialsError):
78
135
  _private_key = _read_private_key(default_ssh_key)
79
- return _get_repo_creds_and_default_branch_ssh(url, default_ssh_key, _private_key)
136
+ creds, default_branch = _get_repo_creds_and_default_branch_ssh(
137
+ url, default_ssh_key, _private_key
138
+ )
139
+ logger.debug(
140
+ "Git repo %s is private. Using default identity file: %s. Default branch: %s",
141
+ repo_url,
142
+ default_ssh_key,
143
+ default_branch,
144
+ )
145
+ return creds, default_branch
80
146
 
81
147
  raise InvalidRepoCredentialsError(
82
148
  "No valid default Git credentials found. Pass valid `--token` or `--git-identity`."
@@ -87,8 +153,9 @@ def _get_repo_creds_and_default_branch_ssh(
87
153
  url: GitRepoURL, identity_file: PathLike, private_key: str
88
154
  ) -> tuple[RemoteRepoCreds, Optional[str]]:
89
155
  _url = url.as_ssh()
156
+ env = _make_git_env_for_creds_check(identity_file=identity_file)
90
157
  try:
91
- default_branch = _get_repo_default_branch(_url, make_git_env(identity_file=identity_file))
158
+ default_branch = _get_repo_default_branch(_url, env)
92
159
  except GitCommandError as e:
93
160
  message = f"Cannot access `{_url}` using the `{identity_file}` private SSH key"
94
161
  raise InvalidRepoCredentialsError(message) from e
@@ -104,8 +171,9 @@ def _get_repo_creds_and_default_branch_https(
104
171
  url: GitRepoURL, oauth_token: Optional[str] = None
105
172
  ) -> tuple[RemoteRepoCreds, Optional[str]]:
106
173
  _url = url.as_https()
174
+ env = _make_git_env_for_creds_check()
107
175
  try:
108
- default_branch = _get_repo_default_branch(url.as_https(oauth_token), make_git_env())
176
+ default_branch = _get_repo_default_branch(url.as_https(oauth_token), env)
109
177
  except GitCommandError as e:
110
178
  message = f"Cannot access `{_url}`"
111
179
  if oauth_token is not None:
@@ -120,9 +188,32 @@ def _get_repo_creds_and_default_branch_https(
120
188
  return creds, default_branch
121
189
 
122
190
 
191
+ def _make_git_env_for_creds_check(identity_file: Optional[PathLike] = None) -> dict[str, str]:
192
+ # Our goal is to check if _provided_ creds (if any) are correct, so we need to be sure that
193
+ # only the provided creds are used, without falling back to any additional mechanisms.
194
+ # To do this, we:
195
+ # 1. Disable all configs to ignore any stored creds
196
+ # 2. Disable askpass to avoid asking for creds interactively or fetching stored creds from
197
+ # a non-interactive askpass helper (for example, VS Code sets GIT_ASKPASS to its own helper,
198
+ # which silently provides creds to Git).
199
+ return make_git_env(disable_config=True, disable_askpass=True, identity_file=identity_file)
200
+
201
+
123
202
  def _get_repo_default_branch(url: str, env: dict[str, str]) -> Optional[str]:
203
+ # Git shipped by Apple with XCode is patched to support an additional config scope
204
+ # above "system" called "xcode". There is no option in `git config list` to show this config,
205
+ # but you can list the merged config (`git config list` without options) and then exclude
206
+ # all settings listed in `git config list --{system,global,local,worktree}`.
207
+ # As of time of writing, there are only two settings in the "xcode" config, one of which breaks
208
+ # our "is repo public?" check, namely "credential.helper=osxkeychain".
209
+ # As there is no way to disable "xcode" config (no env variable, no CLI option, etc.),
210
+ # the only way to disable credential helper is to override this specific setting with an empty
211
+ # string via command line argument: `git -c credential.helper= COMMAND [ARGS ...]`.
212
+ # See: https://github.com/git/git/commit/3d4355712b9fe77a96ad4ad877d92dc7ff6e0874
213
+ # See: https://gist.github.com/ChrisTollefson/ab9c0a5d1dd4dd615217345c6936a307
214
+ _git = git.cmd.Git()(c="credential.helper=")
124
215
  # output example: "ref: refs/heads/dev\tHEAD\n545344f77c0df78367085952a97fc3a058eb4c65\tHEAD"
125
- output: str = git.cmd.Git().ls_remote("--symref", url, "HEAD", env=env)
216
+ output: str = _git.ls_remote("--symref", url, "HEAD", env=env)
126
217
  for line in output.splitlines():
127
218
  # line format: `<oid> TAB <ref> LF`
128
219
  oid, _, ref = line.partition("\t")
@@ -1139,7 +1139,7 @@ def _patch_base_image_for_aws_efa(
1139
1139
  efa_enabled_patterns = [
1140
1140
  # TODO: p6-b200 isn't supported yet in gpuhunt
1141
1141
  r"^p6-b200\.(48xlarge)$",
1142
- r"^p5\.(48xlarge)$",
1142
+ r"^p5\.(4xlarge|48xlarge)$",
1143
1143
  r"^p5e\.(48xlarge)$",
1144
1144
  r"^p5en\.(48xlarge)$",
1145
1145
  r"^p4d\.(24xlarge)$",
@@ -3,7 +3,7 @@ import itertools
3
3
  import math
4
4
  import uuid
5
5
  from datetime import datetime, timedelta
6
- from typing import List, Optional, Tuple
6
+ from typing import List, Optional
7
7
 
8
8
  from sqlalchemy import and_, func, not_, or_, select
9
9
  from sqlalchemy.ext.asyncio import AsyncSession
@@ -25,6 +25,7 @@ from dstack._internal.core.models.instances import InstanceOfferWithAvailability
25
25
  from dstack._internal.core.models.profiles import (
26
26
  DEFAULT_RUN_TERMINATION_IDLE_TIME,
27
27
  CreationPolicy,
28
+ Profile,
28
29
  TerminationPolicy,
29
30
  )
30
31
  from dstack._internal.core.models.resources import Memory
@@ -34,6 +35,7 @@ from dstack._internal.core.models.runs import (
34
35
  JobRuntimeData,
35
36
  JobStatus,
36
37
  JobTerminationReason,
38
+ Requirements,
37
39
  Run,
38
40
  RunSpec,
39
41
  )
@@ -186,7 +188,7 @@ async def _process_submitted_job(session: AsyncSession, job_model: JobModel):
186
188
  project = run_model.project
187
189
  run = run_model_to_run(run_model)
188
190
  run_spec = run.run_spec
189
- profile = run_spec.merged_profile
191
+ run_profile = run_spec.merged_profile
190
192
  job = find_job(run.jobs, job_model.replica_num, job_model.job_num)
191
193
  multinode = job.job_spec.jobs_per_replica > 1
192
194
 
@@ -333,7 +335,7 @@ async def _process_submitted_job(session: AsyncSession, job_model: JobModel):
333
335
  job_model.status = JobStatus.PROVISIONING
334
336
  else:
335
337
  # Assigned no instance, create a new one
336
- if profile.creation_policy == CreationPolicy.REUSE:
338
+ if run_profile.creation_policy == CreationPolicy.REUSE:
337
339
  logger.debug("%s: reuse instance failed", fmt(job_model))
338
340
  job_model.status = JobStatus.TERMINATING
339
341
  job_model.termination_reason = JobTerminationReason.FAILED_TO_START_DUE_TO_NO_CAPACITY
@@ -362,7 +364,7 @@ async def _process_submitted_job(session: AsyncSession, job_model: JobModel):
362
364
  return
363
365
 
364
366
  logger.info("%s: now is provisioning a new instance", fmt(job_model))
365
- job_provisioning_data, offer = run_job_result
367
+ job_provisioning_data, offer, effective_profile, _ = run_job_result
366
368
  job_model.job_provisioning_data = job_provisioning_data.json()
367
369
  job_model.status = JobStatus.PROVISIONING
368
370
  if fleet_model is None:
@@ -382,12 +384,11 @@ async def _process_submitted_job(session: AsyncSession, job_model: JobModel):
382
384
  instance = _create_instance_model_for_job(
383
385
  project=project,
384
386
  fleet_model=fleet_model,
385
- run_spec=run_spec,
386
387
  job_model=job_model,
387
- job=job,
388
388
  job_provisioning_data=job_provisioning_data,
389
389
  offer=offer,
390
390
  instance_num=instance_num,
391
+ profile=effective_profile,
391
392
  )
392
393
  job_model.job_runtime_data = _prepare_job_runtime_data(offer, multinode).json()
393
394
  # Both this task and process_fleets can add instances to fleets.
@@ -546,23 +547,22 @@ async def _find_optimal_fleet_with_offers(
546
547
  fleet_cheapest_pool_offer = fleet_instances_with_pool_offers[0][1].price
547
548
 
548
549
  candidate_fleet = fleet_model_to_fleet(candidate_fleet_model)
549
- profile = combine_fleet_and_run_profiles(
550
- candidate_fleet.spec.merged_profile, run_spec.merged_profile
551
- )
552
- fleet_requirements = get_fleet_requirements(candidate_fleet.spec)
553
- requirements = combine_fleet_and_run_requirements(
554
- fleet_requirements, job.job_spec.requirements
555
- )
556
- multinode = (
557
- candidate_fleet.spec.configuration.placement == InstanceGroupPlacement.CLUSTER
558
- or job.job_spec.jobs_per_replica > 1
559
- )
550
+ profile = None
551
+ requirements = None
552
+ try:
553
+ profile, requirements = _get_run_profile_and_requirements_in_fleet(
554
+ job=job,
555
+ run_spec=run_spec,
556
+ fleet=candidate_fleet,
557
+ )
558
+ except ValueError:
559
+ pass
560
560
  fleet_backend_offers = []
561
- if (
562
- _check_can_create_new_instance_in_fleet(candidate_fleet)
563
- and profile is not None
564
- and requirements is not None
565
- ):
561
+ if profile is not None and requirements is not None:
562
+ multinode = (
563
+ candidate_fleet.spec.configuration.placement == InstanceGroupPlacement.CLUSTER
564
+ or job.job_spec.jobs_per_replica > 1
565
+ )
566
566
  fleet_backend_offers = await get_offers_by_requirements(
567
567
  project=project,
568
568
  profile=profile,
@@ -704,7 +704,7 @@ async def _run_job_on_new_instance(
704
704
  master_job_provisioning_data: Optional[JobProvisioningData] = None,
705
705
  volumes: Optional[List[List[Volume]]] = None,
706
706
  fleet_model: Optional[FleetModel] = None,
707
- ) -> Optional[Tuple[JobProvisioningData, InstanceOfferWithAvailability]]:
707
+ ) -> Optional[tuple[JobProvisioningData, InstanceOfferWithAvailability, Profile, Requirements]]:
708
708
  if volumes is None:
709
709
  volumes = []
710
710
  profile = run.run_spec.merged_profile
@@ -712,21 +712,14 @@ async def _run_job_on_new_instance(
712
712
  fleet = None
713
713
  if fleet_model is not None:
714
714
  fleet = fleet_model_to_fleet(fleet_model)
715
- if not _check_can_create_new_instance_in_fleet(fleet):
716
- logger.debug(
717
- "%s: cannot fit new instance into fleet %s", fmt(job_model), fleet_model.name
718
- )
719
- return None
720
- profile = combine_fleet_and_run_profiles(fleet.spec.merged_profile, profile)
721
- if profile is None:
722
- logger.debug("%s: cannot combine fleet %s profile", fmt(job_model), fleet_model.name)
723
- return None
724
- fleet_requirements = get_fleet_requirements(fleet.spec)
725
- requirements = combine_fleet_and_run_requirements(fleet_requirements, requirements)
726
- if requirements is None:
727
- logger.debug(
728
- "%s: cannot combine fleet %s requirements", fmt(job_model), fleet_model.name
715
+ try:
716
+ profile, requirements = _get_run_profile_and_requirements_in_fleet(
717
+ job=job,
718
+ run_spec=run.run_spec,
719
+ fleet=fleet,
729
720
  )
721
+ except ValueError as e:
722
+ logger.debug("%s: %s", fmt(job_model), e.args[0])
730
723
  return None
731
724
  # TODO: Respect fleet provisioning properties such as tags
732
725
 
@@ -766,7 +759,7 @@ async def _run_job_on_new_instance(
766
759
  project_ssh_private_key,
767
760
  offer_volumes,
768
761
  )
769
- return job_provisioning_data, offer
762
+ return job_provisioning_data, offer, profile, requirements
770
763
  except BackendError as e:
771
764
  logger.warning(
772
765
  "%s: %s launch in %s/%s failed: %s",
@@ -789,6 +782,25 @@ async def _run_job_on_new_instance(
789
782
  return None
790
783
 
791
784
 
785
+ def _get_run_profile_and_requirements_in_fleet(
786
+ job: Job,
787
+ run_spec: RunSpec,
788
+ fleet: Fleet,
789
+ ) -> tuple[Profile, Requirements]:
790
+ if not _check_can_create_new_instance_in_fleet(fleet):
791
+ raise ValueError("Cannot fit new instance into fleet")
792
+ profile = combine_fleet_and_run_profiles(fleet.spec.merged_profile, run_spec.merged_profile)
793
+ if profile is None:
794
+ raise ValueError("Cannot combine fleet profile")
795
+ fleet_requirements = get_fleet_requirements(fleet.spec)
796
+ requirements = combine_fleet_and_run_requirements(
797
+ fleet_requirements, job.job_spec.requirements
798
+ )
799
+ if requirements is None:
800
+ raise ValueError("Cannot combine fleet requirements")
801
+ return profile, requirements
802
+
803
+
792
804
  def _check_can_create_new_instance_in_fleet(fleet: Fleet) -> bool:
793
805
  if fleet.spec.configuration.ssh_config is not None:
794
806
  return False
@@ -857,14 +869,12 @@ async def _get_next_instance_num(session: AsyncSession, fleet_model: FleetModel)
857
869
  def _create_instance_model_for_job(
858
870
  project: ProjectModel,
859
871
  fleet_model: FleetModel,
860
- run_spec: RunSpec,
861
872
  job_model: JobModel,
862
- job: Job,
863
873
  job_provisioning_data: JobProvisioningData,
864
874
  offer: InstanceOfferWithAvailability,
865
875
  instance_num: int,
876
+ profile: Profile,
866
877
  ) -> InstanceModel:
867
- profile = run_spec.merged_profile
868
878
  if not job_provisioning_data.dockerized:
869
879
  # terminate vastai/k8s instances immediately
870
880
  termination_policy = TerminationPolicy.DESTROY_AFTER_IDLE
@@ -50,8 +50,28 @@ def make_ssh_command_for_git(identity_file: PathLike) -> str:
50
50
  )
51
51
 
52
52
 
53
- def make_git_env(*, identity_file: Optional[PathLike] = None) -> dict[str, str]:
54
- env: dict[str, str] = {"GIT_TERMINAL_PROMPT": "0"}
53
+ def make_git_env(
54
+ *,
55
+ disable_prompt: bool = True,
56
+ disable_askpass: bool = False,
57
+ disable_config: bool = False,
58
+ identity_file: Optional[PathLike] = None,
59
+ ) -> dict[str, str]:
60
+ env: dict[str, str] = {}
61
+ if disable_prompt:
62
+ # Fail with error instead of prompting on the terminal (e.g., when asking for
63
+ # HTTP authentication)
64
+ env["GIT_TERMINAL_PROMPT"] = "0"
65
+ if disable_askpass:
66
+ env["GIT_ASKPASS"] = ""
67
+ env["SSH_ASKPASS"] = ""
68
+ if disable_config:
69
+ # Disable system-wide config (usually /etc/gitconfig)
70
+ env["GIT_CONFIG_SYSTEM"] = os.devnull
71
+ # Disable user (aka "global") config ($XDG_CONFIG_HOME/git/config or ~/.gitconfig)
72
+ env["GIT_CONFIG_GLOBAL"] = os.devnull
73
+ # Disable repo (aka "local") config (./.git/config)
74
+ env["GIT_DIR"] = os.devnull
55
75
  if identity_file is not None:
56
76
  env["GIT_SSH_COMMAND"] = make_ssh_command_for_git(identity_file)
57
77
  return env
dstack/version.py CHANGED
@@ -1,4 +1,4 @@
1
- __version__ = "0.19.29"
1
+ __version__ = "0.19.30"
2
2
  __is_release__ = True
3
- base_image = "0.10"
3
+ base_image = "0.11rc2"
4
4
  base_image_ubuntu_version = "22.04"
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: dstack
3
- Version: 0.19.29
3
+ Version: 0.19.30
4
4
  Summary: dstack is an open-source orchestration engine for running AI workloads on any cloud or on-premises.
5
5
  Project-URL: Homepage, https://dstack.ai
6
6
  Project-URL: Source, https://github.com/dstackai/dstack
@@ -331,24 +331,26 @@ Description-Content-Type: text/markdown
331
331
 
332
332
  </div>
333
333
 
334
- `dstack` is an open-source container orchestrator that simplifies workload orchestration and drives GPU utilization for ML teams. It works with any GPU cloud, on-prem cluster, or accelerated hardware.
334
+ `dstack` provides a unified control plane for running development, training, and inference on GPUs across cloud VMs, Kubernetes, or on-prem clusters. It helps your team avoid vendor lock-in and reduce GPU costs.
335
335
 
336
336
  #### Accelerators
337
337
 
338
338
  `dstack` supports `NVIDIA`, `AMD`, `Google TPU`, `Intel Gaudi`, and `Tenstorrent` accelerators out of the box.
339
339
 
340
340
  ## Latest news ✨
341
+ - [2025/09] [dstack 0.19.27: Offers UI, Digital Ocean and AMD Developer Cloud](https://github.com/dstackai/dstack/releases/tag/0.19.27)
342
+ - [2025/08] [dstack 0.19.26: Repos – explicit repo configuration via YAML](https://github.com/dstackai/dstack/releases/tag/0.19.26)
343
+ - [2025/08] [dstack 0.19.25: `dstack offer` CLI command](https://github.com/dstackai/dstack/releases/tag/0.19.25)
344
+ - [2025/08] [dstack 0.19.22: Service probes, GPU health-checks, Tenstorrent Galaxy, Secrets UI](https://github.com/dstackai/dstack/releases/tag/0.19.22)
345
+ - [2025/07] [dstack 0.19.21: Scheduled tasks](https://github.com/dstackai/dstack/releases/tag/0.19.21)
341
346
  - [2025/07] [dstack 0.19.17: Secrets, Files, Rolling deployment](https://github.com/dstackai/dstack/releases/tag/0.19.17)
342
347
  - [2025/06] [dstack 0.19.16: Docker in Docker, CloudRift](https://github.com/dstackai/dstack/releases/tag/0.19.16)
343
348
  - [2025/06] [dstack 0.19.13: InfiniBand support in default images](https://github.com/dstackai/dstack/releases/tag/0.19.13)
344
349
  - [2025/06] [dstack 0.19.12: Simplified use of MPI](https://github.com/dstackai/dstack/releases/tag/0.19.12)
345
- - [2025/05] [dstack 0.19.10: Priorities](https://github.com/dstackai/dstack/releases/tag/0.19.10)
346
- - [2025/05] [dstack 0.19.8: Nebius clusters, GH200 on Lambda](https://github.com/dstackai/dstack/releases/tag/0.19.8)
347
- - [2025/04] [dstack 0.19.6: Tenstorrent, Plugins](https://github.com/dstackai/dstack/releases/tag/0.19.6)
348
350
 
349
351
  ## How does it work?
350
352
 
351
- <img src="https://dstack.ai/static-assets/static-assets/images/dstack-architecture-diagram-v10.svg" width="750" />
353
+ <img src="https://dstack.ai/static-assets/static-assets/images/dstack-architecture-diagram-v11.svg" width="750" />
352
354
 
353
355
  ### Installation
354
356
 
@@ -1,10 +1,10 @@
1
1
  dstack/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
2
- dstack/version.py,sha256=YFaOhGPwo6LX5fJekfVb7R4HaBPGERtQmy7ytf9Rmq0,102
2
+ dstack/version.py,sha256=2A1fxFQ9n9Kcprebn6q6fi-3iIYieg8EN2ej5aCbbYs,105
3
3
  dstack/_internal/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
4
4
  dstack/_internal/compat.py,sha256=bF9U9fTMfL8UVhCouedoUSTYFl7UAOiU0WXrnRoByxw,40
5
5
  dstack/_internal/settings.py,sha256=FYtd7tRk17Oc62Kl_3O8NuT5JHb8TKhLThl1TsfjjVs,1390
6
6
  dstack/_internal/cli/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
7
- dstack/_internal/cli/main.py,sha256=AZ4TB025U5wpQq3qH5sk9KkOYZCpxMIKqfaLH_Mv0DU,3876
7
+ dstack/_internal/cli/main.py,sha256=0QScKrDlNf0rHXVhSiprAKOMJxJbmlc6QKTRCZDpb0U,3920
8
8
  dstack/_internal/cli/commands/__init__.py,sha256=aZdSeBeL38BYl_kjkdTHNbsQZiFbpDAHxas01be9CG8,2269
9
9
  dstack/_internal/cli/commands/apply.py,sha256=py0ysrUSo8EbwzpHRS3d_vbO3ALxpDFQmscieSoKkjE,3725
10
10
  dstack/_internal/cli/commands/attach.py,sha256=75hojBqYRXj_65eQNvjypuyDy1G1HmajzOHSLe3pOIk,5148
@@ -31,7 +31,7 @@ dstack/_internal/cli/services/profile.py,sha256=TiNMP_0Xbx9cD22zc8dTHHNIsZeIpavW
31
31
  dstack/_internal/cli/services/repos.py,sha256=wvb3nHI9yJ0fb-dWf2yihl6zdG1ZRVpKiC0CUz9qL7Y,2100
32
32
  dstack/_internal/cli/services/configurators/__init__.py,sha256=AwbKV85_XH0V8IS8v2Zpgl8vnvVCLMLbMuPyvsZv3mQ,2971
33
33
  dstack/_internal/cli/services/configurators/base.py,sha256=9mabsRsKHY7xFl8yDRM__FOEQvtEYOn_4KeXKzO6-vA,3244
34
- dstack/_internal/cli/services/configurators/fleet.py,sha256=pPfQmXBdqXHy8QAxGm9yY3Y3vKis-vaKirLrCO84zQA,18827
34
+ dstack/_internal/cli/services/configurators/fleet.py,sha256=xchLFqHkm0zVs3tEVMlYXd4lGjP_OWzdp3nzqNg7b8k,19374
35
35
  dstack/_internal/cli/services/configurators/gateway.py,sha256=B2aEoVhhRZGk25OekR7BT4eAvAgZGAyVOw7DlSuo2jg,8755
36
36
  dstack/_internal/cli/services/configurators/run.py,sha256=lD14geWv8WtImrVDa5w9bXmA8JK8xIcTUawBTbbu6ZE,39543
37
37
  dstack/_internal/cli/services/configurators/volume.py,sha256=ZeESHttKFERBFysJvSGFC4UXUOLghKlzcilXbL48s-c,8346
@@ -39,14 +39,14 @@ dstack/_internal/cli/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJ
39
39
  dstack/_internal/cli/utils/common.py,sha256=xgq2Rdx6wGS1NLx8FlmkjKrGaNYD428oeBB4VMjvJ5c,3447
40
40
  dstack/_internal/cli/utils/fleet.py,sha256=qgyZnhKSsdx4nWVnzcU30vhwV7dtrwn8C0yNKMQ1H4g,4044
41
41
  dstack/_internal/cli/utils/gateway.py,sha256=qMYa1NTAT_O98x2_mSyWDRbiHj5fqt6xUXFh9NIUwAM,1502
42
- dstack/_internal/cli/utils/gpu.py,sha256=5LrOFo2bWSNZcyf_vCxhInT1AFYZLbPUvOqJgWP45vM,7688
42
+ dstack/_internal/cli/utils/gpu.py,sha256=sgkSZbK50i4MdVAP44b0Xg1QX631YBDOxi_8PCOQI_U,7667
43
43
  dstack/_internal/cli/utils/rich.py,sha256=Gx1MJU929kMKsbdo9qF7XHARNta2426Ssb-xMLVhwbQ,5710
44
44
  dstack/_internal/cli/utils/run.py,sha256=-hZI0q09qnTeUFJF5zkJ4v3NLS2y2mCDrYHjplZs8RA,11789
45
45
  dstack/_internal/cli/utils/secrets.py,sha256=c7an9e8HwB24oiP1q225G1dhWcvCKw4aZtN0rN38_PE,654
46
46
  dstack/_internal/cli/utils/updates.py,sha256=pGr5keEmHojnbY0ayjWL4GWTz4ParY6fP85tHpJ11pI,3043
47
47
  dstack/_internal/cli/utils/volume.py,sha256=mU9I06dVMFbpjfkefxrZNoSWadKLoib3U14rHudNQN4,1975
48
48
  dstack/_internal/core/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
49
- dstack/_internal/core/consts.py,sha256=c1Yd5UY6Qx7KeuYgloXWncWhMsYj6TqwlElda7NtB98,254
49
+ dstack/_internal/core/consts.py,sha256=gN6AKBhvR-o6unEH66Kg3Sehqd06TO7uxunSvThvqzY,373
50
50
  dstack/_internal/core/errors.py,sha256=VTEmJ6C0zWL3fUL8ObRjB4rNL7_NxwEuWf0hZs6orWo,3377
51
51
  dstack/_internal/core/backends/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
52
52
  dstack/_internal/core/backends/configurators.py,sha256=Fy1FpiS_2aV8IWT1nkCb2CUI7fDVVRK-O2YFcHSpkTo,4573
@@ -59,21 +59,21 @@ dstack/_internal/core/backends/amddevcloud/configurator.py,sha256=WF4e-Yh2jMuTKQ
59
59
  dstack/_internal/core/backends/aws/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
60
60
  dstack/_internal/core/backends/aws/auth.py,sha256=BAe3HAedoAO42Fbv7YhpODKWlyil8M0PuRJsL-_u_kw,978
61
61
  dstack/_internal/core/backends/aws/backend.py,sha256=pjROH-S9pgrSMm-Eox_ocL7cTU6mIMRxvURq7Vi-2J8,876
62
- dstack/_internal/core/backends/aws/compute.py,sha256=mxwNXg1sNjF1bGNxt42DfXj66bbEi-Yh92lKS6EMxkg,45224
62
+ dstack/_internal/core/backends/aws/compute.py,sha256=wmeFCzgAfTZznpUOPlxAy4eGYhgVVixbqt2_q4RmQfU,45444
63
63
  dstack/_internal/core/backends/aws/configurator.py,sha256=X4CS-DnvsbdYCEVo6HBKLmH0YQ-zh5RucOjyhEcRca8,7634
64
64
  dstack/_internal/core/backends/aws/models.py,sha256=EUCHXHmZnshe3rwI9UtjilwcUMu1Z7MO4Y4-nlZ_IcA,4404
65
- dstack/_internal/core/backends/aws/resources.py,sha256=ccvdQd2H1DuUupdzMtqf86nkrenYPBJb1k3VgHu5Gp4,23610
65
+ dstack/_internal/core/backends/aws/resources.py,sha256=vwfy2lz9E0RVl3Lhs8dsuWMhhJOjv1NDckqSNucs7f0,24025
66
66
  dstack/_internal/core/backends/azure/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
67
67
  dstack/_internal/core/backends/azure/auth.py,sha256=CHp3QRNx3tIwkhyOY_l8WU-ElY4Pxhavoy9JSdwr15g,1259
68
68
  dstack/_internal/core/backends/azure/backend.py,sha256=XySTor8v_tLkZctDBryrFHrbVzQgNf_RUPkXQYplvwM,746
69
- dstack/_internal/core/backends/azure/compute.py,sha256=cbR0nSQfSIiR1x2J35So9xJbDkSjfYTEcYGQ-_1pceo,24695
69
+ dstack/_internal/core/backends/azure/compute.py,sha256=tuMYTAti3RU58ueBnfvznfc66k55aniNNzWBsXmFEz4,25372
70
70
  dstack/_internal/core/backends/azure/configurator.py,sha256=tKovBgMvyKmCogZ2OMxOJ-Sh_TCBqYXBvbBM-kgyeBI,18367
71
71
  dstack/_internal/core/backends/azure/models.py,sha256=ap0WsYBSFdDCxY0G3W1sg2DBN2k0xknz6bp8Ypf6phs,3356
72
72
  dstack/_internal/core/backends/azure/resources.py,sha256=cJWS2Yjxdg-XtjpdBxfVDZaX_gTbuoEpmdjTxdA9uIw,3431
73
73
  dstack/_internal/core/backends/azure/utils.py,sha256=taHMJq6UHRzUXLUcO2P5VCKy3wJaye2bG-6QdkEPNdY,1741
74
74
  dstack/_internal/core/backends/base/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
75
75
  dstack/_internal/core/backends/base/backend.py,sha256=hdFMHED1RMV9GVfLSU0yGhGE-cXlbLvt1coDA885PMM,505
76
- dstack/_internal/core/backends/base/compute.py,sha256=bK4whPes7mCQ698Fu1XtgOUGOpiBhx9EnhTI9KkG_R0,33666
76
+ dstack/_internal/core/backends/base/compute.py,sha256=XsyNrbTvHb8_DKDnM--mTiLXqOQW0pIdn3Ug-afRcP4,34358
77
77
  dstack/_internal/core/backends/base/configurator.py,sha256=TKG5iJlOXxVT2agE_ruMW8JGt2dtwXY7ghmJN8bEWlw,4299
78
78
  dstack/_internal/core/backends/base/models.py,sha256=Ij0osOl-T-ABsKLoVg2eY81DMkwdWkevAnjXj2QnLXI,532
79
79
  dstack/_internal/core/backends/base/offers.py,sha256=1688KlzBWgQWJFAEQMtqOud4Z8CwpvufNolX1zm1UTI,7487
@@ -109,7 +109,7 @@ dstack/_internal/core/backends/dstack/models.py,sha256=Jbe0Syez43N5i97TlIc8l-5Vn
109
109
  dstack/_internal/core/backends/gcp/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
110
110
  dstack/_internal/core/backends/gcp/auth.py,sha256=VWCn34hapaZ3E0OHCoiAVL97o3s64RF9w85CZ6xdtmk,2235
111
111
  dstack/_internal/core/backends/gcp/backend.py,sha256=OvTv1c7j4LTPCIEtkwD3-q6Eo1QET8xlM8CzbY0hLmI,572
112
- dstack/_internal/core/backends/gcp/compute.py,sha256=LycsCO9gCI1WEhsjUNnn-A4PrjciSU28BDsG3BEGNb4,43239
112
+ dstack/_internal/core/backends/gcp/compute.py,sha256=0OByUucYMMzhLTvohvsVRV98fB8SOXmkBoPYG-NN0IU,43724
113
113
  dstack/_internal/core/backends/gcp/configurator.py,sha256=eVPr6pD4g66ej5qxhFd94lb_R11CIBA-py9PQS9EOGE,6865
114
114
  dstack/_internal/core/backends/gcp/models.py,sha256=biLA3rlFcoPatAZpKycuIl-8PdnNSAFiDCJjov65_zo,4612
115
115
  dstack/_internal/core/backends/gcp/resources.py,sha256=2jpg24NH4Ov4J4Of1F3DPkwEWkRxEi-x4ZTx8XSNGIQ,16926
@@ -146,12 +146,12 @@ dstack/_internal/core/backends/nebius/resources.py,sha256=ttgwdqokvXF8BH_IDPFZxW
146
146
  dstack/_internal/core/backends/oci/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
147
147
  dstack/_internal/core/backends/oci/auth.py,sha256=8Cr18y_LOsyRP-16yfFpT70Cofpm0clB3KawS_7aRl4,717
148
148
  dstack/_internal/core/backends/oci/backend.py,sha256=yXjVCt7n6BVLH0byYFbNFf-P9J0FwlNfxsYbKGMdoI4,536
149
- dstack/_internal/core/backends/oci/compute.py,sha256=jB3A6s4Hxh-LYrqQR8wJYqYCxLt31cbeMxrEkBxRfys,7605
149
+ dstack/_internal/core/backends/oci/compute.py,sha256=njttE6pwrSq5itqUmpzfKjl1NFQ4Qgr5SAOMO4gEu_c,7733
150
150
  dstack/_internal/core/backends/oci/configurator.py,sha256=_mi093BzV8Xdfw81wkiqoY5d5x4CCCtni7J01CZeDCQ,5768
151
151
  dstack/_internal/core/backends/oci/exceptions.py,sha256=GxlUf7itcAQgY3m-U2ul_ZXzfUOcEcPEmisWFaoWSkA,288
152
152
  dstack/_internal/core/backends/oci/models.py,sha256=fDlY7-A0WDQeiwJzwrQl6D5ENOc-677Bz8LXyvBvF0c,2969
153
153
  dstack/_internal/core/backends/oci/region.py,sha256=a41dmlv-7jh2vP2z-0wnf0Yzdh2Donjm9aXuMgFXXKo,2935
154
- dstack/_internal/core/backends/oci/resources.py,sha256=VfTDuVTEO8MUvD3i2DFoe2du1qP4MSQb1yEaDgHbUp8,28982
154
+ dstack/_internal/core/backends/oci/resources.py,sha256=-47u2vWvRnpNQmgz3A12vu1hSIQ-dqawDl5ndyF0nwY,29392
155
155
  dstack/_internal/core/backends/runpod/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
156
156
  dstack/_internal/core/backends/runpod/api_client.py,sha256=Oet7CwQTu3sE57unqh-JvuMCBDEDPruAbGB4GyM0dM4,13427
157
157
  dstack/_internal/core/backends/runpod/backend.py,sha256=GcLbySsGY29ZGHXSqTXCxUUoTK9pxVScKly0_fkNr8c,566
@@ -219,7 +219,7 @@ dstack/_internal/core/services/api_client.py,sha256=HTQ0fcZciUh-nmfV09hUt__Z4N2z
219
219
  dstack/_internal/core/services/diff.py,sha256=xtJzlSLIw9pnECpMQTy-a2zcfnSd4Q9qi_Y0W00D65A,2073
220
220
  dstack/_internal/core/services/logs.py,sha256=7_eJdH4MD-3rVb4A6rIJfjj_p4jzUOCmjRVlPD-UDsg,2166
221
221
  dstack/_internal/core/services/profiles.py,sha256=ZFY6VILW6urm6l3u6xpORJ2z48eGJ2jcmRgjVrxr2tw,1612
222
- dstack/_internal/core/services/repos.py,sha256=Cq-t9RvdmEKjQIgQA6giQirqopdrPT-WzUoCoKLt0ws,6251
222
+ dstack/_internal/core/services/repos.py,sha256=Lw-4MXFBaE_BogRgtYra1H3KPkRGeVWoIfU8hTyJ7GM,10552
223
223
  dstack/_internal/core/services/configs/__init__.py,sha256=jMAHvZphEgZJldEiQhkaNSW8lVrVAxuYbu9RtLysUGU,5645
224
224
  dstack/_internal/core/services/ssh/__init__.py,sha256=UhgC3Lv3CPSGqSPEQZIKOfLKUlCFnaB0uqPQhfKCFt0,878
225
225
  dstack/_internal/core/services/ssh/attach.py,sha256=6YZ_HaimKrjdLzcDLksGi0_9rlVyLeBgPmmWHC7dYxo,9096
@@ -297,9 +297,9 @@ dstack/_internal/server/background/tasks/process_metrics.py,sha256=yKXe9J7m3oleK
297
297
  dstack/_internal/server/background/tasks/process_placement_groups.py,sha256=lgYIzjHG9EITK31yG6uQjlIcSwW5jsP9ZOBBZqW_eNs,4263
298
298
  dstack/_internal/server/background/tasks/process_probes.py,sha256=dmug-_rmYiVLLF-imto-Ju1gPtENvHvCjHyilqgYuJw,6457
299
299
  dstack/_internal/server/background/tasks/process_prometheus_metrics.py,sha256=_UZm37FVV4rhdd0So7HtcKbIgrSdAr5Vx-Uen_xizec,5459
300
- dstack/_internal/server/background/tasks/process_running_jobs.py,sha256=ulDrWgwnhosbK0Mtt0yNS4E38QSKXSh6ermNt-3jUTs,44965
300
+ dstack/_internal/server/background/tasks/process_running_jobs.py,sha256=IoQi7mm4upEZgujTkWYrXDKrC5rSZ5Q4_jAR4OpajaM,44973
301
301
  dstack/_internal/server/background/tasks/process_runs.py,sha256=Cx7Z1B7pZVlvCl-OsIaAiIMFG_aZDdn3nlZeha6k2x4,25041
302
- dstack/_internal/server/background/tasks/process_submitted_jobs.py,sha256=vcu1JRJ0-p_VB7HZfYkVf9d_yclazrcW1U7zHQltjcA,40935
302
+ dstack/_internal/server/background/tasks/process_submitted_jobs.py,sha256=XxPapMdCsuA_H_X27SIwIZFd0Y5jzwvIABnhqa-kwyQ,41098
303
303
  dstack/_internal/server/background/tasks/process_terminating_jobs.py,sha256=S7ZSDVMX-N0XMaMgwFa1QG_RAi48BP432s9AqHw4PMM,4066
304
304
  dstack/_internal/server/background/tasks/process_volumes.py,sha256=_fMmkwLYsyX-kpW9pDrZVJvFTZEOPp0gpjyKBMW-zw0,5204
305
305
  dstack/_internal/server/migrations/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -628,7 +628,7 @@ dstack/_internal/utils/nested_list.py,sha256=ba4fmMuaz807Bf6KRve-9_8zm4dxlNMQiFb
628
628
  dstack/_internal/utils/network.py,sha256=vvIQulBapL6Zw50jsit7sNNbGJVKTzooqWhkVWqaAzQ,1488
629
629
  dstack/_internal/utils/path.py,sha256=FSKOBLDZdT64KJ-lqpRMoo6YzqR2jwCgdEmcodFQz-E,1342
630
630
  dstack/_internal/utils/random_names.py,sha256=lVyzAmXhaSf2bAuvRFQ_ihLy42in1ydN4gJYsMw4GG8,3537
631
- dstack/_internal/utils/ssh.py,sha256=FxOA85-NTYEGhnW_PL3-uVYGZbVZfyvcA8uvD7BcN2Q,12131
631
+ dstack/_internal/utils/ssh.py,sha256=SE10JKbnUyocoAhMs0dsLIUDOQmUtQGArZEUAFIXBPQ,12852
632
632
  dstack/_internal/utils/tags.py,sha256=Gx4aZM85E_UUCGuFZbCEFxHB5DNgrtJP15_1tFFEd1c,1292
633
633
  dstack/_internal/utils/typing.py,sha256=tHrhtkr3G6jkk-_FZyobdjLYvP5_YIgXbKsN-13dfvg,386
634
634
  dstack/api/__init__.py,sha256=7qbG42qEHMyNDFRCz9FgfuimAKws3hV8eMXs6oRgh5c,1607
@@ -662,8 +662,8 @@ dstack/plugins/builtin/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3
662
662
  dstack/plugins/builtin/rest_plugin/__init__.py,sha256=lgTsq8Z6Km2F2UhPRChVB4vDM5ZpWtdk1iB1aa20ypA,440
663
663
  dstack/plugins/builtin/rest_plugin/_models.py,sha256=9hgVuU6OGSxidar88XhQnNo9izYWeQvVH45ciErv-Es,1910
664
664
  dstack/plugins/builtin/rest_plugin/_plugin.py,sha256=h3r3Yc3h22i93fifPTgTm9Oojd1sN1O4DP7ZTV-kWpM,5386
665
- dstack-0.19.29.dist-info/METADATA,sha256=HcciEQov78Oe3HcLD7vquMFJHimJXPOx_AANTEW33gM,20586
666
- dstack-0.19.29.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
667
- dstack-0.19.29.dist-info/entry_points.txt,sha256=GnLrMS8hx3rWAySQjA7tPNhtixV6a-brRkmal1PKoHc,58
668
- dstack-0.19.29.dist-info/licenses/LICENSE.md,sha256=qDABaRGjSKVOib1U8viw2P_96sIK7Puo426784oD9f8,15976
669
- dstack-0.19.29.dist-info/RECORD,,
665
+ dstack-0.19.30.dist-info/METADATA,sha256=1Eek1ucesAzIWosGAWsdivcQu0_mV0FKW00DVi6R4nk,20919
666
+ dstack-0.19.30.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
667
+ dstack-0.19.30.dist-info/entry_points.txt,sha256=GnLrMS8hx3rWAySQjA7tPNhtixV6a-brRkmal1PKoHc,58
668
+ dstack-0.19.30.dist-info/licenses/LICENSE.md,sha256=qDABaRGjSKVOib1U8viw2P_96sIK7Puo426784oD9f8,15976
669
+ dstack-0.19.30.dist-info/RECORD,,