dstack 0.19.27__py3-none-any.whl → 0.19.29__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of dstack has been flagged as potentially problematic.
- dstack/_internal/cli/commands/__init__.py +11 -8
- dstack/_internal/cli/commands/apply.py +6 -3
- dstack/_internal/cli/commands/completion.py +3 -1
- dstack/_internal/cli/commands/config.py +1 -0
- dstack/_internal/cli/commands/init.py +2 -2
- dstack/_internal/cli/commands/offer.py +1 -1
- dstack/_internal/cli/commands/project.py +1 -0
- dstack/_internal/cli/commands/server.py +2 -2
- dstack/_internal/cli/main.py +1 -1
- dstack/_internal/cli/services/configurators/base.py +2 -4
- dstack/_internal/cli/services/configurators/fleet.py +4 -5
- dstack/_internal/cli/services/configurators/gateway.py +3 -5
- dstack/_internal/cli/services/configurators/run.py +51 -27
- dstack/_internal/cli/services/configurators/volume.py +3 -5
- dstack/_internal/core/backends/aws/compute.py +51 -36
- dstack/_internal/core/backends/azure/compute.py +10 -7
- dstack/_internal/core/backends/base/compute.py +96 -14
- dstack/_internal/core/backends/base/offers.py +34 -4
- dstack/_internal/core/backends/cloudrift/compute.py +5 -7
- dstack/_internal/core/backends/cudo/compute.py +4 -2
- dstack/_internal/core/backends/datacrunch/compute.py +13 -11
- dstack/_internal/core/backends/digitalocean_base/compute.py +4 -5
- dstack/_internal/core/backends/gcp/compute.py +12 -7
- dstack/_internal/core/backends/hotaisle/compute.py +4 -7
- dstack/_internal/core/backends/kubernetes/compute.py +6 -4
- dstack/_internal/core/backends/lambdalabs/compute.py +4 -5
- dstack/_internal/core/backends/local/compute.py +1 -3
- dstack/_internal/core/backends/nebius/compute.py +10 -7
- dstack/_internal/core/backends/oci/compute.py +10 -7
- dstack/_internal/core/backends/runpod/compute.py +15 -6
- dstack/_internal/core/backends/template/compute.py.jinja +3 -1
- dstack/_internal/core/backends/tensordock/compute.py +1 -3
- dstack/_internal/core/backends/tensordock/models.py +2 -0
- dstack/_internal/core/backends/vastai/compute.py +7 -3
- dstack/_internal/core/backends/vultr/compute.py +5 -5
- dstack/_internal/core/compatibility/runs.py +2 -0
- dstack/_internal/core/models/common.py +67 -43
- dstack/_internal/core/models/configurations.py +88 -62
- dstack/_internal/core/models/fleets.py +41 -24
- dstack/_internal/core/models/instances.py +5 -5
- dstack/_internal/core/models/profiles.py +66 -47
- dstack/_internal/core/models/projects.py +8 -0
- dstack/_internal/core/models/repos/remote.py +21 -16
- dstack/_internal/core/models/resources.py +69 -65
- dstack/_internal/core/models/runs.py +17 -9
- dstack/_internal/server/app.py +5 -0
- dstack/_internal/server/background/tasks/process_fleets.py +8 -0
- dstack/_internal/server/background/tasks/process_instances.py +3 -2
- dstack/_internal/server/background/tasks/process_submitted_jobs.py +97 -34
- dstack/_internal/server/models.py +6 -5
- dstack/_internal/server/schemas/gateways.py +10 -9
- dstack/_internal/server/services/backends/__init__.py +1 -1
- dstack/_internal/server/services/backends/handlers.py +2 -0
- dstack/_internal/server/services/docker.py +8 -7
- dstack/_internal/server/services/projects.py +63 -4
- dstack/_internal/server/services/runs.py +2 -0
- dstack/_internal/server/settings.py +46 -0
- dstack/_internal/server/statics/index.html +1 -1
- dstack/_internal/server/statics/main-56191fbfe77f49b251de.css +3 -0
- dstack/_internal/server/statics/{main-4eecc75fbe64067eb1bc.js → main-c51afa7f243e24d3e446.js} +61115 -49101
- dstack/_internal/server/statics/{main-4eecc75fbe64067eb1bc.js.map → main-c51afa7f243e24d3e446.js.map} +1 -1
- dstack/_internal/utils/env.py +85 -11
- dstack/version.py +1 -1
- {dstack-0.19.27.dist-info → dstack-0.19.29.dist-info}/METADATA +1 -1
- {dstack-0.19.27.dist-info → dstack-0.19.29.dist-info}/RECORD +68 -73
- dstack/_internal/core/backends/tensordock/__init__.py +0 -0
- dstack/_internal/core/backends/tensordock/api_client.py +0 -104
- dstack/_internal/core/backends/tensordock/backend.py +0 -16
- dstack/_internal/core/backends/tensordock/configurator.py +0 -74
- dstack/_internal/server/statics/main-56191c63d516fd0041c4.css +0 -3
- dstack/_internal/server/statics/static/media/github.1f7102513534c83a9d8d735d2b8c12a2.svg +0 -3
- {dstack-0.19.27.dist-info → dstack-0.19.29.dist-info}/WHEEL +0 -0
- {dstack-0.19.27.dist-info → dstack-0.19.29.dist-info}/entry_points.txt +0 -0
- {dstack-0.19.27.dist-info → dstack-0.19.29.dist-info}/licenses/LICENSE.md +0 -0
dstack/_internal/core/backends/oci/compute.py

@@ -1,17 +1,18 @@
 from concurrent.futures import ThreadPoolExecutor
 from functools import cached_property
-from typing import List, Optional
+from typing import Callable, List, Optional
 
 import oci
 
 from dstack._internal.core.backends.base.compute import (
     Compute,
+    ComputeWithAllOffersCached,
     ComputeWithCreateInstanceSupport,
     ComputeWithMultinodeSupport,
     generate_unique_instance_name,
     get_user_data,
 )
-from dstack._internal.core.backends.base.offers import get_catalog_offers
+from dstack._internal.core.backends.base.offers import get_catalog_offers, get_offers_disk_modifier
 from dstack._internal.core.backends.oci import resources
 from dstack._internal.core.backends.oci.models import OCIConfig
 from dstack._internal.core.backends.oci.region import make_region_clients_map
@@ -47,6 +48,7 @@ CONFIGURABLE_DISK_SIZE = Range[Memory](min=Memory.parse("50GB"), max=Memory.pars
 
 
 class OCICompute(
+    ComputeWithAllOffersCached,
     ComputeWithCreateInstanceSupport,
     ComputeWithMultinodeSupport,
     Compute,
@@ -60,14 +62,10 @@ class OCICompute(
     def shapes_quota(self) -> resources.ShapesQuota:
         return resources.ShapesQuota.load(self.regions, self.config.compartment_id)
 
-    def get_offers(
-        self, requirements: Optional[Requirements] = None
-    ) -> List[InstanceOfferWithAvailability]:
+    def get_all_offers_with_availability(self) -> List[InstanceOfferWithAvailability]:
         offers = get_catalog_offers(
             backend=BackendType.OCI,
             locations=self.config.regions,
-            requirements=requirements,
-            configurable_disk_size=CONFIGURABLE_DISK_SIZE,
             extra_filter=_supported_instances,
         )
 
@@ -96,6 +94,11 @@ class OCICompute(
 
         return offers_with_availability
 
+    def get_offers_modifier(
+        self, requirements: Requirements
+    ) -> Callable[[InstanceOfferWithAvailability], Optional[InstanceOfferWithAvailability]]:
+        return get_offers_disk_modifier(CONFIGURABLE_DISK_SIZE, requirements)
+
     def terminate_instance(
         self, instance_id: str, region: str, backend_data: Optional[str] = None
     ) -> None:
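The OCI hunks above, together with the matching RunPod and Vultr changes below, replace the per-request get_offers(requirements) method with two hooks used by the new ComputeWithAllOffersCached mixin. The following is a minimal sketch of that pattern, not code from dstack: ExampleCompute and its disk-size range are hypothetical stand-ins, while the imported helpers and method names are the ones appearing in this diff.

from typing import Callable, List, Optional

from dstack._internal.core.backends.base.backend import Compute
from dstack._internal.core.backends.base.compute import ComputeWithAllOffersCached
from dstack._internal.core.backends.base.offers import get_catalog_offers, get_offers_disk_modifier
from dstack._internal.core.models.backends.base import BackendType
from dstack._internal.core.models.instances import (
    InstanceAvailability,
    InstanceOfferWithAvailability,
)
from dstack._internal.core.models.resources import Memory, Range
from dstack._internal.core.models.runs import Requirements

# Hypothetical limit for this sketch; each backend defines its own CONFIGURABLE_DISK_SIZE.
CONFIGURABLE_DISK_SIZE = Range[Memory](min=Memory.parse("10GB"), max=None)


class ExampleCompute(ComputeWithAllOffersCached, Compute):
    def get_all_offers_with_availability(self) -> List[InstanceOfferWithAvailability]:
        # Called without requirements: the full offer list is produced once so the mixin
        # can cache it, instead of the list being recomputed per request.
        offers = get_catalog_offers(
            backend=BackendType.RUNPOD,  # any catalog-backed BackendType; RUNPOD is just an example
            requirements=None,
        )
        return [
            InstanceOfferWithAvailability(
                **offer.dict(), availability=InstanceAvailability.AVAILABLE
            )
            for offer in offers
        ]

    def get_offers_modifier(
        self, requirements: Requirements
    ) -> Callable[[InstanceOfferWithAvailability], Optional[InstanceOfferWithAvailability]]:
        # Per-request adjustment applied on top of the cached offers, e.g. clamping the
        # requested disk size to what the backend can actually provision.
        return get_offers_disk_modifier(CONFIGURABLE_DISK_SIZE, requirements)

Presumably this split is what lets the server cache each backend's full offer list and apply only the cheap, requirements-specific modifier per job.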
dstack/_internal/core/backends/runpod/compute.py

@@ -1,17 +1,18 @@
 import json
 import uuid
 from datetime import timedelta
-from typing import List, Optional
+from typing import Callable, List, Optional
 
 from dstack._internal.core.backends.base.backend import Compute
 from dstack._internal.core.backends.base.compute import (
+    ComputeWithAllOffersCached,
     ComputeWithVolumeSupport,
     generate_unique_instance_name,
     generate_unique_volume_name,
     get_docker_commands,
     get_job_instance_name,
 )
-from dstack._internal.core.backends.base.offers import get_catalog_offers
+from dstack._internal.core.backends.base.offers import get_catalog_offers, get_offers_disk_modifier
 from dstack._internal.core.backends.runpod.api_client import RunpodApiClient
 from dstack._internal.core.backends.runpod.models import RunpodConfig
 from dstack._internal.core.consts import DSTACK_RUNNER_SSH_PORT
@@ -27,6 +28,7 @@ from dstack._internal.core.models.instances import (
     InstanceOfferWithAvailability,
     SSHKey,
 )
+from dstack._internal.core.models.resources import Memory, Range
 from dstack._internal.core.models.runs import Job, JobProvisioningData, Requirements, Run
 from dstack._internal.core.models.volumes import Volume, VolumeProvisioningData
 from dstack._internal.utils.common import get_current_datetime
@@ -39,8 +41,12 @@ MAX_RESOURCE_NAME_LEN = 60
 
 CONTAINER_REGISTRY_AUTH_CLEANUP_INTERVAL = 60 * 60 * 24  # 24 hour
 
+# RunPod does not seem to have any limits on the disk size.
+CONFIGURABLE_DISK_SIZE = Range[Memory](min=Memory.parse("1GB"), max=None)
+
 
 class RunpodCompute(
+    ComputeWithAllOffersCached,
    ComputeWithVolumeSupport,
     Compute,
 ):
@@ -51,13 +57,11 @@ class RunpodCompute(
         self.config = config
         self.api_client = RunpodApiClient(config.creds.api_key)
 
-    def get_offers(
-        self, requirements: Optional[Requirements] = None
-    ) -> List[InstanceOfferWithAvailability]:
+    def get_all_offers_with_availability(self) -> List[InstanceOfferWithAvailability]:
         offers = get_catalog_offers(
             backend=BackendType.RUNPOD,
             locations=self.config.regions or None,
-            requirements=requirements,
+            requirements=None,
             extra_filter=lambda o: _is_secure_cloud(o.region) or self.config.allow_community_cloud,
         )
         offers = [
@@ -68,6 +72,11 @@ class RunpodCompute(
         ]
         return offers
 
+    def get_offers_modifier(
+        self, requirements: Requirements
+    ) -> Callable[[InstanceOfferWithAvailability], Optional[InstanceOfferWithAvailability]]:
+        return get_offers_disk_modifier(CONFIGURABLE_DISK_SIZE, requirements)
+
     def run_job(
         self,
         run: Run,
dstack/_internal/core/backends/template/compute.py.jinja

@@ -2,6 +2,7 @@ from typing import List, Optional
 
 from dstack._internal.core.backends.base.backend import Compute
 from dstack._internal.core.backends.base.compute import (
+    ComputeWithAllOffersCached,
     ComputeWithCreateInstanceSupport,
     ComputeWithGatewaySupport,
     ComputeWithMultinodeSupport,
@@ -28,6 +29,7 @@ logger = get_logger(__name__)
 
 class {{ backend_name }}Compute(
     # TODO: Choose ComputeWith* classes to extend and implement
+    # ComputeWithAllOffersCached,
     # ComputeWithCreateInstanceSupport,
     # ComputeWithMultinodeSupport,
     # ComputeWithReservationSupport,
@@ -42,7 +44,7 @@ class {{ backend_name }}Compute(
         self.config = config
 
     def get_offers(
-        self, requirements: Optional[Requirements] = None
+        self, requirements: Requirements
     ) -> List[InstanceOfferWithAvailability]:
         # If the provider is added to gpuhunt, you'd typically get offers
         # using `get_catalog_offers()` and extend them with availability info.
dstack/_internal/core/backends/tensordock/compute.py

@@ -39,9 +39,7 @@ class TensorDockCompute(
         self.config = config
         self.api_client = TensorDockAPIClient(config.creds.api_key, config.creds.api_token)
 
-    def get_offers(
-        self, requirements: Optional[Requirements] = None
-    ) -> List[InstanceOfferWithAvailability]:
+    def get_offers(self, requirements: Requirements) -> List[InstanceOfferWithAvailability]:
         offers = get_catalog_offers(
             backend=BackendType.TENSORDOCK,
             requirements=requirements,
dstack/_internal/core/backends/tensordock/models.py

@@ -4,6 +4,8 @@ from pydantic import Field
 
 from dstack._internal.core.models.common import CoreModel
 
+# TODO: TensorDock is deprecated and will be removed in the future
+
 
 class TensorDockAPIKeyCreds(CoreModel):
     type: Annotated[Literal["api_key"], Field(description="The type of credentials")] = "api_key"
dstack/_internal/core/backends/vastai/compute.py

@@ -5,6 +5,7 @@ from gpuhunt.providers.vastai import VastAIProvider
 
 from dstack._internal.core.backends.base.backend import Compute
 from dstack._internal.core.backends.base.compute import (
+    ComputeWithFilteredOffersCached,
     generate_unique_instance_name_for_job,
     get_docker_commands,
 )
@@ -30,7 +31,10 @@ logger = get_logger(__name__)
 MAX_INSTANCE_NAME_LEN = 60
 
 
-class VastAICompute(Compute):
+class VastAICompute(
+    ComputeWithFilteredOffersCached,
+    Compute,
+):
     def __init__(self, config: VastAIConfig):
         super().__init__()
         self.config = config
@@ -49,8 +53,8 @@ class VastAICompute(Compute):
             )
         )
 
-    def get_offers(
-        self, requirements: Optional[Requirements] = None
+    def get_offers_by_requirements(
+        self, requirements: Requirements
     ) -> List[InstanceOfferWithAvailability]:
         offers = get_catalog_offers(
             backend=BackendType.VASTAI,
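VastAI adopts the other new mixin in this diff, ComputeWithFilteredOffersCached, whose hook get_offers_by_requirements still receives the requirements, suggesting caching keyed by the requirements rather than a single shared list. A rough sketch of that variant under the same assumptions as the earlier example (ExampleFilteredCompute and the availability handling are hypothetical):

from typing import List

from dstack._internal.core.backends.base.backend import Compute
from dstack._internal.core.backends.base.compute import ComputeWithFilteredOffersCached
from dstack._internal.core.backends.base.offers import get_catalog_offers
from dstack._internal.core.models.backends.base import BackendType
from dstack._internal.core.models.instances import (
    InstanceAvailability,
    InstanceOfferWithAvailability,
)
from dstack._internal.core.models.runs import Requirements


class ExampleFilteredCompute(ComputeWithFilteredOffersCached, Compute):
    def get_offers_by_requirements(
        self, requirements: Requirements
    ) -> List[InstanceOfferWithAvailability]:
        # Requirements still reach the catalog query here, unlike in the
        # ComputeWithAllOffersCached backends above, which pass requirements=None.
        offers = get_catalog_offers(
            backend=BackendType.VASTAI,
            requirements=requirements,
        )
        return [
            InstanceOfferWithAvailability(
                **offer.dict(), availability=InstanceAvailability.AVAILABLE
            )
            for offer in offers
        ]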
dstack/_internal/core/backends/vultr/compute.py

@@ -6,6 +6,7 @@ import requests
 
 from dstack._internal.core.backends.base.backend import Compute
 from dstack._internal.core.backends.base.compute import (
+    ComputeWithAllOffersCached,
     ComputeWithCreateInstanceSupport,
     ComputeWithMultinodeSupport,
     generate_unique_instance_name,
@@ -23,7 +24,7 @@ from dstack._internal.core.models.instances import (
     InstanceOfferWithAvailability,
 )
 from dstack._internal.core.models.placement import PlacementGroup
-from dstack._internal.core.models.runs import JobProvisioningData
+from dstack._internal.core.models.runs import JobProvisioningData
 from dstack._internal.utils.logging import get_logger
 
 logger = get_logger(__name__)
@@ -32,6 +33,7 @@ MAX_INSTANCE_NAME_LEN = 64
 
 
 class VultrCompute(
+    ComputeWithAllOffersCached,
     ComputeWithCreateInstanceSupport,
     ComputeWithMultinodeSupport,
     Compute,
@@ -41,12 +43,10 @@ class VultrCompute(
         self.config = config
         self.api_client = VultrApiClient(config.creds.api_key)
 
-    def get_offers(
-        self, requirements: Optional[Requirements] = None
-    ) -> List[InstanceOfferWithAvailability]:
+    def get_all_offers_with_availability(self) -> List[InstanceOfferWithAvailability]:
        offers = get_catalog_offers(
             backend=BackendType.VULTR,
-            requirements=requirements,
+            requirements=None,
             locations=self.config.regions or None,
             extra_filter=_supported_instances,
         )
dstack/_internal/core/compatibility/runs.py

@@ -31,6 +31,8 @@ def get_apply_plan_excludes(plan: ApplyRunPlanInput) -> Optional[IncludeExcludeD
         current_resource_excludes["status_message"] = True
         if current_resource.deployment_num == 0:
             current_resource_excludes["deployment_num"] = True
+        if current_resource.fleet is None:
+            current_resource_excludes["fleet"] = True
         apply_plan_excludes["current_resource"] = current_resource_excludes
         current_resource_excludes["run_spec"] = get_run_spec_excludes(current_resource.run_spec)
         job_submissions_excludes: IncludeExcludeDictType = {}
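As an aside, and only as an assumption based on the shape of these exclude dicts and on the include/exclude parameters of CoreModel.json() in the models/common.py hunks below: the new fleet entry appears to keep the field out of serialized payloads whenever it is not set. A small self-contained sketch of how such nested excludes are consumed; all model names here are hypothetical, only CoreModel and the exclude-dict shape come from this diff.

from typing import Optional

from dstack._internal.core.models.common import CoreModel


class FleetInfo(CoreModel):
    name: str


class ResourceInfo(CoreModel):
    status_message: Optional[str] = None
    fleet: Optional[FleetInfo] = None


class Plan(CoreModel):
    current_resource: ResourceInfo


plan = Plan(current_resource=ResourceInfo(status_message="ok", fleet=None))
excludes = {"current_resource": {"fleet": True, "status_message": True}}
print(plan.json(exclude=excludes))  # the excluded fields are dropped from the payload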
dstack/_internal/core/models/common.py

@@ -1,10 +1,10 @@
 import re
 from enum import Enum
-from typing import Any, Callable, Optional, Union
+from typing import TYPE_CHECKING, Any, Callable, Mapping, Optional, Union
 
 import orjson
 from pydantic import Field
-from pydantic_duality import DualBaseModel
+from pydantic_duality import generate_dual_base_model
 from typing_extensions import Annotated
 
 from dstack._internal.utils.json_utils import pydantic_orjson_dumps
@@ -17,46 +17,73 @@ IncludeExcludeDictType = dict[
 IncludeExcludeType = Union[IncludeExcludeSetType, IncludeExcludeDictType]
 
 
+class CoreConfig:
+    json_loads = orjson.loads
+    json_dumps = pydantic_orjson_dumps
+
+
+# All dstack models inherit from pydantic-duality's DualBaseModel.
 # DualBaseModel creates two classes for the model:
 # one with extra = "forbid" (CoreModel/CoreModel.__request__),
 # and another with extra = "ignore" (CoreModel.__response__).
-# This allows to use the same model both for
-# for
[... removed lines 25-59 of the old file were not rendered in this diff view ...]
+# This allows to use the same model both for strict parsing of the user input and
+# for permissive parsing of the server responses.
+#
+# We define a func to generate CoreModel dynamically that can be used
+# to define custom Config for both __request__ and __response__ models.
+# Note: Defining config in the model class directly overrides
+# pydantic-duality's base config, breaking __response__.
+def generate_dual_core_model(
+    custom_config: Union[type, Mapping],
+) -> "type[CoreModel]":
+    class CoreModel(generate_dual_base_model(custom_config)):
+        def json(
+            self,
+            *,
+            include: Optional[IncludeExcludeType] = None,
+            exclude: Optional[IncludeExcludeType] = None,
+            by_alias: bool = False,
+            skip_defaults: Optional[bool] = None,  # ignore as it's deprecated
+            exclude_unset: bool = False,
+            exclude_defaults: bool = False,
+            exclude_none: bool = False,
+            encoder: Optional[Callable[[Any], Any]] = None,
+            models_as_dict: bool = True,  # does not seems to be needed by dstack or dependencies
+            **dumps_kwargs: Any,
+        ) -> str:
+            """
+            Override `json()` method so that it calls `dict()`.
+            Allows changing how models are serialized by overriding `dict()` only.
+            By default, `json()` won't call `dict()`, so changes applied in `dict()` won't take place.
+            """
+            data = self.dict(
+                by_alias=by_alias,
+                include=include,
+                exclude=exclude,
+                exclude_unset=exclude_unset,
+                exclude_defaults=exclude_defaults,
+                exclude_none=exclude_none,
+            )
+            if self.__custom_root_type__:
+                data = data["__root__"]
+            return self.__config__.json_dumps(data, default=encoder, **dumps_kwargs)
+
+    return CoreModel
+
+
+if TYPE_CHECKING:
+
+    class CoreModel(generate_dual_base_model(CoreConfig)):
+        pass
+else:
+    CoreModel = generate_dual_core_model(CoreConfig)
+
+
+class FrozenConfig(CoreConfig):
+    frozen = True
+
+
+FrozenCoreModel = generate_dual_core_model(FrozenConfig)
 
 
 class Duration(int):
@@ -93,7 +120,7 @@ class Duration(int):
         raise ValueError(f"Cannot parse the duration {v}")
 
 
-class RegistryAuth(CoreModel):
+class RegistryAuth(FrozenCoreModel):
     """
     Credentials for pulling a private Docker image.
 
@@ -105,9 +132,6 @@ class RegistryAuth(CoreModel):
     username: Annotated[str, Field(description="The username")]
     password: Annotated[str, Field(description="The password or access token")]
 
-    class Config(CoreModel.Config):
-        frozen = True
-
 
 class ApplyAction(str, Enum):
     CREATE = "create"  # resource is to be created or overridden
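The models/common.py hunks above replace nested class Config blocks (which, per the new comment, override pydantic-duality's base config and break the permissive __response__ variant) with standalone config classes passed to generate_dual_core_model(). A minimal usage sketch; Token and TokenConfig are hypothetical names, and only CoreConfig, generate_dual_core_model, and the __request__/__response__ behavior are taken from the diff.

from dstack._internal.core.models.common import CoreConfig, generate_dual_core_model


class TokenConfig(CoreConfig):
    frozen = True  # the same mechanism FrozenConfig/FrozenCoreModel use above


class Token(generate_dual_core_model(TokenConfig)):
    value: str


strict = Token(value="abc")  # request variant: extra fields are rejected (extra = "forbid")
loose = Token.__response__(value="abc", extra_field="ignored")  # response variant: extras ignored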
dstack/_internal/core/models/configurations.py

@@ -10,12 +10,23 @@ from pydantic import Field, ValidationError, conint, constr, root_validator, val
 from typing_extensions import Self
 
 from dstack._internal.core.errors import ConfigurationError
-from dstack._internal.core.models.common import
+from dstack._internal.core.models.common import (
+    CoreConfig,
+    CoreModel,
+    Duration,
+    RegistryAuth,
+    generate_dual_core_model,
+)
 from dstack._internal.core.models.envs import Env
 from dstack._internal.core.models.files import FilePathMapping
 from dstack._internal.core.models.fleets import FleetConfiguration
 from dstack._internal.core.models.gateways import GatewayConfiguration
-from dstack._internal.core.models.profiles import
+from dstack._internal.core.models.profiles import (
+    ProfileParams,
+    ProfileParamsConfig,
+    parse_duration,
+    parse_off_duration,
+)
 from dstack._internal.core.models.resources import Range, ResourcesSpec
 from dstack._internal.core.models.services import AnyModel, OpenAIChatModel
 from dstack._internal.core.models.unix import UnixUser
@@ -276,7 +287,20 @@ class HTTPHeaderSpec(CoreModel):
     ]
 
 
-class ProbeConfig(CoreModel):
+class ProbeConfigConfig(CoreConfig):
+    @staticmethod
+    def schema_extra(schema: Dict[str, Any]):
+        add_extra_schema_types(
+            schema["properties"]["timeout"],
+            extra_types=[{"type": "string"}],
+        )
+        add_extra_schema_types(
+            schema["properties"]["interval"],
+            extra_types=[{"type": "string"}],
+        )
+
+
+class ProbeConfig(generate_dual_core_model(ProbeConfigConfig)):
     type: Literal["http"]  # expect other probe types in the future, namely `exec`
     url: Annotated[
         Optional[str], Field(description=f"The URL to request. Defaults to `{DEFAULT_PROBE_URL}`")
@@ -331,18 +355,6 @@ class ProbeConfig(CoreModel):
         ),
     ] = None
 
-    class Config(CoreModel.Config):
-        @staticmethod
-        def schema_extra(schema: Dict[str, Any]):
-            add_extra_schema_types(
-                schema["properties"]["timeout"],
-                extra_types=[{"type": "string"}],
-            )
-            add_extra_schema_types(
-                schema["properties"]["interval"],
-                extra_types=[{"type": "string"}],
-            )
-
     @validator("timeout", pre=True)
     def parse_timeout(cls, v: Optional[Union[int, str]]) -> Optional[int]:
         if v is None:
@@ -381,6 +393,19 @@ class ProbeConfig(CoreModel):
         return values
 
 
+class BaseRunConfigurationConfig(CoreConfig):
+    @staticmethod
+    def schema_extra(schema: Dict[str, Any]):
+        add_extra_schema_types(
+            schema["properties"]["volumes"]["items"],
+            extra_types=[{"type": "string"}],
+        )
+        add_extra_schema_types(
+            schema["properties"]["files"]["items"],
+            extra_types=[{"type": "string"}],
+        )
+
+
 class BaseRunConfiguration(CoreModel):
     type: Literal["none"]
     name: Annotated[
@@ -484,18 +509,6 @@ class BaseRunConfiguration(CoreModel):
     # deprecated since 0.18.31; task, service -- no effect; dev-environment -- executed right before `init`
     setup: CommandsList = []
 
-    class Config(CoreModel.Config):
-        @staticmethod
-        def schema_extra(schema: Dict[str, Any]):
-            add_extra_schema_types(
-                schema["properties"]["volumes"]["items"],
-                extra_types=[{"type": "string"}],
-            )
-            add_extra_schema_types(
-                schema["properties"]["files"]["items"],
-                extra_types=[{"type": "string"}],
-            )
-
     @validator("python", pre=True, always=True)
     def convert_python(cls, v, values) -> Optional[PythonVersion]:
         if v is not None and values.get("image"):
@@ -621,20 +634,25 @@ class DevEnvironmentConfigurationParams(CoreModel):
         return None
 
 
+class DevEnvironmentConfigurationConfig(
+    ProfileParamsConfig,
+    BaseRunConfigurationConfig,
+):
+    @staticmethod
+    def schema_extra(schema: Dict[str, Any]):
+        ProfileParamsConfig.schema_extra(schema)
+        BaseRunConfigurationConfig.schema_extra(schema)
+
+
 class DevEnvironmentConfiguration(
     ProfileParams,
     BaseRunConfiguration,
     ConfigurationWithPortsParams,
     DevEnvironmentConfigurationParams,
+    generate_dual_core_model(DevEnvironmentConfigurationConfig),
 ):
     type: Literal["dev-environment"] = "dev-environment"
 
-    class Config(ProfileParams.Config, BaseRunConfiguration.Config):
-        @staticmethod
-        def schema_extra(schema: Dict[str, Any]):
-            ProfileParams.Config.schema_extra(schema)
-            BaseRunConfiguration.Config.schema_extra(schema)
-
     @validator("entrypoint")
     def validate_entrypoint(cls, v: Optional[str]) -> Optional[str]:
         if v is not None:
@@ -646,20 +664,38 @@ class TaskConfigurationParams(CoreModel):
     nodes: Annotated[int, Field(description="Number of nodes", ge=1)] = 1
 
 
+class TaskConfigurationConfig(
+    ProfileParamsConfig,
+    BaseRunConfigurationConfig,
+):
+    @staticmethod
+    def schema_extra(schema: Dict[str, Any]):
+        ProfileParamsConfig.schema_extra(schema)
+        BaseRunConfigurationConfig.schema_extra(schema)
+
+
 class TaskConfiguration(
     ProfileParams,
     BaseRunConfiguration,
     ConfigurationWithCommandsParams,
     ConfigurationWithPortsParams,
     TaskConfigurationParams,
+    generate_dual_core_model(TaskConfigurationConfig),
 ):
     type: Literal["task"] = "task"
 
[... removed lines 658-662 of the old file were not rendered in this diff view ...]
+
+class ServiceConfigurationParamsConfig(CoreConfig):
+    @staticmethod
+    def schema_extra(schema: Dict[str, Any]):
+        add_extra_schema_types(
+            schema["properties"]["replicas"],
+            extra_types=[{"type": "integer"}, {"type": "string"}],
+        )
+        add_extra_schema_types(
+            schema["properties"]["model"],
+            extra_types=[{"type": "string"}],
+        )
 
 
 class ServiceConfigurationParams(CoreModel):
@@ -719,18 +755,6 @@ class ServiceConfigurationParams(CoreModel):
         Field(description="List of probes used to determine job health"),
     ] = []
 
-    class Config(CoreModel.Config):
-        @staticmethod
-        def schema_extra(schema: Dict[str, Any]):
-            add_extra_schema_types(
-                schema["properties"]["replicas"],
-                extra_types=[{"type": "integer"}, {"type": "string"}],
-            )
-            add_extra_schema_types(
-                schema["properties"]["model"],
-                extra_types=[{"type": "string"}],
-            )
-
     @validator("port")
     def convert_port(cls, v) -> PortMapping:
         if isinstance(v, int):
@@ -797,25 +821,27 @@ class ServiceConfigurationParams(CoreModel):
         return v
 
 
+class ServiceConfigurationConfig(
+    ProfileParamsConfig,
+    BaseRunConfigurationConfig,
+    ServiceConfigurationParamsConfig,
+):
+    @staticmethod
+    def schema_extra(schema: Dict[str, Any]):
+        ProfileParamsConfig.schema_extra(schema)
+        BaseRunConfigurationConfig.schema_extra(schema)
+        ServiceConfigurationParamsConfig.schema_extra(schema)
+
+
 class ServiceConfiguration(
     ProfileParams,
     BaseRunConfiguration,
     ConfigurationWithCommandsParams,
     ServiceConfigurationParams,
+    generate_dual_core_model(ServiceConfigurationConfig),
 ):
     type: Literal["service"] = "service"
 
-    class Config(
-        ProfileParams.Config,
-        BaseRunConfiguration.Config,
-        ServiceConfigurationParams.Config,
-    ):
-        @staticmethod
-        def schema_extra(schema: Dict[str, Any]):
-            ProfileParams.Config.schema_extra(schema)
-            BaseRunConfiguration.Config.schema_extra(schema)
-            ServiceConfigurationParams.Config.schema_extra(schema)
-
 
 AnyRunConfiguration = Union[DevEnvironmentConfiguration, TaskConfiguration, ServiceConfiguration]
 
@@ -876,7 +902,7 @@ class DstackConfiguration(CoreModel):
         Field(discriminator="type"),
     ]
 
-    class Config(
+    class Config(CoreConfig):
         json_loads = orjson.loads
         json_dumps = pydantic_orjson_dumps_with_indent
 
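The configurations.py changes follow the same recipe as common.py: every schema_extra hook now lives in a standalone *Config class combined via generate_dual_core_model(), while validators such as parse_timeout keep accepting the string forms that the extra schema types advertise. A hedged illustration for ProbeConfig, with hypothetical field values; only the field names and the parse_timeout validator are visible in the hunks above.

from dstack._internal.core.models.configurations import ProbeConfig

# "10s" is assumed to be accepted because parse_timeout() (pre=True) converts duration
# strings, and ProbeConfigConfig.schema_extra() advertises {"type": "string"} for the field.
probe = ProbeConfig(type="http", url="/health", timeout="10s")
print(probe.timeout)  # assumed to print the parsed value in seconds, e.g. 10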