proximl 0.5.6__tar.gz → 0.5.7__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {proximl-0.5.6/proximl.egg-info → proximl-0.5.7}/PKG-INFO +1 -1
- {proximl-0.5.6 → proximl-0.5.7}/proximl/__init__.py +1 -1
- {proximl-0.5.6 → proximl-0.5.7}/proximl/checkpoints.py +25 -25
- {proximl-0.5.6 → proximl-0.5.7}/proximl/cli/cloudbender/__init__.py +1 -0
- proximl-0.5.7/proximl/cli/cloudbender/data_connector.py +159 -0
- {proximl-0.5.6 → proximl-0.5.7}/proximl/cli/cloudbender/service.py +19 -2
- {proximl-0.5.6 → proximl-0.5.7}/proximl/cloudbender/cloudbender.py +2 -0
- proximl-0.5.7/proximl/cloudbender/data_connectors.py +112 -0
- {proximl-0.5.6 → proximl-0.5.7}/proximl/cloudbender/services.py +65 -1
- {proximl-0.5.6 → proximl-0.5.7}/proximl/datasets.py +19 -8
- {proximl-0.5.6 → proximl-0.5.7}/proximl/jobs.py +13 -6
- {proximl-0.5.6 → proximl-0.5.7}/proximl/models.py +22 -19
- {proximl-0.5.6 → proximl-0.5.7}/proximl/projects.py +60 -8
- {proximl-0.5.6 → proximl-0.5.7}/proximl/volumes.py +9 -2
- {proximl-0.5.6 → proximl-0.5.7/proximl.egg-info}/PKG-INFO +1 -1
- {proximl-0.5.6 → proximl-0.5.7}/proximl.egg-info/SOURCES.txt +4 -1
- {proximl-0.5.6 → proximl-0.5.7}/pyproject.toml +1 -0
- {proximl-0.5.6 → proximl-0.5.7}/tests/integration/test_jobs_integration.py +13 -0
- proximl-0.5.7/tests/unit/cloudbender/test_data_connectors_unit.py +176 -0
- {proximl-0.5.6 → proximl-0.5.7}/tests/unit/cloudbender/test_services_unit.py +6 -0
- {proximl-0.5.6 → proximl-0.5.7}/tests/unit/test_projects_unit.py +45 -5
- {proximl-0.5.6 → proximl-0.5.7}/LICENSE +0 -0
- {proximl-0.5.6 → proximl-0.5.7}/README.md +0 -0
- {proximl-0.5.6 → proximl-0.5.7}/examples/__init__.py +0 -0
- {proximl-0.5.6 → proximl-0.5.7}/examples/create_dataset_and_training_job.py +0 -0
- {proximl-0.5.6 → proximl-0.5.7}/examples/local_storage.py +0 -0
- {proximl-0.5.6 → proximl-0.5.7}/examples/training_inference_pipeline.py +0 -0
- {proximl-0.5.6 → proximl-0.5.7}/proximl/__main__.py +0 -0
- {proximl-0.5.6 → proximl-0.5.7}/proximl/auth.py +0 -0
- {proximl-0.5.6 → proximl-0.5.7}/proximl/cli/__init__.py +0 -0
- {proximl-0.5.6 → proximl-0.5.7}/proximl/cli/checkpoint.py +0 -0
- {proximl-0.5.6 → proximl-0.5.7}/proximl/cli/cloudbender/datastore.py +0 -0
- {proximl-0.5.6 → proximl-0.5.7}/proximl/cli/cloudbender/device.py +0 -0
- {proximl-0.5.6 → proximl-0.5.7}/proximl/cli/cloudbender/node.py +0 -0
- {proximl-0.5.6 → proximl-0.5.7}/proximl/cli/cloudbender/provider.py +0 -0
- {proximl-0.5.6 → proximl-0.5.7}/proximl/cli/cloudbender/region.py +0 -0
- {proximl-0.5.6 → proximl-0.5.7}/proximl/cli/connection.py +0 -0
- {proximl-0.5.6 → proximl-0.5.7}/proximl/cli/dataset.py +0 -0
- {proximl-0.5.6 → proximl-0.5.7}/proximl/cli/environment.py +0 -0
- {proximl-0.5.6 → proximl-0.5.7}/proximl/cli/gpu.py +0 -0
- {proximl-0.5.6 → proximl-0.5.7}/proximl/cli/job/__init__.py +0 -0
- {proximl-0.5.6 → proximl-0.5.7}/proximl/cli/job/create.py +0 -0
- {proximl-0.5.6 → proximl-0.5.7}/proximl/cli/model.py +0 -0
- {proximl-0.5.6 → proximl-0.5.7}/proximl/cli/project.py +0 -0
- {proximl-0.5.6 → proximl-0.5.7}/proximl/cli/volume.py +0 -0
- {proximl-0.5.6 → proximl-0.5.7}/proximl/cloudbender/__init__.py +0 -0
- {proximl-0.5.6 → proximl-0.5.7}/proximl/cloudbender/datastores.py +0 -0
- {proximl-0.5.6 → proximl-0.5.7}/proximl/cloudbender/device_configs.py +0 -0
- {proximl-0.5.6 → proximl-0.5.7}/proximl/cloudbender/devices.py +0 -0
- {proximl-0.5.6 → proximl-0.5.7}/proximl/cloudbender/nodes.py +0 -0
- {proximl-0.5.6 → proximl-0.5.7}/proximl/cloudbender/providers.py +0 -0
- {proximl-0.5.6 → proximl-0.5.7}/proximl/cloudbender/regions.py +0 -0
- {proximl-0.5.6 → proximl-0.5.7}/proximl/connections.py +0 -0
- {proximl-0.5.6 → proximl-0.5.7}/proximl/environments.py +0 -0
- {proximl-0.5.6 → proximl-0.5.7}/proximl/exceptions.py +0 -0
- {proximl-0.5.6 → proximl-0.5.7}/proximl/gpu_types.py +0 -0
- {proximl-0.5.6 → proximl-0.5.7}/proximl/proximl.py +0 -0
- {proximl-0.5.6 → proximl-0.5.7}/proximl.egg-info/dependency_links.txt +0 -0
- {proximl-0.5.6 → proximl-0.5.7}/proximl.egg-info/entry_points.txt +0 -0
- {proximl-0.5.6 → proximl-0.5.7}/proximl.egg-info/requires.txt +0 -0
- {proximl-0.5.6 → proximl-0.5.7}/proximl.egg-info/top_level.txt +0 -0
- {proximl-0.5.6 → proximl-0.5.7}/setup.cfg +0 -0
- {proximl-0.5.6 → proximl-0.5.7}/setup.py +0 -0
- {proximl-0.5.6 → proximl-0.5.7}/tests/integration/__init__.py +0 -0
- {proximl-0.5.6 → proximl-0.5.7}/tests/integration/cloudbender/__init__.py +0 -0
- {proximl-0.5.6 → proximl-0.5.7}/tests/integration/cloudbender/test_providers_integration.py +0 -0
- {proximl-0.5.6 → proximl-0.5.7}/tests/integration/conftest.py +0 -0
- {proximl-0.5.6 → proximl-0.5.7}/tests/integration/test_checkpoints_integration.py +0 -0
- {proximl-0.5.6 → proximl-0.5.7}/tests/integration/test_datasets_integration.py +0 -0
- {proximl-0.5.6 → proximl-0.5.7}/tests/integration/test_environments_integration.py +0 -0
- {proximl-0.5.6 → proximl-0.5.7}/tests/integration/test_gpu_types_integration.py +0 -0
- {proximl-0.5.6 → proximl-0.5.7}/tests/integration/test_models_integration.py +0 -0
- {proximl-0.5.6 → proximl-0.5.7}/tests/integration/test_projects_integration.py +0 -0
- {proximl-0.5.6 → proximl-0.5.7}/tests/integration/test_volumes_integration.py +0 -0
- {proximl-0.5.6 → proximl-0.5.7}/tests/unit/__init__.py +0 -0
- {proximl-0.5.6 → proximl-0.5.7}/tests/unit/cli/__init__.py +0 -0
- {proximl-0.5.6 → proximl-0.5.7}/tests/unit/cli/cloudbender/__init__.py +0 -0
- {proximl-0.5.6 → proximl-0.5.7}/tests/unit/cli/cloudbender/test_cli_datastore_unit.py +0 -0
- {proximl-0.5.6 → proximl-0.5.7}/tests/unit/cli/cloudbender/test_cli_device_unit.py +0 -0
- {proximl-0.5.6 → proximl-0.5.7}/tests/unit/cli/cloudbender/test_cli_node_unit.py +0 -0
- {proximl-0.5.6 → proximl-0.5.7}/tests/unit/cli/cloudbender/test_cli_provider_unit.py +0 -0
- {proximl-0.5.6 → proximl-0.5.7}/tests/unit/cli/cloudbender/test_cli_region_unit.py +0 -0
- /proximl-0.5.6/tests/unit/cli/cloudbender/test_cli_reservation_unit.py → /proximl-0.5.7/tests/unit/cli/cloudbender/test_cli_service_unit.py +0 -0
- {proximl-0.5.6 → proximl-0.5.7}/tests/unit/cli/conftest.py +0 -0
- {proximl-0.5.6 → proximl-0.5.7}/tests/unit/cli/test_cli_checkpoint_unit.py +0 -0
- {proximl-0.5.6 → proximl-0.5.7}/tests/unit/cli/test_cli_datasets_unit.py +0 -0
- {proximl-0.5.6 → proximl-0.5.7}/tests/unit/cli/test_cli_environment_unit.py +0 -0
- {proximl-0.5.6 → proximl-0.5.7}/tests/unit/cli/test_cli_gpu_unit.py +0 -0
- {proximl-0.5.6 → proximl-0.5.7}/tests/unit/cli/test_cli_job_unit.py +0 -0
- {proximl-0.5.6 → proximl-0.5.7}/tests/unit/cli/test_cli_model_unit.py +0 -0
- {proximl-0.5.6 → proximl-0.5.7}/tests/unit/cli/test_cli_project_unit.py +0 -0
- {proximl-0.5.6 → proximl-0.5.7}/tests/unit/cli/test_cli_volume_unit.py +0 -0
- {proximl-0.5.6 → proximl-0.5.7}/tests/unit/cloudbender/__init__.py +0 -0
- {proximl-0.5.6 → proximl-0.5.7}/tests/unit/cloudbender/test_datastores_unit.py +0 -0
- {proximl-0.5.6 → proximl-0.5.7}/tests/unit/cloudbender/test_device_configs_unit.py +0 -0
- {proximl-0.5.6 → proximl-0.5.7}/tests/unit/cloudbender/test_devices_unit.py +0 -0
- {proximl-0.5.6 → proximl-0.5.7}/tests/unit/cloudbender/test_nodes_unit.py +0 -0
- {proximl-0.5.6 → proximl-0.5.7}/tests/unit/cloudbender/test_providers_unit.py +0 -0
- {proximl-0.5.6 → proximl-0.5.7}/tests/unit/cloudbender/test_regions_unit.py +0 -0
- {proximl-0.5.6 → proximl-0.5.7}/tests/unit/conftest.py +0 -0
- {proximl-0.5.6 → proximl-0.5.7}/tests/unit/test_auth.py +0 -0
- {proximl-0.5.6 → proximl-0.5.7}/tests/unit/test_checkpoints_unit.py +0 -0
- {proximl-0.5.6 → proximl-0.5.7}/tests/unit/test_connections_unit.py +0 -0
- {proximl-0.5.6 → proximl-0.5.7}/tests/unit/test_datasets_unit.py +0 -0
- {proximl-0.5.6 → proximl-0.5.7}/tests/unit/test_environments_unit.py +0 -0
- {proximl-0.5.6 → proximl-0.5.7}/tests/unit/test_exceptions.py +0 -0
- {proximl-0.5.6 → proximl-0.5.7}/tests/unit/test_gpu_types_unit.py +0 -0
- {proximl-0.5.6 → proximl-0.5.7}/tests/unit/test_jobs_unit.py +0 -0
- {proximl-0.5.6 → proximl-0.5.7}/tests/unit/test_models_unit.py +0 -0
- {proximl-0.5.6 → proximl-0.5.7}/tests/unit/test_proximl.py +0 -0
- {proximl-0.5.6 → proximl-0.5.7}/tests/unit/test_volumes_unit.py +0 -0

{proximl-0.5.6 → proximl-0.5.7}/proximl/checkpoints.py

@@ -23,9 +23,7 @@ class Checkpoints(object):

     async def list(self, **kwargs):
         resp = await self.proximl._query(f"/checkpoint", "GET", kwargs)
-        checkpoints = [
-            Checkpoint(self.proximl, **checkpoint) for checkpoint in resp
-        ]
+        checkpoints = [Checkpoint(self.proximl, **checkpoint) for checkpoint in resp]
         return checkpoints

     async def list_public(self, **kwargs):

@@ -39,8 +37,7 @@ class Checkpoints(object):
             source_type=source_type,
             source_uri=source_uri,
             source_options=kwargs.get("source_options"),
-            project_uuid=kwargs.get("project_uuid")
-            or self.proximl.active_project,
+            project_uuid=kwargs.get("project_uuid") or self.proximl.active_project,
         )
         payload = {k: v for k, v in data.items() if v is not None}
         logging.info(f"Creating Checkpoint {name}")

@@ -60,9 +57,7 @@ class Checkpoint:
     def __init__(self, proximl, **kwargs):
         self.proximl = proximl
         self._checkpoint = kwargs
-        self._id = self._checkpoint.get(
-            "id", self._checkpoint.get("checkpoint_uuid")
-        )
+        self._id = self._checkpoint.get("id", self._checkpoint.get("checkpoint_uuid"))
         self._status = self._checkpoint.get("status")
         self._name = self._checkpoint.get("name")
         self._size = self._checkpoint.get("size")

@@ -123,15 +118,17 @@ class Checkpoint:
                 entity_type="checkpoint",
                 project_uuid=self._checkpoint.get("project_uuid"),
                 cidr=self._checkpoint.get("vpn").get("cidr"),
-                ssh_port=self._checkpoint.get("vpn")
-                .get("client")
-                .get("ssh_port"),
-                input_path=self._checkpoint.get("source_uri")
-                if self.status in ["new", "downloading"]
-                else None,
-                output_path=self._checkpoint.get("output_uri")
-                if self.status == "exporting"
-                else None,
+                ssh_port=self._checkpoint.get("vpn").get("client").get("ssh_port"),
+                input_path=(
+                    self._checkpoint.get("source_uri")
+                    if self.status in ["new", "downloading"]
+                    else None
+                ),
+                output_path=(
+                    self._checkpoint.get("output_uri")
+                    if self.status == "exporting"
+                    else None
+                ),
             )
         else:
             details = dict()

@@ -195,9 +192,7 @@ class Checkpoint:
             if msg_handler:
                 msg_handler(data)
             else:
-                timestamp = datetime.fromtimestamp(
-                    int(data.get("time")) / 1000
-                )
+                timestamp = datetime.fromtimestamp(int(data.get("time")) / 1000)
                 print(
                     f"{timestamp.strftime('%m/%d/%Y, %H:%M:%S')}: {data.get('msg').rstrip()}"
                 )

@@ -224,19 +219,24 @@ class Checkpoint:
         return self

     async def wait_for(self, status, timeout=300):
+        if self.status == status:
+            return
         valid_statuses = ["downloading", "ready", "archived"]
         if not status in valid_statuses:
             raise SpecificationError(
                 "status",
                 f"Invalid wait_for status {status}. Valid statuses are: {valid_statuses}",
             )
-        if self.status == status:
-            return
+
+        MAX_TIMEOUT = 24 * 60 * 60
+        if timeout > MAX_TIMEOUT:
+            raise SpecificationError(
+                "timeout",
+                f"timeout must be less than {MAX_TIMEOUT} seconds.",
+            )
         POLL_INTERVAL_MIN = 5
         POLL_INTERVAL_MAX = 60
-        POLL_INTERVAL = max(
-            min(timeout / 60, POLL_INTERVAL_MAX), POLL_INTERVAL_MIN
-        )
+        POLL_INTERVAL = max(min(timeout / 60, POLL_INTERVAL_MAX), POLL_INTERVAL_MIN)
         retry_count = math.ceil(timeout / POLL_INTERVAL)
         count = 0
         while count < retry_count:
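
The reworked wait_for above (the same pattern recurs in the service, dataset, and job changes below) now caps the timeout at 24 hours and derives its polling cadence from the requested timeout. A standalone sketch of that arithmetic, illustrative only and not part of the package:

import math

MAX_TIMEOUT = 24 * 60 * 60  # 86400 s; wait_for now rejects anything larger
POLL_INTERVAL_MIN = 5
POLL_INTERVAL_MAX = 60


def poll_plan(timeout):
    # Same arithmetic as the new wait_for implementations: poll roughly 60 times,
    # but never more often than every 5 s nor less often than every 60 s.
    poll_interval = max(min(timeout / 60, POLL_INTERVAL_MAX), POLL_INTERVAL_MIN)
    return poll_interval, math.ceil(timeout / poll_interval)


for timeout in (300, 1800, MAX_TIMEOUT):
    interval, retries = poll_plan(timeout)
    print(f"timeout={timeout}s -> poll every {interval:.0f}s, up to {retries} polls")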

{proximl-0.5.6 → proximl-0.5.7}/proximl/cli/cloudbender/__init__.py

@@ -15,4 +15,5 @@ from proximl.cli.cloudbender.region import region
 from proximl.cli.cloudbender.node import node
 from proximl.cli.cloudbender.device import device
 from proximl.cli.cloudbender.datastore import datastore
+from proximl.cli.cloudbender.data_connector import data_connector
 from proximl.cli.cloudbender.service import service

proximl-0.5.7/proximl/cli/cloudbender/data_connector.py (new file)

@@ -0,0 +1,159 @@
+import click
+from proximl.cli import cli, pass_config, search_by_id_name
+from proximl.cli.cloudbender import cloudbender
+
+
+@cloudbender.group()
+@pass_config
+def data_connector(config):
+    """proxiML CloudBender data connector commands."""
+    pass
+
+
+@data_connector.command()
+@click.option(
+    "--provider",
+    "-p",
+    type=click.STRING,
+    required=True,
+    help="The provider ID of the region.",
+)
+@click.option(
+    "--region",
+    "-r",
+    type=click.STRING,
+    required=True,
+    help="The region ID to list data connectors for.",
+)
+@pass_config
+def list(config, provider, region):
+    """List data connectors."""
+    data = [
+        ["ID", "NAME", "TYPE"],
+        [
+            "-" * 80,
+            "-" * 80,
+            "-" * 80,
+        ],
+    ]
+
+    data_connectors = config.proximl.run(
+        config.proximl.client.cloudbender.data_connectors.list(
+            provider_uuid=provider, region_uuid=region
+        )
+    )
+
+    for data_connector in data_connectors:
+        data.append(
+            [
+                data_connector.id,
+                data_connector.name,
+                data_connector.type,
+            ]
+        )
+
+    for row in data:
+        click.echo(
+            "{: >37.36} {: >29.28} {: >9.8}" "".format(*row),
+            file=config.stdout,
+        )
+
+
+@data_connector.command()
+@click.option(
+    "--provider",
+    "-p",
+    type=click.STRING,
+    required=True,
+    help="The provider ID of the region.",
+)
+@click.option(
+    "--region",
+    "-r",
+    type=click.STRING,
+    required=True,
+    help="The region ID to create the data_connector in.",
+)
+@click.option(
+    "--type",
+    "-t",
+    type=click.Choice(
+        [
+            "custom",
+        ],
+        case_sensitive=False,
+    ),
+    required=True,
+    help="The type of data connector to create.",
+)
+@click.option(
+    "--protocol",
+    "-r",
+    type=click.STRING,
+    help="The transport protocol of the data connector",
+)
+@click.option(
+    "--port-range",
+    "-p",
+    type=click.STRING,
+    help="The port range of the data connector",
+)
+@click.option(
+    "--cidr",
+    "-i",
+    type=click.STRING,
+    help="The IP range to allow in CIDR notation",
+)
+@click.argument("name", type=click.STRING, required=True)
+@pass_config
+def create(config, provider, region, type, protocol, port_range, cidr, name):
+    """
+    Creates a data_connector.
+    """
+    return config.proximl.run(
+        config.proximl.client.cloudbender.data_connectors.create(
+            provider_uuid=provider,
+            region_uuid=region,
+            name=name,
+            type=type,
+            protocol=protocol,
+            port_range=port_range,
+            cidr=cidr,
+        )
+    )
+
+
+@data_connector.command()
+@click.option(
+    "--provider",
+    "-p",
+    type=click.STRING,
+    required=True,
+    help="The provider ID of the region.",
+)
+@click.option(
+    "--region",
+    "-r",
+    type=click.STRING,
+    required=True,
+    help="The region ID to remove the data_connector from.",
+)
+@click.argument("data_connector", type=click.STRING)
+@pass_config
+def remove(config, provider, region, data_connector):
+    """
+    Remove a data_connector.
+
+    DATASTORE may be specified by name or ID, but ID is preferred.
+    """
+    data_connectors = config.proximl.run(
+        config.proximl.client.cloudbender.data_connectors.list(
+            provider_uuid=provider, region_uuid=region
+        )
+    )
+
+    found = search_by_id_name(data_connector, data_connectors)
+    if None is found:
+        raise click.UsageError("Cannot find specified data_connector.")
+
+    return config.proximl.run(found.remove())

{proximl-0.5.6 → proximl-0.5.7}/proximl/cli/cloudbender/service.py

@@ -74,6 +74,19 @@ def list(config, provider, region):
     required=True,
     help="The region ID to create the service in.",
 )
+@click.option(
+    "--type",
+    "-t",
+    type=click.Choice(
+        [
+            "https",
+            "tcp",
+            "udp",
+        ],
+    ),
+    required=True,
+    help="The type of regional service.",
+)
 @click.option(
     "--public/--no-public",
     default=True,

@@ -82,13 +95,17 @@ def list(config, provider, region):
 )
 @click.argument("name", type=click.STRING, required=True)
 @pass_config
-def create(config, provider, region, public, name):
+def create(config, provider, region, type, public, name):
     """
     Creates a service.
     """
     return config.proximl.run(
         config.proximl.client.cloudbender.services.create(
-            provider_uuid=provider,
+            provider_uuid=provider,
+            region_uuid=region,
+            name=name,
+            type=type,
+            public=public,
         )
     )


{proximl-0.5.6 → proximl-0.5.7}/proximl/cloudbender/cloudbender.py

@@ -3,6 +3,7 @@ from .regions import Regions
 from .nodes import Nodes
 from .devices import Devices
 from .datastores import Datastores
+from .data_connectors import DataConnectors
 from .services import Services
 from .device_configs import DeviceConfigs


@@ -15,5 +16,6 @@ class Cloudbender(object):
         self.nodes = Nodes(proximl)
         self.devices = Devices(proximl)
         self.datastores = Datastores(proximl)
+        self.data_connectors = DataConnectors(proximl)
         self.services = Services(proximl)
         self.device_configs = DeviceConfigs(proximl)

proximl-0.5.7/proximl/cloudbender/data_connectors.py (new file)

@@ -0,0 +1,112 @@
+import json
+import logging
+
+
+class DataConnectors(object):
+    def __init__(self, proximl):
+        self.proximl = proximl
+
+    async def get(self, provider_uuid, region_uuid, id, **kwargs):
+        resp = await self.proximl._query(
+            f"/provider/{provider_uuid}/region/{region_uuid}/data_connector/{id}",
+            "GET",
+            kwargs,
+        )
+        return DataConnector(self.proximl, **resp)
+
+    async def list(self, provider_uuid, region_uuid, **kwargs):
+        resp = await self.proximl._query(
+            f"/provider/{provider_uuid}/region/{region_uuid}/data_connector",
+            "GET",
+            kwargs,
+        )
+        data_connectors = [
+            DataConnector(self.proximl, **data_connector) for data_connector in resp
+        ]
+        return data_connectors
+
+    async def create(
+        self,
+        provider_uuid,
+        region_uuid,
+        name,
+        type,
+        **kwargs,
+    ):
+        logging.info(f"Creating Data Connector {name}")
+        data = dict(
+            name=name,
+            type=type,
+            **kwargs,
+        )
+        payload = {k: v for k, v in data.items() if v is not None}
+        resp = await self.proximl._query(
+            f"/provider/{provider_uuid}/region/{region_uuid}/data_connector",
+            "POST",
+            None,
+            payload,
+        )
+        data_connector = DataConnector(self.proximl, **resp)
+        logging.info(f"Created Data Connector {name} with id {data_connector.id}")
+        return data_connector
+
+    async def remove(self, provider_uuid, region_uuid, id, **kwargs):
+        await self.proximl._query(
+            f"/provider/{provider_uuid}/region/{region_uuid}/data_connector/{id}",
+            "DELETE",
+            kwargs,
+        )
+
+
+class DataConnector:
+    def __init__(self, proximl, **kwargs):
+        self.proximl = proximl
+        self._data_connector = kwargs
+        self._id = self._data_connector.get("connector_id")
+        self._provider_uuid = self._data_connector.get("provider_uuid")
+        self._region_uuid = self._data_connector.get("region_uuid")
+        self._type = self._data_connector.get("type")
+        self._name = self._data_connector.get("name")
+
+    @property
+    def id(self) -> str:
+        return self._id
+
+    @property
+    def provider_uuid(self) -> str:
+        return self._provider_uuid
+
+    @property
+    def region_uuid(self) -> str:
+        return self._region_uuid
+
+    @property
+    def type(self) -> str:
+        return self._type
+
+    @property
+    def name(self) -> str:
+        return self._name
+
+    def __str__(self):
+        return json.dumps({k: v for k, v in self._data_connector.items()})
+
+    def __repr__(self):
+        return f"DataConnector( proximl , **{self._data_connector.__repr__()})"
+
+    def __bool__(self):
+        return bool(self._id)
+
+    async def remove(self):
+        await self.proximl._query(
+            f"/provider/{self._provider_uuid}/region/{self._region_uuid}/data_connector/{self._id}",
+            "DELETE",
+        )
+
+    async def refresh(self):
+        resp = await self.proximl._query(
+            f"/provider/{self._provider_uuid}/region/{self._region_uuid}/data_connector/{self._id}",
+            "GET",
+        )
+        self.__init__(self.proximl, **resp)
+        return self
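
The new DataConnectors/DataConnector classes follow the same pattern as the existing Datastores module: an async collection object attached to the client's cloudbender attribute, plus a thin wrapper around each API response. A minimal usage sketch, assuming the SDK client is constructed as ProxiML() from the package root (the entry point itself is not part of this diff) and that the provider/region IDs are placeholders:

import asyncio
from proximl import ProxiML  # assumed import path; not shown in this diff


async def main():
    proximl = ProxiML()

    # List the data connectors registered in a CloudBender region.
    connectors = await proximl.cloudbender.data_connectors.list(
        provider_uuid="<provider-id>", region_uuid="<region-id>"
    )
    for connector in connectors:
        print(connector.id, connector.name, connector.type)

    # Create a "custom" connector; protocol, port_range, and cidr are the
    # optional keyword arguments the new CLI command passes through.
    connector = await proximl.cloudbender.data_connectors.create(
        provider_uuid="<provider-id>",
        region_uuid="<region-id>",
        name="example-connector",
        type="custom",
        protocol="tcp",
        port_range="8000-8010",
        cidr="10.0.0.0/24",
    )
    await connector.refresh()

    # Remove it again via the wrapper object.
    await connector.remove()


asyncio.run(main())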

{proximl-0.5.6 → proximl-0.5.7}/proximl/cloudbender/services.py

@@ -1,5 +1,13 @@
 import json
 import logging
+import asyncio
+import math
+
+from proximl.exceptions import (
+    ApiError,
+    SpecificationError,
+    ProxiMLException,
+)


 class Services(object):

@@ -28,12 +36,14 @@ class Services(object):
         provider_uuid,
         region_uuid,
         name,
+        type,
         public,
         **kwargs,
     ):
         logging.info(f"Creating Service {name}")
         data = dict(
             name=name,
+            type=type,
             public=public,
             **kwargs,
         )

@@ -65,7 +75,12 @@ class Service:
         self._region_uuid = self._service.get("region_uuid")
         self._public = self._service.get("public")
         self._name = self._service.get("name")
-        self.
+        self._type = self._service.get("type")
+        self._hostname = self._service.get("custom_hostname") or self._service.get(
+            "hostname"
+        )
+        self._status = self._service.get("status")
+        self._port = self._service.get("port")

     @property
     def id(self) -> str:

@@ -91,6 +106,18 @@ class Service:
     def hostname(self) -> str:
         return self._hostname

+    @property
+    def status(self) -> str:
+        return self._status
+
+    @property
+    def type(self) -> str:
+        return self._type
+
+    @property
+    def port(self) -> str:
+        return self._port
+
     def __str__(self):
         return json.dumps({k: v for k, v in self._service.items()})


@@ -113,3 +140,40 @@ class Service:
         )
         self.__init__(self.proximl, **resp)
         return self
+
+    async def wait_for(self, status, timeout=300):
+        if self.status == status:
+            return
+        valid_statuses = ["active", "archived"]
+        if not status in valid_statuses:
+            raise SpecificationError(
+                "status",
+                f"Invalid wait_for status {status}. Valid statuses are: {valid_statuses}",
+            )
+        MAX_TIMEOUT = 24 * 60 * 60
+        if timeout > MAX_TIMEOUT:
+            raise SpecificationError(
+                "timeout",
+                f"timeout must be less than {MAX_TIMEOUT} seconds.",
+            )
+
+        POLL_INTERVAL_MIN = 5
+        POLL_INTERVAL_MAX = 60
+        POLL_INTERVAL = max(min(timeout / 60, POLL_INTERVAL_MAX), POLL_INTERVAL_MIN)
+        retry_count = math.ceil(timeout / POLL_INTERVAL)
+        count = 0
+        while count < retry_count:
+            await asyncio.sleep(POLL_INTERVAL)
+            try:
+                await self.refresh()
+            except ApiError as e:
+                if status == "archived" and e.status == 404:
+                    return
+                raise e
+            if self.status == status:
+                return self
+            else:
+                count += 1
+            logging.debug(f"self: {self}, retry count {count}")
+
+        raise ProxiMLException(f"Timeout waiting for {status}")
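
With the new type argument on Services.create and the Service.wait_for poller above, a regional service can now be created and awaited directly from the SDK. A minimal sketch under the same assumptions as the earlier example (ProxiML() entry point and placeholder provider/region IDs):

import asyncio
from proximl import ProxiML  # assumed import path; not shown in this diff


async def main():
    proximl = ProxiML()

    # Create an HTTPS regional service; `type` is now a required argument.
    service = await proximl.cloudbender.services.create(
        provider_uuid="<provider-id>",
        region_uuid="<region-id>",
        name="example-service",
        type="https",
        public=True,
    )

    # Poll with the new wait_for() until the region reports the service active
    # (a timeout above 24 hours now raises SpecificationError).
    await service.wait_for("active", timeout=600)
    print(service.hostname, service.port, service.status)


asyncio.run(main())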

{proximl-0.5.6 → proximl-0.5.7}/proximl/datasets.py

@@ -119,12 +119,16 @@ class Dataset:
                 project_uuid=self._dataset.get("project_uuid"),
                 cidr=self._dataset.get("vpn").get("cidr"),
                 ssh_port=self._dataset.get("vpn").get("client").get("ssh_port"),
-                input_path=self._dataset.get("source_uri")
-                if self.status in ["new", "downloading"]
-                else None,
-                output_path=self._dataset.get("output_uri")
-                if self.status == "exporting"
-                else None,
+                input_path=(
+                    self._dataset.get("source_uri")
+                    if self.status in ["new", "downloading"]
+                    else None
+                ),
+                output_path=(
+                    self._dataset.get("output_uri")
+                    if self.status == "exporting"
+                    else None
+                ),
             )
         else:
             details = dict()

@@ -215,14 +219,21 @@ class Dataset:
         return self

     async def wait_for(self, status, timeout=300):
+        if self.status == status:
+            return
         valid_statuses = ["downloading", "ready", "archived"]
         if not status in valid_statuses:
             raise SpecificationError(
                 "status",
                 f"Invalid wait_for status {status}. Valid statuses are: {valid_statuses}",
             )
-        if self.status == status:
-            return
+        MAX_TIMEOUT = 24 * 60 * 60
+        if timeout > MAX_TIMEOUT:
+            raise SpecificationError(
+                "timeout",
+                f"timeout must be less than {MAX_TIMEOUT} seconds.",
+            )
+
         POLL_INTERVAL_MIN = 5
         POLL_INTERVAL_MAX = 60
         POLL_INTERVAL = max(min(timeout / 60, POLL_INTERVAL_MAX), POLL_INTERVAL_MIN)

{proximl-0.5.6 → proximl-0.5.7}/proximl/jobs.py

@@ -468,6 +468,12 @@ class Job:
         return job

     async def wait_for(self, status, timeout=300):
+        if self.status == status or (
+            self.type == "training"
+            and status == "finished"
+            and self.status == "stopped"
+        ):
+            return
         valid_statuses = [
             "waiting for data/model download",
             "waiting for GPUs",

@@ -492,12 +498,13 @@ class Job:
                 "'stopped' status is deprecated for training jobs, use 'finished' instead.",
                 DeprecationWarning,
             )
-        if self.status == status or (
-            self.type == "training"
-            and status == "finished"
-            and self.status == "stopped"
-        ):
-            return
+
+        MAX_TIMEOUT = 24 * 60 * 60
+        if timeout > MAX_TIMEOUT:
+            raise SpecificationError(
+                "timeout",
+                f"timeout must be less than {MAX_TIMEOUT} seconds.",
+            )

         POLL_INTERVAL_MIN = 5
         POLL_INTERVAL_MAX = 60