vantage6 5.0.0a36__py3-none-any.whl → 5.0.0a38__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of vantage6 might be problematic.
- vantage6/cli/algorithm/generate_algorithm_json.py +0 -1
- vantage6/cli/algostore/attach.py +28 -3
- vantage6/cli/algostore/list.py +2 -2
- vantage6/cli/algostore/new.py +3 -2
- vantage6/cli/algostore/start.py +25 -6
- vantage6/cli/algostore/stop.py +3 -0
- vantage6/cli/algostore/version.py +62 -0
- vantage6/cli/auth/attach.py +1 -1
- vantage6/cli/auth/list.py +2 -2
- vantage6/cli/auth/new.py +3 -2
- vantage6/cli/auth/remove.py +58 -0
- vantage6/cli/auth/start.py +27 -9
- vantage6/cli/auth/stop.py +3 -0
- vantage6/cli/cli.py +21 -0
- vantage6/cli/common/attach.py +114 -0
- vantage6/cli/common/decorator.py +25 -4
- vantage6/cli/common/list.py +68 -0
- vantage6/cli/common/new.py +27 -7
- vantage6/cli/common/remove.py +18 -0
- vantage6/cli/common/start.py +48 -40
- vantage6/cli/common/stop.py +16 -4
- vantage6/cli/common/utils.py +65 -74
- vantage6/cli/common/version.py +82 -0
- vantage6/cli/config.py +10 -2
- vantage6/cli/{configuration_wizard.py → configuration_create.py} +22 -14
- vantage6/cli/configuration_manager.py +70 -21
- vantage6/cli/context/__init__.py +10 -5
- vantage6/cli/context/algorithm_store.py +13 -7
- vantage6/cli/context/auth.py +23 -5
- vantage6/cli/context/node.py +25 -8
- vantage6/cli/context/server.py +18 -6
- vantage6/cli/globals.py +1 -0
- vantage6/cli/node/attach.py +27 -3
- vantage6/cli/node/common/__init__.py +26 -10
- vantage6/cli/node/common/task_cleanup.py +153 -0
- vantage6/cli/node/list.py +3 -44
- vantage6/cli/node/new.py +13 -6
- vantage6/cli/node/set_api_key.py +1 -1
- vantage6/cli/node/start.py +30 -7
- vantage6/cli/node/stop.py +151 -7
- vantage6/cli/node/version.py +96 -33
- vantage6/cli/sandbox/config/base.py +109 -0
- vantage6/cli/sandbox/config/core.py +300 -0
- vantage6/cli/sandbox/config/node.py +311 -0
- vantage6/cli/sandbox/data/km_dataset.csv +2401 -0
- vantage6/cli/sandbox/data/olympic_athletes_2016.csv +2425 -0
- vantage6/cli/sandbox/new.py +207 -0
- vantage6/cli/sandbox/populate/__init__.py +173 -0
- vantage6/cli/sandbox/populate/helpers/connect_store.py +203 -0
- vantage6/cli/sandbox/populate/helpers/delete_fixtures.py +67 -0
- vantage6/cli/sandbox/populate/helpers/load_fixtures.py +476 -0
- vantage6/cli/sandbox/populate/helpers/utils.py +35 -0
- vantage6/cli/sandbox/remove.py +155 -0
- vantage6/cli/sandbox/start.py +349 -0
- vantage6/cli/sandbox/stop.py +106 -0
- vantage6/cli/server/attach.py +28 -3
- vantage6/cli/server/common/__init__.py +5 -6
- vantage6/cli/server/import_.py +137 -119
- vantage6/cli/server/list.py +2 -2
- vantage6/cli/server/new.py +5 -3
- vantage6/cli/server/start.py +21 -4
- vantage6/cli/server/stop.py +2 -0
- vantage6/cli/server/version.py +31 -18
- vantage6/cli/template/algo_store_config.j2 +3 -0
- vantage6/cli/template/auth_config.j2 +24 -1
- vantage6/cli/template/node_config.j2 +2 -0
- vantage6/cli/template/server_config.j2 +10 -7
- vantage6/cli/use/context.py +8 -1
- vantage6/cli/use/namespace.py +10 -7
- vantage6/cli/utils.py +33 -1
- vantage6/cli/utils_kubernetes.py +270 -0
- {vantage6-5.0.0a36.dist-info → vantage6-5.0.0a38.dist-info}/METADATA +4 -4
- vantage6-5.0.0a38.dist-info/RECORD +102 -0
- vantage6/cli/rabbitmq/__init__.py +0 -0
- vantage6/cli/rabbitmq/definitions.py +0 -26
- vantage6/cli/rabbitmq/queue_manager.py +0 -220
- vantage6/cli/rabbitmq/rabbitmq.config +0 -8
- vantage6-5.0.0a36.dist-info/RECORD +0 -86
- {vantage6-5.0.0a36.dist-info → vantage6-5.0.0a38.dist-info}/WHEEL +0 -0
- {vantage6-5.0.0a36.dist-info → vantage6-5.0.0a38.dist-info}/entry_points.txt +0 -0
vantage6/cli/sandbox/populate/helpers/load_fixtures.py
@@ -0,0 +1,476 @@
+"""
+Development script to populate the server
+"""
+
+import traceback
+from pathlib import Path
+
+from jinja2 import Environment, FileSystemLoader
+
+from vantage6.client import Client
+
+from vantage6.cli.globals import APPNAME, PACKAGE_FOLDER
+from vantage6.cli.sandbox.populate.helpers.utils import (
+    NodeConfigCreationDetails,
+    replace_wsl_path,
+)
+
+
+def clear_dev_folder(dev_dir: Path, name: str) -> None:
+    node_dev_dir = dev_dir / name
+    if node_dev_dir.exists():
+        for file_ in node_dev_dir.iterdir():
+            file_.unlink()
+        node_dev_dir.rmdir()
+    print(f"===> Dev folder for node `{name}` cleared")
+
+
+def create_organizations(
+    client: Client, number_of_nodes: int
+) -> tuple[list[dict], dict]:
+    """
+    Create organizations. If the organization already exists, it is added to the
+    existing organizations list. If the organization is the root organization, it is
+    patched so that the admin user is also in the organization.
+
+    Returns
+    -------
+    tuple[list[dict], dict]
+        A tuple containing the list of organizations and the creation details.
+    """
+    organizations = []
+    creation_details = {
+        "created": [],
+        "existing": [],
+        "root_org_patched": [],
+    }
+
+    existing_organizations = client.organization.list()["data"]
+    for i in range(1, number_of_nodes + 1):
+        name = f"org_{i}"
+        if org := next(
+            iter([org for org in existing_organizations if org["name"] == name]), None
+        ):
+            creation_details["existing"].append({"name": name, "domain": org["domain"]})
+            organizations.append(org)
+        elif i == 1:
+            # Patch the root organization so that admin user is also in the org
+            org = client.organization.update(
+                id_=1,
+                name=name,
+            )
+            creation_details["root_org_patched"].append(
+                {"name": name, "domain": org["domain"]}
+            )
+            organizations.append(org)
+        else:
+            org = client.organization.create(
+                name=name,
+                address1=f"First address line {i}",
+                address2=f"Second address line {i}",
+                zipcode="1234AB",
+                country="Earthland",
+                domain=f"org{i}.org",
+            )
+            creation_details["created"].append({"name": name, "domain": org["domain"]})
+            organizations.append(org)
+
+    return organizations, creation_details
+
+
+def create_collaborations(
+    client: Client, organizations: list[dict]
+) -> tuple[list[dict], dict]:
+    """
+    Create collaborations. If the collaboration already exists, it is added to the
+    existing collaborations list.
+
+    Returns
+    -------
+    tuple[dict, dict]
+        A tuple containing the collaboration and the creation details.
+    """
+    creation_details = {"created": [], "existing": []}
+    collab_name = "demo"
+    existing_collaborations = client.collaboration.list(
+        scope="global", name=collab_name
+    )["data"]
+    if collab := next(iter(existing_collaborations), None):
+        creation_details["existing"].append({"name": collab_name, "id": collab["id"]})
+    else:
+        collab = client.collaboration.create(
+            name=collab_name,
+            organizations=[org["id"] for org in organizations],
+            encrypted=False,
+        )
+        creation_details["created"].append({"name": collab_name, "id": collab["id"]})
+    return collab, creation_details
+
+
+def create_users(client: Client, organizations: list[dict]) -> dict:
+    """
+    Create users. If the user already exists, it is added to the existing users list.
+
+    Returns
+    -------
+    dict
+        The creation details.
+    """
+    creation_details = {"created": [], "existing": []}
+    existing_users = client.user.list()["data"]
+    for index, org in enumerate(organizations):
+        username = f"user_{index + 1}"
+        if next(
+            iter([user for user in existing_users if user["username"] == username]),
+            None,
+        ):
+            creation_details["existing"].append(
+                {
+                    "username": username,
+                    "organization": org["name"],
+                }
+            )
+        else:
+            password = "Password123!"
+            client.user.create(
+                username=username,
+                password=password,
+                organization=org["id"],
+                roles=[1],  # TODO assign proper roles
+            )
+            creation_details["created"].append(
+                {
+                    "username": username,
+                    "password": password,
+                    "organization": org["name"],
+                }
+            )
+    return creation_details
+
+
+def register_node(
+    client: Client,
+    node_name: str,
+    collaboration: dict,
+    organization: dict,
+) -> dict:
+    """
+    Register a node at the server.
+
+    Returns
+    -------
+    dict
+        The node registration details.
+    """
+    return client.node.create(
+        collaboration=collaboration["id"],
+        organization=organization["id"],
+        name=node_name,
+    )
+
+
+def create_node_config(
+    node_number: int,
+    node_name: str,
+    dev_dir: Path,
+    task_directory: str,
+    task_namespace: str,
+    node_starting_port_number: int,
+    organization: dict,
+    node: dict,
+) -> dict:
+    """
+    Create a node configuration file.
+
+    Returns
+    -------
+    dict
+        The node configuration details.
+    """
+    # Create a folder for all config files for a single node
+    node_dev_dir = dev_dir / node_name
+    node_dev_dir.mkdir(exist_ok=True)
+
+    # Generate node configuration
+    environment = Environment(
+        loader=FileSystemLoader(PACKAGE_FOLDER / APPNAME / "cli" / "template"),
+        trim_blocks=True,
+        lstrip_blocks=True,
+        autoescape=True,
+    )
+    template = environment.get_template("node_config_nonk8s.j2")
+
+    node_config = template.render(
+        {
+            "logging": {"file": f"node_{node_number}.log"},
+            "port": 7601,
+            "server_url": "http://vantage6-server-vantage6-server-service",
+            "task_dir": f"{task_directory}/node_{node_number}",
+            "task_dir_extension": f"node_{node_number}",
+            "api_path": "/server",
+            "task_namespace": task_namespace,
+            "node_proxy_port": node_starting_port_number + (node_number - 1),
+        }
+    )
+    config_file = node_dev_dir / f"node_org_{node_number}.yaml"
+    with open(config_file, "w") as f:
+        f.write(node_config)
+
+    # also make sure the task directory exists
+    task_dir = Path(f"{task_directory}/node_{node_number}")
+    task_dir = replace_wsl_path(task_dir)
+    task_dir.mkdir(parents=True, exist_ok=True)
+
+    # Create .env file for the node
+    env_file = node_dev_dir / ".env"
+    with open(env_file, "w") as f:
+        f.write(f"V6_API_KEY={node['api_key']}\n")
+        f.write(f"V6_NODE_NAME={node_name}\n")
+
+    return {
+        "name": f"node-{node_number}",
+        "organization": organization["name"],
+        "api_key": node["api_key"],
+        "config_file": str(config_file),
+        "env_file": str(env_file),
+    }
+
+
+def create_session(client: Client, collaboration: dict) -> dict:
+    """
+    Create a session.
+
+    Returns
+    -------
+    dict
+        The session creation details.
+    """
+    session = client.session.create(
+        collaboration=collaboration["id"],
+        name="session (collaboration scope)",
+        scope="collaboration",
+    )
+    creation_details = {
+        "created": [
+            {
+                "name": "session (collaboration scope)",
+                "id": session["id"],
+            }
+        ],
+    }
+    return creation_details
+
+
+def print_creation_details(creation_details: dict) -> str:
+    """
+    Print the creation details.
+
+    Returns
+    -------
+    str
+        The creation summary.
+    """
+    summary = "=== Creation Summary ===\n"
+
+    summary += f"\nOrganizations: {len(creation_details['organizations']['created'])} "
+    summary += f"created, {len(creation_details['organizations']['existing'])} existing"
+    summary += f", {len(creation_details['organizations']['root_org_patched'])} root "
+    summary += "org patched"
+    if creation_details["organizations"]["created"]:
+        summary += "\n Created:"
+        for org in creation_details["organizations"]["created"]:
+            summary += f"\n - {org['name']} ({org['domain']})"
+    if creation_details["organizations"]["existing"]:
+        summary += "\n Existing:"
+        for org in creation_details["organizations"]["existing"]:
+            summary += f"\n - {org['name']} ({org['domain']})"
+    if creation_details["organizations"]["root_org_patched"]:
+        summary += "\n Root org patched:"
+        for org in creation_details["organizations"]["root_org_patched"]:
+            summary += f"\n - {org['name']} ({org['domain']})"
+
+    summary += f"\n\nUsers: {len(creation_details['users']['created'])} created, "
+    summary += f"{len(creation_details['users']['existing'])} existing"
+    if creation_details["users"]["created"]:
+        summary += "\n Created:"
+        for user in creation_details["users"]["created"]:
+            summary += f"\n - {user['username']} - Password: "
+            summary += f"{user['password']} - Org: {user['organization']}"
+    if creation_details["users"]["existing"]:
+        summary += "\n Existing:"
+        for user in creation_details["users"]["existing"]:
+            summary += f"\n - {user['username']} - Org: "
+            summary += f"{user['organization']}"
+
+    summary += f"\n\nNodes: {len(creation_details['nodes']['created'])} created, "
+    summary += f"{len(creation_details['nodes']['existing'])} existing"
+    if creation_details["nodes"]["created"]:
+        summary += "\n Created:"
+        for node in creation_details["nodes"]["created"]:
+            summary += f"\n - {node['name']} (Org: {node['organization']})"
+            summary += f"\n API Key: {node['api_key']}"
+            if "config_file" in node:
+                summary += f"\n Config: {node['config_file']}"
+            if "env_file" in node:
+                summary += f"\n Env: {node['env_file']}"
+    if creation_details["nodes"]["existing"]:
+        summary += "\n Existing:"
+        for node in creation_details["nodes"]["existing"]:
+            summary += f"\n - {node['name']} (Org: {node['organization']})"
+
+    summary += (
+        f"\n\nCollaborations: {len(creation_details['collaborations']['created'])} "
+    )
+    summary += (
+        f"created, {len(creation_details['collaborations']['existing'])} existing"
+    )
+    if creation_details["collaborations"]["created"]:
+        summary += "\n Created:"
+        for collab in creation_details["collaborations"]["created"]:
+            summary += f"\n - {collab['name']} (ID: {collab['id']})"
+    if creation_details["collaborations"]["existing"]:
+        summary += "\n Existing:"
+        for collab in creation_details["collaborations"]["existing"]:
+            summary += f"\n - {collab['name']} (ID: {collab['id']})"
+
+    summary += f"\n\nSessions: {len(creation_details['sessions']['created'])} created"
+    if creation_details["sessions"]["created"]:
+        summary += "\n Created:"
+        for session in creation_details["sessions"]["created"]:
+            summary += f"\n - {session['name']} (ID: {session['id']})"
+
+    if creation_details["dev_folders_cleared"]:
+        summary += (
+            f"\n\nDev folders cleared: {len(creation_details['dev_folders_cleared'])}"
+        )
+        for folder in creation_details["dev_folders_cleared"]:
+            summary += f"\n - {folder}"
+
+    summary += "\n\n======================="
+    print(summary)
+    return summary
+
+
+def create_fixtures(
+    client: Client,
+    number_of_nodes: int,
+    return_as_dict: bool = False,
+    node_config_creation_details: NodeConfigCreationDetails | None = None,
+    clear_dev_folders: bool = False,
+) -> str | dict:
+    """
+    Create the fixtures for the server.
+
+    Parameters
+    ----------
+    client: Client
+        The client to use to create the fixtures.
+    number_of_nodes: int
+        The number of nodes to create.
+    return_as_dict: bool
+        Whether to return the creation details as a dictionary or as a summary string.
+        Default is False.
+    node_config_creation_details: NodeConfigCreationDetails | None
+        The details to use to create the node configs. If not provided, the node configs
+        will not be created.
+    clear_dev_folders: bool
+        Whether to clear the dev folders.
+
+    Returns
+    -------
+    str | dict
+        The creation summary or the creation details as a dictionary.
+    """
+
+    # Track creation details
+    creation_details = {
+        "organizations": {"created": [], "existing": [], "root_org_patched": []},
+        "users": {"created": [], "existing": []},
+        "nodes": {"created": [], "existing": []},
+        "collaborations": {"created": [], "existing": []},
+        "sessions": {"created": []},
+        "dev_folders_cleared": [],
+    }
+
+    # Remove old config files
+    if clear_dev_folders and node_config_creation_details:
+        for node_dir in [
+            d for d in node_config_creation_details.dev_dir.iterdir() if d.is_dir()
+        ]:
+            clear_dev_folder(node_config_creation_details.dev_dir, node_dir.name)
+            creation_details["dev_folders_cleared"].append(node_dir.name)
+
+    # Create organizations
+    organizations, creation_details["organizations"] = create_organizations(
+        client, number_of_nodes
+    )
+
+    # Create collaboration
+    collaboration, creation_details["collaborations"] = create_collaborations(
+        client, organizations
+    )
+
+    # Create users
+    creation_details["users"] = create_users(client, organizations)
+
+    # create collaboration session
+    creation_details["sessions"] = create_session(client, collaboration)
+
+    # Create nodes
+    for i in range(1, number_of_nodes + 1):
+        name = f"node-{i}"
+        if next(
+            iter(
+                [
+                    node
+                    for node in client.node.list(name=name)["data"]
+                    if node["name"] == name
+                ]
+            ),
+            None,
+        ):
+            creation_details["nodes"]["existing"].append(
+                {"name": name, "organization": organizations[i - 1]["name"]}
+            )
+        else:
+            try:
+                node = register_node(
+                    client,
+                    node_name=name,
+                    collaboration=collaboration,
+                    organization=organizations[i - 1],
+                )
+                if node_config_creation_details:
+                    creation_details["nodes"]["created"].append(
+                        create_node_config(
+                            node_number=i,
+                            node_name=name,
+                            dev_dir=node_config_creation_details.dev_dir,
+                            task_directory=node_config_creation_details.task_directory,
+                            task_namespace=node_config_creation_details.task_namespace,
+                            node_starting_port_number=(
+                                node_config_creation_details.node_starting_port_number
+                            ),
+                            node=node,
+                            organization=organizations[i - 1],
+                        )
+                    )
+                else:
+                    creation_details["nodes"]["created"].append(
+                        {
+                            "name": name,
+                            "organization": organizations[i - 1]["name"],
+                            "api_key": node["api_key"],
+                        }
+                    )
+
+            except Exception as e:
+                traceback.print_exc()
+                print(f"Error creating node {name}: {str(e)}")
+
+    # Print creation details
+    printed_summary = print_creation_details(creation_details)
+    if return_as_dict:
+        return creation_details
+    else:
+        return printed_summary
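For orientation, here is a minimal sketch of how this populate module could be driven from Python. The server URL, port, API path, credentials, and directories below are illustrative assumptions, and the client constructor and `authenticate()` call follow the vantage6 4.x client API, which may differ in the 5.0 alphas; only `create_fixtures` and `NodeConfigCreationDetails` come from the files in this diff.

# Sketch only: URL, port, credentials and directories are assumed values.
from pathlib import Path

from vantage6.client import Client
from vantage6.cli.sandbox.populate.helpers.load_fixtures import create_fixtures
from vantage6.cli.sandbox.populate.helpers.utils import NodeConfigCreationDetails

client = Client("http://localhost", 7601, "/server")  # assumed local sandbox server
client.authenticate("admin", "admin-password")        # assumed credentials

details = create_fixtures(
    client,
    number_of_nodes=3,
    return_as_dict=True,
    node_config_creation_details=NodeConfigCreationDetails(
        node_starting_port_number=8080,        # assumed free port range
        dev_dir=Path.home() / ".v6-sandbox",   # assumed dev directory
        task_directory="/tmp/v6-tasks",        # assumed task directory
        task_namespace="v6-jobs",              # assumed Kubernetes namespace
    ),
    clear_dev_folders=True,
)
print(f"{len(details['nodes']['created'])} nodes registered")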
vantage6/cli/sandbox/populate/helpers/utils.py
@@ -0,0 +1,35 @@
+from dataclasses import dataclass
+from pathlib import Path
+
+
+@dataclass
+class NodeConfigCreationDetails:
+    node_starting_port_number: int
+    dev_dir: Path
+    task_directory: str
+    task_namespace: str
+
+
+def replace_wsl_path(path: Path, to_mnt_wsl: bool = True) -> Path:
+    """
+    Replace the WSL path with the regular path.
+
+    If the directory contains /run/desktop/mnt/host/wsl, this will be replaced
+    by /mnt/wsl: this is an idiosyncrasy of WSL (for more details, see
+    https://dev.to/nsieg/use-k8s-hostpath-volumes-in-docker-desktop-on-wsl2-4dcl)
+
+    Parameters
+    ----------
+    path: Path
+        Path to replace.
+    to_mnt_wsl: bool
+        If True, the path will be replaced from the /run/desktop/mnt/host/wsl path to
+        the /mnt/wsl path. If false, vice versa. By default, it is False.
+    """
+    wsl_reference_path = "/run/desktop/mnt/host/wsl"
+    wsl_regular_path = "/mnt/wsl"
+    if to_mnt_wsl and str(path).startswith(wsl_reference_path):
+        path = Path(wsl_regular_path) / path.relative_to(wsl_reference_path)
+    elif not to_mnt_wsl and str(path).startswith(wsl_regular_path):
+        path = Path(wsl_reference_path) / path.relative_to(wsl_regular_path)
+    return path
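A quick illustration of the helper above (a sketch, not part of the package; the example path is made up):

from pathlib import Path

from vantage6.cli.sandbox.populate.helpers.utils import replace_wsl_path

# Docker Desktop on WSL2 exposes WSL mounts under /run/desktop/mnt/host/wsl,
# while the same data is reachable as /mnt/wsl inside the WSL distribution.
p = Path("/run/desktop/mnt/host/wsl/vantage6/tasks")
print(replace_wsl_path(p))  # -> /mnt/wsl/vantage6/tasks
print(replace_wsl_path(Path("/mnt/wsl/vantage6/tasks"), to_mnt_wsl=False))
# -> /run/desktop/mnt/host/wsl/vantage6/tasks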
vantage6/cli/sandbox/remove.py
@@ -0,0 +1,155 @@
+import itertools
+from pathlib import Path
+from shutil import rmtree
+
+import click
+
+from vantage6.common import error, warning
+from vantage6.common.globals import InstanceType
+
+from vantage6.cli.auth.remove import auth_remove
+from vantage6.cli.common.remove import execute_remove
+from vantage6.cli.configuration_create import select_configuration_questionnaire
+from vantage6.cli.context import get_context
+from vantage6.cli.context.algorithm_store import AlgorithmStoreContext
+from vantage6.cli.context.auth import AuthContext
+from vantage6.cli.context.node import NodeContext
+from vantage6.cli.globals import InfraComponentName
+from vantage6.cli.server.remove import cli_server_remove
+
+
+@click.command()
+@click.option("-n", "--name", default=None, help="Name of the configuration.")
+@click.option(
+    "-c",
+    "--config",
+    default=None,
+    help="Path to configuration-file; overrides --name",
+)
+@click.option(
+    "--data-dir",
+    "custom_data_dir",
+    type=click.Path(exists=True),
+    default=None,
+    help="Path to a custom data directory to use. This option is especially useful "
+    "on WSL because of mount issues for default directories. Use the same value as "
+    "was provided when creating the sandbox.",
+)
+@click.pass_context
+def cli_sandbox_remove(
+    click_ctx: click.Context,
+    name: str | None,
+    config: str | None,
+    custom_data_dir: Path | None,
+) -> None:
+    """Remove all related demo network files and folders.
+
+    Select a server configuration to remove that server and the nodes attached
+    to it.
+    """
+
+    if not name:
+        try:
+            name = select_configuration_questionnaire(
+                type_=InstanceType.SERVER, system_folders=False, is_sandbox=True
+            )
+        except Exception:
+            error("No configurations could be found!")
+            exit()
+
+    ctx = get_context(InstanceType.SERVER, name, system_folders=False, is_sandbox=True)
+
+    # remove the store folder
+    store_configs = AlgorithmStoreContext.instance_folders(
+        InstanceType.ALGORITHM_STORE,
+        f"{ctx.name}-store",
+        system_folders=False,
+    )
+    store_folder = store_configs["data"]
+    if store_folder.is_dir():
+        rmtree(store_folder)
+
+    # remove the store config file
+    AlgorithmStoreContext.LOGGING_ENABLED = False
+    store_ctx = AlgorithmStoreContext(
+        instance_name=f"{ctx.name}-store",
+        system_folders=False,
+        is_sandbox=True,
+    )
+    execute_remove(
+        store_ctx,
+        InstanceType.ALGORITHM_STORE,
+        InfraComponentName.ALGORITHM_STORE,
+        f"{ctx.name}-store",
+        system_folders=False,
+        force=True,
+    )
+
+    # remove the auth folder
+    AuthContext.LOGGING_ENABLED = False
+    auth_configs = AuthContext.instance_folders(
+        InstanceType.AUTH, f"{ctx.name}-auth", system_folders=False
+    )
+    auth_folder = auth_configs["data"]
+    if auth_folder.is_dir():
+        rmtree(auth_folder)
+
+    # remove the auth service
+    auth_ctx = AuthContext(
+        instance_name=f"{ctx.name}-auth",
+        system_folders=False,
+        is_sandbox=True,
+    )
+    auth_remove(auth_ctx, f"{ctx.name}-auth", system_folders=False, force=True)
+
+    # remove the nodes
+    NodeContext.LOGGING_ENABLED = False
+    configs, _ = NodeContext.available_configurations(system_folders=False)
+    node_names = [
+        config.name for config in configs if config.name.startswith(f"{ctx.name}-node-")
+    ]
+    for name in node_names:
+        # Context clases are singletons, so we need to clear the cache to force
+        # creation of a new instance. Otherwise, we *think* we get the ctx of another
+        # node but we actually get the one from the previous node in the loop.
+        if NodeContext in NodeContext._instances:
+            del NodeContext._instances[NodeContext]
+
+        node_ctx = NodeContext(
+            instance_name=name,
+            system_folders=False,
+            is_sandbox=True,
+            print_log_header=False,
+            logger_prefix="",
+            in_container=False,
+        )
+        for handler in itertools.chain(
+            node_ctx.log.handlers, node_ctx.log.root.handlers
+        ):
+            handler.close()
+        execute_remove(
+            node_ctx,
+            InstanceType.NODE,
+            InfraComponentName.NODE,
+            name,
+            system_folders=False,
+            force=True,
+        )
+
+    # remove data files attached to the network
+    data_dirs_nodes = NodeContext.instance_folders("node", "", False)["dev"]
+    try:
+        rmtree(Path(data_dirs_nodes / ctx.name))
+    except Exception as e:
+        warning(f"Failed to delete data directory {data_dirs_nodes / ctx.name}: {e}")
+
+    # remove the server last - if anything goes wrong, the server is still there so the
+    # user can still retry the removal.
+    # Note that this also checks if the server is running. Therefore, it is prevented
+    # that a running sandbox is removed.
+    for handler in itertools.chain(ctx.log.handlers, ctx.log.root.handlers):
+        handler.close()
+    click_ctx.invoke(
+        cli_server_remove, ctx=ctx, name=name, system_folders=False, force=True
+    )
+    # TODO remove the right data in the custom data directory if it is provided