vantage6 5.0.0a33-py3-none-any.whl → 5.0.0a35-py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.

Note: this release of vantage6 has been flagged as potentially problematic.

Files changed (45)
  1. vantage6/cli/algostore/new.py +106 -47
  2. vantage6/cli/algostore/remove.py +18 -34
  3. vantage6/cli/algostore/start.py +36 -67
  4. vantage6/cli/algostore/stop.py +43 -46
  5. vantage6/cli/cli.py +31 -33
  6. vantage6/cli/common/new.py +85 -0
  7. vantage6/cli/common/remove.py +54 -0
  8. vantage6/cli/common/start.py +36 -213
  9. vantage6/cli/common/stop.py +78 -0
  10. vantage6/cli/common/utils.py +253 -16
  11. vantage6/cli/configuration_manager.py +90 -12
  12. vantage6/cli/configuration_wizard.py +49 -414
  13. vantage6/cli/context/algorithm_store.py +7 -6
  14. vantage6/cli/context/base_server.py +22 -30
  15. vantage6/cli/context/node.py +14 -17
  16. vantage6/cli/context/server.py +16 -7
  17. vantage6/cli/globals.py +29 -8
  18. vantage6/cli/node/attach.py +1 -0
  19. vantage6/cli/node/common/__init__.py +1 -1
  20. vantage6/cli/node/create_private_key.py +9 -6
  21. vantage6/cli/node/files.py +12 -25
  22. vantage6/cli/node/new.py +348 -28
  23. vantage6/cli/node/remove.py +14 -90
  24. vantage6/cli/node/restart.py +30 -51
  25. vantage6/cli/node/set_api_key.py +7 -4
  26. vantage6/cli/node/start.py +81 -304
  27. vantage6/cli/node/stop.py +36 -96
  28. vantage6/cli/server/import_.py +1 -2
  29. vantage6/cli/server/list.py +0 -3
  30. vantage6/cli/server/new.py +72 -42
  31. vantage6/cli/server/remove.py +12 -33
  32. vantage6/cli/server/shell.py +1 -1
  33. vantage6/cli/server/start.py +22 -20
  34. vantage6/cli/server/stop.py +37 -17
  35. vantage6/cli/template/algo_store_config.j2 +195 -22
  36. vantage6/cli/template/node_config.j2 +336 -33
  37. vantage6/cli/template/server_config.j2 +255 -33
  38. vantage6/cli/utils.py +0 -2
  39. {vantage6-5.0.0a33.dist-info → vantage6-5.0.0a35.dist-info}/METADATA +4 -4
  40. vantage6-5.0.0a35.dist-info/RECORD +75 -0
  41. vantage6/cli/node/clean.py +0 -46
  42. vantage6/cli/template/server_import_config.j2 +0 -31
  43. vantage6-5.0.0a33.dist-info/RECORD +0 -75
  44. {vantage6-5.0.0a33.dist-info → vantage6-5.0.0a35.dist-info}/WHEEL +0 -0
  45. {vantage6-5.0.0a33.dist-info → vantage6-5.0.0a35.dist-info}/entry_points.txt +0 -0
vantage6/cli/node/set_api_key.py CHANGED
@@ -1,10 +1,11 @@
  import click
  import questionary as q

- from vantage6.common import error, info, ensure_config_dir_writable
+ from vantage6.common import ensure_config_dir_writable, error, info
+
+ from vantage6.cli.configuration_wizard import NodeConfigurationManager
  from vantage6.cli.context.node import NodeContext
  from vantage6.cli.globals import DEFAULT_NODE_SYSTEM_FOLDERS as N_FOL
- from vantage6.cli.configuration_wizard import NodeConfigurationManager
  from vantage6.cli.node.common import select_node


@@ -15,7 +16,7 @@ from vantage6.cli.node.common import select_node
      "--system",
      "system_folders",
      flag_value=True,
-     help="Search for configuration in system folders rather than " "user folders",
+     help="Search for configuration in system folders rather than user folders",
  )
  @click.option(
      "--user",
@@ -50,6 +51,8 @@ def cli_node_set_api_key(name: str, api_key: str, system_folders: bool) -> None:

      # set new api key, and save the file
      ctx.config["api_key"] = api_key
+     # TODO v5+ this probably messes up the current config as the template is used...
+     # Fix when reimplementing this in v5
      conf_mgr.put(ctx.config)
      conf_mgr.save(ctx.config_file)
-     info("Your new API key has been uploaded to the config file " f"{ctx.config_file}.")
+     info(f"Your new API key has been uploaded to the config file {ctx.config_file}.")
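The TODO added in this hunk warns that writing the configuration back through the template-based `NodeConfigurationManager` may reshape the file. For comparison, a minimal sketch of the same update done directly with PyYAML; the helper name and config path below are hypothetical and this is not the vantage6 configuration manager API:

```python
from pathlib import Path

import yaml  # PyYAML; assumed available


def set_api_key_in_yaml(config_file: Path, api_key: str) -> None:
    """Overwrite only the api_key entry of a plain YAML node config."""
    # Load the whole configuration as a plain dictionary.
    config = yaml.safe_load(config_file.read_text()) or {}
    config["api_key"] = api_key
    # Dump the file back. Comments and key order are not preserved, which is
    # the same class of problem the TODO in the diff refers to.
    config_file.write_text(yaml.safe_dump(config, sort_keys=False))


# Hypothetical usage:
# set_api_key_in_yaml(Path("~/.config/vantage6/node/my-node.yaml").expanduser(), "new-key")
```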
vantage6/cli/node/start.py CHANGED
@@ -1,334 +1,111 @@
- import os.path
- import time
- from pathlib import Path
- from threading import Thread
-
  import click
- import docker
- from colorama import Fore, Style

- from vantage6.common import debug, error, info, warning
- from vantage6.common.dataclass import TaskDB
- from vantage6.common.docker.addons import (
-     check_docker_running,
-     remove_container_if_exists,
- )
- from vantage6.common.globals import (
-     APPNAME,
-     DEFAULT_DOCKER_REGISTRY,
-     DEFAULT_NODE_IMAGE,
-     DEFAULT_NODE_IMAGE_WO_TAG,
-     InstanceType,
- )
+ from vantage6.common import info
+ from vantage6.common.globals import InstanceType

- from vantage6.cli import __version__
  from vantage6.cli.common.decorator import click_insert_context
- from vantage6.cli.common.start import pull_infra_image
- from vantage6.cli.common.utils import print_log_worker
+ from vantage6.cli.common.start import (
+     helm_install,
+     prestart_checks,
+     start_port_forward,
+ )
+ from vantage6.cli.common.utils import (
+     attach_logs,
+     create_directory_if_not_exists,
+ )
  from vantage6.cli.context.node import NodeContext
- from vantage6.cli.node.common import create_client
- from vantage6.cli.utils import check_config_name_allowed
+ from vantage6.cli.globals import ChartName
+
+ from vantage6.node.globals import DEFAULT_PROXY_SERVER_PORT


  @click.command()
- @click.option("-i", "--image", default=None, help="Node Docker image to use")
- @click.option(
-     "--keep/--auto-remove",
-     default=False,
-     help="Keep node container after finishing. Useful for debugging",
- )
- @click.option(
-     "--force-db-mount",
-     is_flag=True,
-     help="Always mount node databases; skip the check if they are existing files.",
- )
+ @click.option("--context", default=None, help="Kubernetes context to use")
+ @click.option("--namespace", default=None, help="Kubernetes namespace to use")
  @click.option(
      "--attach/--detach",
      default=False,
      help="Show node logs on the current console after starting the node",
  )
- @click.option(
-     "--mount-src",
-     default="",
-     help="Override vantage6 source code in container with the source code in this path",
- )
  @click_insert_context(InstanceType.NODE, include_name=True, include_system_folders=True)
  def cli_node_start(
      ctx: NodeContext,
      name: str,
      system_folders: bool,
-     image: str,
-     keep: bool,
-     mount_src: str,
+     context: str,
+     namespace: str,
      attach: bool,
-     force_db_mount: bool,
  ) -> None:
      """
      Start the node.
      """
-     check_docker_running()
      info("Starting node...")
-     info("Finding Docker daemon")
-     docker_client = docker.from_env()
-     NodeContext.LOGGING_ENABLED = False
-
-     # check if config name is allowed docker name, else exit
-     check_config_name_allowed(ctx.name)
-
-     # check that this node is not already running
-     running_nodes = docker_client.containers.list(
-         filters={"label": f"{APPNAME}-type={InstanceType.NODE}"}
-     )
-
-     suffix = "system" if system_folders else "user"
-     for node in running_nodes:
-         if node.name == f"{APPNAME}-{name}-{suffix}":
-             error(f"Node {Fore.RED}{name}{Style.RESET_ALL} is already running")
-             exit(1)
-
-     # make sure the (host)-task and -log dir exists
-     info("Checking that data and log dirs exist")
-     ctx.data_dir.mkdir(parents=True, exist_ok=True)
-     ctx.log_dir.mkdir(parents=True, exist_ok=True)
-
-     # Determine image-name. First we check if the option --image has been used.
-     # Then we check if the image has been specified in the config file, and
-     # finally we use the default settings from the package.
-     if not image:
-         custom_images: dict = ctx.config.get("images")
-         if custom_images:
-             image = custom_images.get("node")
-         else:
-             # if no custom image is specified, find the server version and use
-             # the latest images from that minor version
-             client = create_client(ctx)
-             major_minor = None
-             try:
-                 # try to get server version, skip if can't get a connection
-                 version = client.util.get_server_version(attempts_on_timeout=3)[
-                     "version"
-                 ]
-                 major_minor = ".".join(version.split(".")[:2])
-                 image = (
-                     f"{DEFAULT_DOCKER_REGISTRY}/"
-                     f"{DEFAULT_NODE_IMAGE_WO_TAG}"
-                     f":{major_minor}"
-                 )
-             except Exception:
-                 warning("Could not determine server version. Using default node image")
-
-             if major_minor and not __version__.startswith(major_minor):
-                 warning(
-                     "Version mismatch between CLI and server/node. CLI is "
-                     f"running on version {__version__}, while node and server "
-                     f"are on version {major_minor}. This might cause "
-                     f"unexpected issues; changing to {major_minor}.<latest> "
-                     "is recommended."
-                 )
-
-         # fail safe, in case no custom image is specified and we can't get the
-         # server version
-         if not image:
-             image = f"{DEFAULT_DOCKER_REGISTRY}/{DEFAULT_NODE_IMAGE}"
-
-     info(f"Pulling latest node image '{image}'")
-     pull_infra_image(docker_client, image, InstanceType.NODE)
-
-     data_volume = docker_client.volumes.create(ctx.docker_volume_name)
-     vpn_volume = docker_client.volumes.create(ctx.docker_vpn_volume_name)
-     ssh_volume = docker_client.volumes.create(ctx.docker_ssh_volume_name)
-     squid_volume = docker_client.volumes.create(ctx.docker_squid_volume_name)
-
-     info("Creating file & folder mounts")
-     # FIXME: should obtain mount points from DockerNodeContext
-     mounts = [
-         # (target, source)
-         ("/mnt/log", str(ctx.log_dir)),
-         ("/mnt/data", data_volume.name),
-         ("/mnt/vpn", vpn_volume.name),
-         ("/mnt/ssh", ssh_volume.name),
-         ("/mnt/squid", squid_volume.name),
-         ("/mnt/config", str(ctx.config_dir)),
-         ("/var/run/docker.sock", "/var/run/docker.sock"),
-     ]
-
-     if mount_src:
-         # If mount_src is a relative path, docker will consider it a volume.
-         mount_src = os.path.abspath(mount_src)
-         mounts.append(("/vantage6", mount_src))
-
-     # FIXME: Code duplication: Node.__init__() (vantage6/node/__init__.py)
-     # uses a lot of the same logic. Suggest moving this to
-     # ctx.get_private_key()
-     filename = ctx.config.get("encryption", {}).get("private_key")
-     # filename may be set to an empty string
-     if not filename:
-         filename = "private_key.pem"
-
-     # Location may be overridden by the environment
-     filename = os.environ.get("PRIVATE_KEY", filename)
-
-     # If ctx.get_data_file() receives an absolute path, it is returned as-is
-     fullpath = Path(ctx.get_data_file(filename))
-     if fullpath:
-         if Path(fullpath).exists():
-             mounts.append(("/mnt/private_key.pem", str(fullpath)))
-         else:
-             warning(f"Private key file is provided {fullpath}, but does not exist")
-
-     # Mount private keys for ssh tunnels
-     ssh_tunnels = ctx.config.get("ssh-tunnels", [])
-     for ssh_tunnel in ssh_tunnels:
-         hostname = ssh_tunnel.get("hostname")
-         key_path = ssh_tunnel.get("ssh", {}).get("identity", {}).get("key")
-         if not key_path:
-             error(
-                 f"SSH tunnel identity {Fore.RED}{hostname}{Style.RESET_ALL} "
-                 "key not provided. Continuing to start without this tunnel."
-             )
-         key_path = Path(key_path)
-         if not key_path.exists():
-             error(
-                 f"SSH tunnel identity {Fore.RED}{hostname}{Style.RESET_ALL} "
-                 "key does not exist. Continuing to start without this "
-                 "tunnel."
-             )
-
-         info(f" Mounting private key for {hostname} at {key_path}")
-
-         # we remove the .tmp in the container, this is because the file is
-         # mounted in a volume mount point. Somehow the file is than empty in
-         # the volume but not for the node instance. By removing the .tmp we
-         # make sure that the file is not empty in the volume.
-         mounts.append((f"/mnt/ssh/{hostname}.pem.tmp", str(key_path)))
-
-     env = {
-         "DATA_VOLUME_NAME": data_volume.name,
-         "VPN_VOLUME_NAME": vpn_volume.name,
-         "PRIVATE_KEY": "/mnt/private_key.pem",
-     }
-
-     # only mount the DB if it is a file
-     info("Setting up databases")
-     dbs = [TaskDB.from_dict(db) for db in ctx.databases]
-     for db in dbs:
-         # check that label contains only valid characters
-         if not db.label.isidentifier():
-             error(
-                 f"Database label {Fore.RED}{db.label}{Style.RESET_ALL} contains"
-                 " invalid characters. Only letters, numbers, and underscores"
-                 " are allowed, and it cannot start with a number."
-             )
-             exit(1)
-
-         info(
-             f" Processing {Fore.GREEN}{db.type}{Style.RESET_ALL} database "
-             f"{Fore.GREEN}{db.label}:{db.uri}{Style.RESET_ALL}"
-         )
-         label_capitals = db.label.upper()
-
-         try:
-             db_file_exists = Path(db.uri).exists()
-         except Exception:
-             # If the database uri cannot be parsed, it is definitely not a
-             # file. In case of http servers or sql servers, checking the path
-             # of the the uri will lead to an OS-dependent error, which is why
-             # we catch all exceptions here.
-             db_file_exists = False
-
-         if db.type.is_file_based() and not db_file_exists:
-             error(
-                 f"Database {Fore.RED}{db.uri}{Style.RESET_ALL} not found. Databases of "
-                 f"type '{db.type}' must be present on the harddrive. Please "
-                 "update your node configuration file."
-             )
-             exit(1)
-
-         if not db_file_exists and not force_db_mount:
-             debug(" - non file-based database added")
-             env[f"{label_capitals}_DATABASE_URI"] = db.uri
-         else:
-             debug(" - file-based database added")
-             suffix = Path(db.uri).suffix
-             env[f"{label_capitals}_DATABASE_URI"] = f"{db.label}{suffix}"
-             mounts.append((f"/mnt/{db.label}{suffix}", str(db.uri)))
-
-     system_folders_option = "--system" if system_folders else "--user"
-     cmd = (
-         f"vnode-local start -c /mnt/config/{name}.yaml -n {name} "
-         f" --dockerized {system_folders_option}"
-     )
-
-     volumes = []
-     for mount in mounts:
-         volumes.append(f"{mount[1]}:{mount[0]}")
-
-     extra_mounts = ctx.config.get("node_extra_mounts", [])
-     for mount in extra_mounts:
-         volumes.append(mount)
-
-     extra_env = ctx.config.get("node_extra_env", {})
-     # all extra env var names should be valid identifiers
-     extra_env_invalid = [key for key in extra_env.keys() if not key.isidentifier()]
-     if extra_env_invalid:
-         error(
-             "Environment variable names should be valid identifiers. "
-             f"The following break this rule: {extra_env_invalid}"
-         )
-         exit(1)
-     # we won't accept overwrites of existing env vars
-     env_overwrites = extra_env.keys() & env.keys()
-     if env_overwrites:
-         error(f"Cannot overwrite existing node environment variables: {env_overwrites}")
-         exit(1)
-     env.update(extra_env)
-
-     # Add extra hosts to the environment
-     extra_hosts = ctx.config.get("node_extra_hosts", {})
-
-     remove_container_if_exists(
-         docker_client=docker_client, name=ctx.docker_container_name
-     )

-     info("Running Docker container")
-     container = docker_client.containers.run(
-         image,
-         command=cmd,
-         volumes=volumes,
-         detach=True,
-         labels={
-             f"{APPNAME}-type": InstanceType.NODE.value,
-             "system": str(system_folders),
-             "name": ctx.config_file_name,
-         },
-         environment=env,
-         name=ctx.docker_container_name,
-         auto_remove=not keep,
-         tty=True,
-         extra_hosts=extra_hosts,
+     prestart_checks(ctx, InstanceType.NODE, name, system_folders, context, namespace)
+
+     create_directory_if_not_exists(ctx.log_dir)
+     create_directory_if_not_exists(ctx.data_dir)
+
+     # TODO issue #2256 - run same version node as server
+     # # Determine image-name. First we check if the option --image has been used.
+     # # Then we check if the image has been specified in the config file, and
+     # # finally we use the default settings from the package.
+     # if not image:
+     #     custom_images: dict = ctx.config.get("images")
+     #     if custom_images:
+     #         image = custom_images.get("node")
+     #     else:
+     #         # if no custom image is specified, find the server version and use
+     #         # the latest images from that minor version
+     #         client = create_client(ctx)
+     #         major_minor = None
+     #         try:
+     #             # try to get server version, skip if can't get a connection
+     #             version = client.util.get_server_version(attempts_on_timeout=3)[
+     #                 "version"
+     #             ]
+     #             major_minor = ".".join(version.split(".")[:2])
+     #             image = (
+     #                 f"{DEFAULT_DOCKER_REGISTRY}/"
+     #                 f"{DEFAULT_NODE_IMAGE_WO_TAG}"
+     #                 f":{major_minor}"
+     #             )
+     #         except Exception:
+     #             warning("Could not determine server version. Using default node image")
+
+     #         if major_minor and not __version__.startswith(major_minor):
+     #             warning(
+     #                 "Version mismatch between CLI and server/node. CLI is "
+     #                 f"running on version {__version__}, while node and server "
+     #                 f"are on version {major_minor}. This might cause "
+     #                 f"unexpected issues; changing to {major_minor}.<latest> "
+     #                 "is recommended."
+     #             )
+
+     #     # fail safe, in case no custom image is specified and we can't get the
+     #     # server version
+     #     if not image:
+     #         image = f"{DEFAULT_DOCKER_REGISTRY}/{DEFAULT_NODE_IMAGE}"
+
+     #     info(f"Pulling latest node image '{image}'")
+     #     pull_infra_image(docker_client, image, InstanceType.NODE)
+
+     helm_install(
+         release_name=ctx.helm_release_name,
+         chart_name=ChartName.NODE,
+         values_file=ctx.config_file,
+         context=context,
+         namespace=namespace,
      )

-     info("Node container was started!")
-     info(
-         "Please check the node logs to see if the node successfully connects to the "
-         "server."
+     # start port forward for the node proxy server
+     start_port_forward(
+         service_name=f"{ctx.helm_release_name}-node-service",
+         service_port=ctx.config["node"].get("port", DEFAULT_PROXY_SERVER_PORT),
+         port=ctx.config["node"].get("port", DEFAULT_PROXY_SERVER_PORT),
+         context=context,
+         namespace=namespace,
      )

      if attach:
-         logs = container.attach(stream=True, logs=True)
-         Thread(target=print_log_worker, args=(logs,), daemon=True).start()
-         while True:
-             try:
-                 time.sleep(1)
-             except KeyboardInterrupt:
-                 info("Closing log file. Keyboard Interrupt.")
-                 info(
-                     "Note that your node is still running! Shut it down with "
-                     f"'{Fore.RED}v6 node stop{Style.RESET_ALL}'"
-                 )
-                 exit(0)
-     else:
-         info(
-             f"To see the logs, run: {Fore.GREEN}v6 node attach --name "
-             f"{ctx.name}{Style.RESET_ALL}"
-         )
+         attach_logs("app=node")
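The new start flow delegates deployment to `helm_install` and `start_port_forward` from `vantage6.cli.common.start`, whose implementations are not part of this diff. As a rough, hedged sketch of what such helpers could do with the standard Helm and kubectl CLIs (function names, flags and defaults here are assumptions, not the actual vantage6 code):

```python
import subprocess


def helm_install_sketch(
    release_name: str,
    chart: str,
    values_file: str,
    context: str | None = None,
    namespace: str | None = None,
) -> None:
    # Roughly: helm upgrade --install <release> <chart> -f <values.yaml>
    cmd = ["helm", "upgrade", "--install", release_name, chart, "-f", values_file]
    if namespace:
        cmd += ["--namespace", namespace]
    if context:
        cmd += ["--kube-context", context]
    subprocess.run(cmd, check=True)


def start_port_forward_sketch(
    service_name: str,
    local_port: int,
    service_port: int,
    context: str | None = None,
    namespace: str | None = None,
) -> subprocess.Popen:
    # Roughly: kubectl port-forward service/<name> <local>:<remote>
    cmd = [
        "kubectl",
        "port-forward",
        f"service/{service_name}",
        f"{local_port}:{service_port}",
    ]
    if namespace:
        cmd += ["--namespace", namespace]
    if context:
        cmd += ["--context", context]
    # Run in the background so the CLI can continue, e.g. to attach logs.
    return subprocess.Popen(cmd)
```

In the diff itself the chart is identified by `ChartName.NODE` and the values file is the node's own YAML configuration (`values_file=ctx.config_file`), which suggests the node chart consumes the vantage6 config file directly as its Helm values.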
vantage6/cli/node/stop.py CHANGED
@@ -1,31 +1,24 @@
- import time
  import click
- import questionary as q
- import docker

- from colorama import Fore, Style
- from vantage6.cli.context import NodeContext
+ from vantage6.common import info
+ from vantage6.common.globals import InstanceType

- from vantage6.common import warning, error, info
- from vantage6.common.globals import APPNAME
- from vantage6.common.docker.addons import (
-     check_docker_running,
-     delete_volume_if_exists,
-     get_server_config_name,
-     stop_container,
+ from vantage6.cli.common.stop import execute_stop, helm_uninstall, stop_port_forward
+ from vantage6.cli.globals import (
+     DEFAULT_NODE_SYSTEM_FOLDERS as N_FOL,
+     InfraComponentName,
  )
- from vantage6.cli.globals import DEFAULT_NODE_SYSTEM_FOLDERS as N_FOL
-
- from vantage6.cli.node.common import find_running_node_names


  @click.command()
  @click.option("-n", "--name", default=None, help="Configuration name")
+ @click.option("--context", default=None, help="Kubernetes context to use")
+ @click.option("--namespace", default=None, help="Kubernetes namespace to use")
  @click.option(
      "--system",
      "system_folders",
      flag_value=True,
-     help="Search for configuration in system folders instead of " "user folders",
+     help="Search for configuration in system folders instead of user folders",
  )
  @click.option(
      "--user",
@@ -36,96 +29,43 @@ from vantage6.cli.node.common import find_running_node_names
      "system folders. This is the default.",
  )
  @click.option("--all", "all_nodes", flag_value=True, help="Stop all running nodes")
- @click.option(
-     "--force",
-     "force",
-     flag_value=True,
-     help="Kill nodes instantly; don't wait for them to shut down",
- )
  def cli_node_stop(
-     name: str, system_folders: bool, all_nodes: bool, force: bool
+     name: str,
+     context: str,
+     namespace: str,
+     system_folders: bool,
+     all_nodes: bool,
  ) -> None:
      """
      Stop one or all running nodes.
      """
-     check_docker_running()
-     client = docker.from_env()
-
-     running_node_names = find_running_node_names(client)
-
-     if not running_node_names:
-         warning("No nodes are currently running.")
-         return
-
-     if force:
-         warning(
-             "Forcing the node to stop will not terminate helper "
-             "containers, neither will it remove routing rules made on the "
-             "host!"
-         )
-
-     if all_nodes:
-         for container_name in running_node_names:
-             _stop_node(client, container_name, force, system_folders)
-     else:
-         if not name:
-             try:
-                 container_name = q.select(
-                     "Select the node you wish to stop:", choices=running_node_names
-                 ).unsafe_ask()
-             except KeyboardInterrupt:
-                 error("Aborted by user!")
-                 return
-         else:
-             post_fix = "system" if system_folders else "user"
-             container_name = f"{APPNAME}-{name}-{post_fix}"
-
-         if container_name in running_node_names:
-             _stop_node(client, container_name, force, system_folders)
-             info(f"Stopped the {Fore.GREEN}{container_name}{Style.RESET_ALL} Node.")
-         else:
-             error(f"{Fore.RED}{name}{Style.RESET_ALL} is not running?!")
-
-
- def _stop_node(
-     client: docker.DockerClient, container_name: str, force: bool, system_folders: bool
- ) -> None:
+     execute_stop(
+         stop_function=_stop_node,
+         instance_type=InstanceType.NODE,
+         infra_component=InfraComponentName.NODE,
+         stop_all=all_nodes,
+         to_stop=name,
+         namespace=namespace,
+         context=context,
+         system_folders=system_folders,
+     )
+
+
+ def _stop_node(node_name: str, namespace: str, context: str) -> None:
      """
      Stop a node

      Parameters
      ----------
-     client : docker.DockerClient
-         Docker client
-     name : str
-         Name of the node container to stop
-     force : bool
-         Whether to force the node to stop
-     system_folders : bool
-         Whether to use system folders or not
+     node_name : str
+         Name of the node to stop
+     namespace : str
+         Kubernetes namespace to use
+     context : str
+         Kubernetes context to use
      """
-     container = client.containers.get(container_name)
-     # Stop the container. Using stop() gives the container 10s to exit
-     # itself, if not then it will be killed
-     stop_container(container, force)
-
-     # Sleep for 1 second. Not doing so often causes errors that docker volumes deleted
-     # below are 'still in use' when you try to remove them a few ms after the container
-     # has been removed
-     time.sleep(1)
+     helm_uninstall(release_name=node_name, context=context, namespace=namespace)

-     # Delete volumes. This is done here rather than within the node container when
-     # it is stopped, because at that point the volumes are still in use. Here, the node
-     # has already been stopped
-     scope = "system" if system_folders else "user"
-     config_name = get_server_config_name(container_name, scope)
-     ctx = NodeContext(config_name, system_folders, print_log_header=False)
+     stop_port_forward(service_name=f"{node_name}-vantage6-node-service")

-     # Do not delete the data volume as this would remove all sessions.
-     for volume in [
-         # ctx.docker_volume_name,
-         ctx.docker_squid_volume_name,
-         ctx.docker_ssh_volume_name,
-         ctx.docker_vpn_volume_name,
-     ]:
-         delete_volume_if_exists(client, volume)
+     info(f"Node {node_name} stopped successfully.")
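Here `execute_stop`, `helm_uninstall` and `stop_port_forward` come from `vantage6.cli.common.stop` and are not shown in this diff. A hedged sketch of what the uninstall step plausibly amounts to with the standard Helm CLI (the function name and signature below are assumptions):

```python
import subprocess


def helm_uninstall_sketch(
    release_name: str,
    namespace: str | None = None,
    context: str | None = None,
) -> None:
    # Roughly: helm uninstall <release>
    cmd = ["helm", "uninstall", release_name]
    if namespace:
        cmd += ["--namespace", namespace]
    if context:
        cmd += ["--kube-context", context]
    subprocess.run(cmd, check=True)


# Stopping the port forward would then amount to terminating the
# `kubectl port-forward` process that was started when the node was started.
```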
vantage6/cli/server/import_.py CHANGED
@@ -17,7 +17,6 @@ from vantage6.common.globals import (
  from vantage6.cli.common.decorator import click_insert_context
  from vantage6.cli.common.utils import print_log_worker
  from vantage6.cli.context.server import ServerContext
- from vantage6.cli.globals import ServerGlobals
  from vantage6.cli.utils import check_config_name_allowed


@@ -111,7 +110,7 @@ def cli_server_import(
          mounts.append(docker.types.Mount("/mnt/database/", dirname, type="bind"))

          environment_vars = {
-             ServerGlobals.DB_URI_ENV_VAR.value: f"sqlite:////mnt/database/{basename}"
+             # ServerGlobals.DB_URI_ENV_VAR.value: f"sqlite:////mnt/database/{basename}"
          }

      else:
vantage6/cli/server/list.py CHANGED
@@ -1,6 +1,5 @@
  import click

- from vantage6.common.docker.addons import check_docker_running
  from vantage6.common.globals import InstanceType

  from vantage6.cli.common.utils import get_server_configuration_list
@@ -11,6 +10,4 @@ def cli_server_configuration_list() -> None:
      """
      Print the available server configurations.
      """
-     check_docker_running()
-
      get_server_configuration_list(InstanceType.SERVER)