ybox 0.9.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (76) hide show
  1. ybox/__init__.py +2 -0
  2. ybox/cmd.py +307 -0
  3. ybox/conf/completions/ybox.fish +93 -0
  4. ybox/conf/distros/arch/add-gpg-key.sh +29 -0
  5. ybox/conf/distros/arch/distro.ini +192 -0
  6. ybox/conf/distros/arch/init-base.sh +10 -0
  7. ybox/conf/distros/arch/init-user.sh +35 -0
  8. ybox/conf/distros/arch/init.sh +82 -0
  9. ybox/conf/distros/arch/list_fmt_long.py +76 -0
  10. ybox/conf/distros/arch/pkgdeps.py +276 -0
  11. ybox/conf/distros/deb-generic/check-package.sh +77 -0
  12. ybox/conf/distros/deb-generic/distro.ini +190 -0
  13. ybox/conf/distros/deb-generic/fetch-gpg-key-id.sh +30 -0
  14. ybox/conf/distros/deb-generic/init-base.sh +11 -0
  15. ybox/conf/distros/deb-generic/init-user.sh +3 -0
  16. ybox/conf/distros/deb-generic/init.sh +136 -0
  17. ybox/conf/distros/deb-generic/list_fmt_long.py +114 -0
  18. ybox/conf/distros/deb-generic/pkgdeps.py +208 -0
  19. ybox/conf/distros/deb-oldstable/distro.ini +21 -0
  20. ybox/conf/distros/deb-stable/distro.ini +21 -0
  21. ybox/conf/distros/supported.list +5 -0
  22. ybox/conf/distros/ubuntu2204/distro.ini +21 -0
  23. ybox/conf/distros/ubuntu2404/distro.ini +21 -0
  24. ybox/conf/profiles/apps.ini +26 -0
  25. ybox/conf/profiles/basic.ini +310 -0
  26. ybox/conf/profiles/dev.ini +25 -0
  27. ybox/conf/profiles/games.ini +39 -0
  28. ybox/conf/resources/entrypoint-base.sh +170 -0
  29. ybox/conf/resources/entrypoint-common.sh +23 -0
  30. ybox/conf/resources/entrypoint-cp.sh +32 -0
  31. ybox/conf/resources/entrypoint-root.sh +20 -0
  32. ybox/conf/resources/entrypoint-user.sh +21 -0
  33. ybox/conf/resources/entrypoint.sh +249 -0
  34. ybox/conf/resources/prime-run +13 -0
  35. ybox/conf/resources/run-in-dir +60 -0
  36. ybox/conf/resources/run-user-bash-cmd +14 -0
  37. ybox/config.py +255 -0
  38. ybox/env.py +205 -0
  39. ybox/filelock.py +77 -0
  40. ybox/migrate/0.9.0-0.9.7:0.9.8.py +33 -0
  41. ybox/pkg/__init__.py +0 -0
  42. ybox/pkg/clean.py +33 -0
  43. ybox/pkg/info.py +40 -0
  44. ybox/pkg/inst.py +638 -0
  45. ybox/pkg/list.py +191 -0
  46. ybox/pkg/mark.py +68 -0
  47. ybox/pkg/repair.py +150 -0
  48. ybox/pkg/repo.py +251 -0
  49. ybox/pkg/search.py +52 -0
  50. ybox/pkg/uninst.py +92 -0
  51. ybox/pkg/update.py +56 -0
  52. ybox/print.py +121 -0
  53. ybox/run/__init__.py +0 -0
  54. ybox/run/cmd.py +54 -0
  55. ybox/run/control.py +102 -0
  56. ybox/run/create.py +1116 -0
  57. ybox/run/destroy.py +64 -0
  58. ybox/run/graphics.py +367 -0
  59. ybox/run/logs.py +57 -0
  60. ybox/run/ls.py +64 -0
  61. ybox/run/pkg.py +445 -0
  62. ybox/schema/0.9.1-added.sql +27 -0
  63. ybox/schema/0.9.6-added.sql +18 -0
  64. ybox/schema/init.sql +39 -0
  65. ybox/schema/migrate/0.9.0:0.9.1.sql +42 -0
  66. ybox/schema/migrate/0.9.1:0.9.2.sql +8 -0
  67. ybox/schema/migrate/0.9.2:0.9.3.sql +2 -0
  68. ybox/schema/migrate/0.9.5:0.9.6.sql +2 -0
  69. ybox/state.py +914 -0
  70. ybox/util.py +351 -0
  71. ybox-0.9.8.dist-info/LICENSE +19 -0
  72. ybox-0.9.8.dist-info/METADATA +533 -0
  73. ybox-0.9.8.dist-info/RECORD +76 -0
  74. ybox-0.9.8.dist-info/WHEEL +5 -0
  75. ybox-0.9.8.dist-info/entry_points.txt +8 -0
  76. ybox-0.9.8.dist-info/top_level.txt +1 -0
ybox/run/destroy.py ADDED
@@ -0,0 +1,64 @@
1
+ """
2
+ Code for the `ybox-destroy` script that is used to destroy an active or stopped ybox container.
3
+ """
4
+
5
+ import argparse
6
+ import sys
7
+
8
+ from ybox.cmd import check_ybox_exists, run_command
9
+ from ybox.env import Environ
10
+ from ybox.print import fgcolor, print_color, print_error, print_warn
11
+ from ybox.state import YboxStateManagement
12
+
13
+
14
def main() -> None:
    """Entry point for the `ybox-destroy` script: delegates to :func:`main_argv`."""
    main_argv(sys.argv[1:])
17
+
18
+
19
def main_argv(argv: list[str]) -> None:
    """
    Main entrypoint of `ybox-destroy` that takes a list of arguments which are usually the
    command-line arguments of the `main()` function. Pass ["-h"]/["--help"] to see all the
    available arguments with help message for each.

    :param argv: arguments to the function (main function passes `sys.argv[1:]`)
    """
    parsed = parse_args(argv)
    env = Environ()
    docker_cmd = env.docker_cmd
    box_name = parsed.container_name

    # fail early if no such ybox container exists
    check_ybox_exists(docker_cmd, box_name, exit_on_error=True)
    print_color(f"Stopping ybox container '{box_name}'", fg=fgcolor.cyan)
    # a failure here is ignored because the container may already be stopped
    run_command([docker_cmd, "container", "stop", box_name],
                exit_on_error=False, error_msg=f"stopping '{box_name}'")

    print_warn(f"Removing ybox container '{box_name}'")
    force_opt = ["--force"] if parsed.force else []
    run_command([docker_cmd, "container", "rm", *force_opt, box_name],
                error_msg=f"removing '{box_name}'")

    # drop the container's entry from the state database
    print_warn(f"Clearing ybox state for '{box_name}'")
    with YboxStateManagement(env) as state:
        if not state.unregister_container(box_name):
            print_error(f"No entry found for '{box_name}' in the state database")
            sys.exit(1)
51
+
52
+
53
def parse_args(argv: list[str]) -> argparse.Namespace:
    """
    Build the argument parser for `ybox-destroy` and run it over the given arguments.

    :param argv: the list of arguments to be parsed
    :return: the parsed result as a :class:`argparse.Namespace`
    """
    arg_parser = argparse.ArgumentParser(
        description="Stop and remove an active ybox container")
    arg_parser.add_argument("-f", "--force", action="store_true",
                            help="force destroy the container using SIGKILL if required")
    arg_parser.add_argument("container_name", type=str, help="name of the active ybox")
    return arg_parser.parse_args(argv)
ybox/run/graphics.py ADDED
@@ -0,0 +1,367 @@
1
+ """
2
+ Methods for setting up graphics in the container including X11/Wayland, NVIDIA etc.
3
+ """
4
+
5
+ import glob
6
+ import os
7
+ from itertools import chain
8
+ from os.path import realpath
9
+ from typing import Iterable, Optional
10
+
11
+ from ybox.config import Consts, StaticConfiguration
12
+ from ybox.env import Environ
13
+
14
# standard library directories to search for NVIDIA libraries
_STD_LIB_DIRS = ["/usr/lib", "/lib", "/usr/local/lib", "/usr/lib64", "/lib64",
                 "/usr/lib32", "/lib32"]
# additional library directory glob patterns to search for NVIDIA libraries in 32/64-bit systems;
# the '&' in front of the paths is an indicator to the code that this is a glob pattern
_STD_LIB_DIR_PATTERNS = ["&/usr/lib/*-linux-gnu", "&/lib/*-linux-gnu", "&/usr/lib64/*-linux-gnu",
                         "&/lib64/*-linux-gnu", "&/usr/lib32/*-linux-gnu", "&/lib32/*-linux-gnu"]
# environment variables consulted for additional shared library search paths
_STD_LD_LIB_PATH_VARS = ["LD_LIBRARY_PATH", "LD_LIBRARY_PATH_64", "LD_LIBRARY_PATH_32"]
# glob patterns identifying NVIDIA shared libraries inside a library directory
_NVIDIA_LIB_PATTERNS = ["*nvidia*.so*", "*NVIDIA*.so*", "libcuda*.so*", "libnvcuvid*.so*",
                        "libnvoptix*.so*", "gbm/*nvidia*.so*", "vdpau/*nvidia*.so*"]
# glob patterns identifying NVIDIA programs inside a binary directory
_NVIDIA_BIN_PATTERNS = ["nvidia-smi", "nvidia-cuda*", "nvidia-debug*", "nvidia-bug*"]
# note that the code below assumes that file name pattern below is always of the form *nvidia*
# (while others are directories), so if that changes then update _process_nvidia_data_files
_NVIDIA_DATA_PATTERNS = ["/usr/share/nvidia", "/usr/local/share/nvidia", "/lib/firmware/nvidia",
                         "/usr/share/egl/*/*nvidia*", "/usr/share/glvnd/*/*nvidia*",
                         "/usr/share/vulkan/*/*nvidia*"]
# system-wide ld.so configuration file parsed for further library directories
_LD_SO_CONF = "/etc/ld.so.conf"
31
+
32
+
33
def add_env_option(docker_args: list[str], env_var: str, env_val: Optional[str] = None) -> None:
    """
    Append a `-e` option to the podman/docker arguments to set an environment variable.

    :param docker_args: list of podman/docker arguments to which required option has to be appended
    :param env_var: the environment variable to be set
    :param env_val: the value of the environment variable; `None` (the default) means the value
                    is inherited from the host environment
    """
    option = f"-e={env_var}" if env_val is None else f"-e={env_var}={env_val}"
    docker_args.append(option)
46
+
47
+
48
def add_mount_option(docker_args: list[str], src: str, dest: str, flags: str = "") -> None:
    """
    Append a `-v` option to the podman/docker arguments to bind mount a host directory at
    the given destination inside the container.

    :param docker_args: list of podman/docker arguments to which required option has to be appended
    :param src: the source directory in the host system
    :param dest: the destination directory in the container
    :param flags: any additional flags to be passed to `-v` podman/docker argument, defaults to ""
    """
    suffix = f":{flags}" if flags else ""
    docker_args.append(f"-v={src}:{dest}{suffix}")
62
+
63
+
64
def enable_x11(docker_args: list[str], env: Environ) -> None:
    """
    Append options to podman/docker arguments to share host machine's Xorg X11 server
    with the new ybox container. This also sets up sharing of XAUTHORITY file (with automatic
    update, if required, in the `run-in-dir` script) so that no additional setup is required for
    X authentication to work.

    :param docker_args: list of podman/docker arguments to which the options have to be appended
    :param env: an instance of the current :class:`Environ`
    """
    # inherit DISPLAY from the host environment
    add_env_option(docker_args, "DISPLAY")
    xsock = "/tmp/.X11-unix"
    if os.access(xsock, os.R_OK):
        # X server unix sockets are mounted read-only at the same path in the container
        add_mount_option(docker_args, xsock, xsock, "ro")
    if xauth := os.environ.get("XAUTHORITY"):
        # XAUTHORITY file may change after a restart or login (e.g. with Xwayland), so mount some
        # parent directory which is adjusted by run-in-dir script if it has changed;
        # For now the known common parents are used below since using just the immediate
        # parent can cause trouble if one changes the display manager, for example, which
        # uses an entirely different mount point (e.g. gdm uses /run/user/... while sddm
        # uses /tmp)
        parent_dir = os.path.dirname(xauth)
        # check if parent_dir is in $XDG_RUNTIME_DIR or /tmp
        if not env.xdg_rt_dir:
            parent_dirs = {parent_dir, "/tmp"}
        elif xauth.startswith(f"{env.xdg_rt_dir}/") or xauth.startswith("/tmp/"):
            parent_dirs = (env.xdg_rt_dir, "/tmp")
            parent_dir = "/tmp" if parent_dir.startswith("/tmp") else env.xdg_rt_dir
        else:
            parent_dirs = (parent_dir, env.xdg_rt_dir, "/tmp")
        for p_dir in parent_dirs:
            # each candidate parent is mounted read-only at "<dir>-host" inside the container
            add_mount_option(docker_args, p_dir, f"{p_dir}-host", "ro")
        # point XAUTHORITY inside the container at the mounted copy; XAUTHORITY_ORIG records
        # the same initial path, presumably so run-in-dir can detect a changed host
        # XAUTHORITY later -- TODO confirm against the run-in-dir script
        target_xauth = xauth.replace(parent_dir, f"{parent_dir}-host")
        add_env_option(docker_args, "XAUTHORITY", target_xauth)
        add_env_option(docker_args, "XAUTHORITY_ORIG", target_xauth)
99
+
100
+
101
def enable_wayland(docker_args: list[str], env: Environ) -> None:
    """
    Append options to podman/docker arguments to share host machine's Wayland server
    with the new ybox container.

    :param docker_args: list of podman/docker arguments to which the options have to be appended
    :param env: an instance of the current :class:`Environ`
    """
    wayland_display = os.environ.get("WAYLAND_DISPLAY")
    if not wayland_display or not env.xdg_rt_dir:
        # nothing to share without a wayland display and an XDG runtime directory
        return
    add_env_option(docker_args, "WAYLAND_DISPLAY", wayland_display)
    host_sock = f"{env.xdg_rt_dir}/{wayland_display}"
    if os.access(host_sock, os.W_OK):
        add_mount_option(docker_args, host_sock,
                         f"{env.target_xdg_rt_dir}/{wayland_display}")
115
+
116
+
117
def enable_dri(docker_args: list[str]) -> None:
    """
    Append options to podman/docker arguments to enable DRI access.

    :param docker_args: list of podman/docker arguments to which the options have to be appended
    """
    dri_path = "/dev/dri"
    if not os.access(dri_path, os.R_OK):
        return
    docker_args.append(f"--device={dri_path}")
    by_path = f"{dri_path}/by-path"
    if os.access(by_path, os.R_OK):
        add_mount_option(docker_args, by_path, by_path)
127
+
128
+
129
def enable_nvidia(docker_args: list[str], conf: StaticConfiguration) -> None:
    """
    Append options to podman/docker arguments to share host machine's NVIDIA libraries and
    data files with the new ybox container.

    It mounts the required directories from the host system, creates a script in the container
    that is invoked by the container entrypoint script which creates links to the NVIDIA
    libraries and data files and sets up LD_LIBRARY_PATH in the container to point to the
    NVIDIA library directories.

    :param docker_args: list of podman/docker arguments to which the options have to be appended
    :param conf: the :class:`StaticConfiguration` for the container
    """
    # search for nvidia device files and add arguments for those
    for nvidia_dev in _find_nvidia_devices():
        docker_args.append(f"--device={nvidia_dev}")
    # gather the library directories from standard paths, LD_LIBRARY_PATH* and /etc/ld.so.conf
    lib_dirs = _find_all_lib_dirs()
    # find the list of nvidia library directories to be mounted in the target container
    nvidia_lib_dirs = _filter_nvidia_dirs(lib_dirs, _NVIDIA_LIB_PATTERNS)
    # add the directories to be mounted to podman/docker arguments
    mount_nvidia_subdir = conf.target_scripts_dir
    mount_lib_dirs = _prepare_mount_dirs(nvidia_lib_dirs, docker_args,
                                         f"{mount_nvidia_subdir}/mnt_lib")
    # create the script to be run in the container which will create the target
    # directories that will be added to LD_LIBRARY_PATH having symlinks to the mounted libraries
    nvidia_setup = _create_nvidia_setup(docker_args, nvidia_lib_dirs, mount_lib_dirs)

    # mount nvidia binary directories and add code to script to link to them in container
    nvidia_bin_dirs = _filter_nvidia_dirs({realpath(d) for d in Consts.container_bin_dirs()},
                                          _NVIDIA_BIN_PATTERNS)
    mount_bin_dirs = _prepare_mount_dirs(nvidia_bin_dirs, docker_args,
                                         f"{mount_nvidia_subdir}/mnt_bin")
    _add_nvidia_bin_links(mount_bin_dirs, nvidia_setup)

    # finally mount nvidia data file directories and add code to script to link to them
    # which has to be the same paths as in the host
    _process_nvidia_data_files(docker_args, nvidia_setup, f"{mount_nvidia_subdir}/mnt_share")

    # write the accumulated setup script; it is executed later by the container entrypoint
    setup_script = f"{conf.scripts_dir}/{Consts.nvidia_setup_script()}"
    with open(setup_script, "w", encoding="utf-8") as script_fd:
        script_fd.write("\n".join(nvidia_setup))
172
+
173
+
174
+ def _find_nvidia_devices() -> list[str]:
175
+ """
176
+ Return the list of NVIDIA device files in /dev by matching against appropriate glob patterns.
177
+ """
178
+ return [p for p in chain(glob.glob("/dev/nvidia*"), glob.glob(
179
+ "/dev/nvidia*/**/*", recursive=True)) if not os.path.isdir(p)]
180
+
181
+
182
def _find_all_lib_dirs() -> Iterable[str]:
    """
    Return all the library directories used by the system for shared libraries which
    includes the LD_LIBRARY_PATH* components, /etc/ld.so.conf entries and the standard
    library paths.

    :return: existing library directories as resolved real paths, de-duplicated while
             preserving the search order
    """
    # add LD_LIBRARY_PATH components, then /etc/ld.so.conf and then standard library paths
    ld_libs: list[str] = []
    for lib_path_var in _STD_LD_LIB_PATH_VARS:
        if ld_lib := os.environ.get(lib_path_var):
            ld_libs.extend(ld_lib.split(os.pathsep))
    _parse_ld_so_conf(_LD_SO_CONF, ld_libs)
    # using dict with None values instead of set to preserve order while keeping keys unique;
    # the "if p" guard skips empty components (e.g. from a trailing or doubled ':' in
    # LD_LIBRARY_PATH) which previously raised IndexError on the p[0] check and would
    # otherwise resolve to the current working directory
    lib_dirs = {r: None for p in chain(ld_libs, _STD_LIB_DIRS, _STD_LIB_DIR_PATTERNS) if p
                for d in (glob.glob(p[1:]) if p[0] == "&" else (p,))
                if (r := realpath(d)) and os.path.isdir(r)}
    return lib_dirs.keys()
198
+
199
+
200
+ def _parse_ld_so_conf(conf: str, ld_lib_paths: list[str]) -> None:
201
+ """
202
+ Read /etc/ld.so.conf and append all the mentioned library directories (including the
203
+ `include` directives) in the list that has been passed.
204
+
205
+ :param conf: the path to ld.so.conf being processed which is either /etc/ld.so.conf or
206
+ one of the files included by it (in the recursive call)
207
+ :param ld_lib_paths: list of library directories to which the results are appended
208
+ """
209
+ if not os.access(conf, os.R_OK):
210
+ return
211
+ with open(conf, "r", encoding="utf-8") as conf_fd:
212
+ while line := conf_fd.readline():
213
+ if not (line := line.strip()) or line[0] == '#':
214
+ continue
215
+ if words := line.split():
216
+ if words[0].lower() == "include":
217
+ for inc in glob.glob(words[1]):
218
+ _parse_ld_so_conf(inc, ld_lib_paths)
219
+ else:
220
+ ld_lib_paths.append(realpath(line))
221
+
222
+
223
+ def _filter_nvidia_dirs(dirs: Iterable[str], patterns: list[str]) -> list[str]:
224
+ """
225
+ Filter out the directories having NVIDIA artifacts from the given `dirs`.
226
+
227
+ :param dirs: an `Iterable` of directory paths that are checked for NVIDIA artifacts
228
+ :param patterns: directory or file patterns to search in `dirs`
229
+ :return: list of filtered directories that contain an NVIDIA artifact
230
+ """
231
+ def has_nvidia_artifact(d: str) -> bool:
232
+ for pat in patterns:
233
+ if glob.glob(f"{d}/{pat}"):
234
+ return True
235
+ return False
236
+
237
+ return [nvidia_dir for nvidia_dir in dirs if has_nvidia_artifact(nvidia_dir)]
238
+
239
+
240
def _prepare_mount_dirs(dirs: list[str], docker_args: list[str],
                        mount_dir_prefix: str) -> list[str]:
    """
    Append read-only bind mount options mapping each source directory to a target directory
    named `<mount_dir_prefix><index>` where the index is the 0-based position in `dirs`
    (i.e. the first directory lands in `<prefix>0`, the second in `<prefix>1` and so on).

    :param dirs: the list of source directories to be mounted
    :param docker_args: list of podman/docker arguments to which the options have to be appended
    :param mount_dir_prefix: the prefix of the destination directories
    :return: list of destination directories, in the same order as `dirs`
    """
    mount_dirs = [f"{mount_dir_prefix}{idx}" for idx in range(len(dirs))]
    for src_dir, mount_dir in zip(dirs, mount_dirs):
        add_mount_option(docker_args, src_dir, mount_dir, "ro")
    return mount_dirs
258
+
259
+
260
def _create_nvidia_setup(docker_args: list[str], src_dirs: list[str],
                         mount_lib_dirs: list[str]) -> list[str]:
    """
    Generate contents of a `bash` script (returned as a list of strings) to be run on container
    which will set up required NVIDIA libraries from the mounted host library directories.

    The script will create new directories in the container and links to NVIDIA libraries in those
    from the mounted directories. Then it will add option to podman/docker arguments to set
    LD_LIBRARY_PATH in the target container to point to these directories. The returned `bash`
    script should be executed as superuser by the container entrypoint script.

    :param docker_args: list of podman/docker arguments to which the options have to be appended
    :param src_dirs: the list of source directories to be mounted
    :param mount_lib_dirs: list of destination directory mounts (parallel to `src_dirs`)
    :return: contents of a `bash` script as a list of strings for each line of the script which
             should be joined with newlines to get the final contents of the script
    """
    target_dir = Consts.nvidia_target_base_dir()
    setup_script = ["#!/bin/bash", "", "# this script should be run using bash", "",
                    "# setup libraries", "", f"mkdir -p {target_dir} && chmod 0755 {target_dir}"]
    ld_lib_path: list[str] = []
    for idx, mount_lib_dir in enumerate(mount_lib_dirs):
        # one freshly recreated target directory per mounted host library directory
        target_lib_dir = f"{target_dir}/lib{idx}"
        setup_script.append(f"rm -rf {target_lib_dir}")
        setup_script.append(f"mkdir -p {target_lib_dir} && chmod 0755 {target_lib_dir}")
        for pat in _NVIDIA_LIB_PATTERNS:
            # `compgen -G` exits non-zero when the glob matches nothing, so the generated
            # "if" only links libraries that actually exist in the mounted directory
            setup_script.append(f'libs="$(compgen -G "{mount_lib_dir}/{pat}")"')
            setup_script.append('if [ "$?" -eq 0 ]; then')
            setup_script.append(f"  ln -s $libs {target_lib_dir}/. 2>/dev/null")
            # if host library is in a sub-directory then create sub-directory on target too
            if (slash_index := pat.find("/")) != -1:
                # check for corresponding library in host path and /usr/lib
                pat_subdir = pat[:slash_index]
                src_dir = f"{src_dirs[idx]}/{pat_subdir}"
                usr_lib_dir = f"/usr/lib/{pat_subdir}"
                setup_script.append(
                    f'  if compgen -G "{src_dirs[idx]}/lib{pat_subdir}.so*" >/dev/null; then')
                setup_script.append(f"    mkdir -p {src_dir} && chmod 0755 {src_dir}")
                setup_script.append(f"    ln -s $libs {src_dir}/. 2>/dev/null")
                setup_script.append(
                    f'  elif compgen -G "/usr/lib/lib{pat_subdir}.so*" >/dev/null; then')
                setup_script.append(f"    mkdir -p {usr_lib_dir} && chmod 0755 {usr_lib_dir}")
                setup_script.append(f"    ln -s $libs {usr_lib_dir}/. 2>/dev/null")
                setup_script.append("  fi")
            setup_script.append("fi")
        ld_lib_path.append(target_lib_dir)
    # add libraries to LD_LIBRARY_PATH rather than adding to system /etc/ld.so.conf in the
    # container since the system ldconfig cache may go out of sync with latter due to `ldconfig`
    # invocation on another container having the same shared root but with disabled NVIDIA support
    if ld_lib_path:
        # this assumes that LD_LIBRARY_PATH is not touched anywhere else, so NVIDIA will not
        # work if user explicitly overrides LD_LIBRARY_PATH in the [env] section
        add_env_option(docker_args, "LD_LIBRARY_PATH", os.pathsep.join(ld_lib_path))
    return setup_script
314
+
315
+
316
def _add_nvidia_bin_links(mount_bin_dirs: list[str], script: list[str]) -> None:
    """
    Append `bash` code to the given script contents that links NVIDIA programs from the
    mounted directories into `/usr/local/bin` inside the container.

    :param mount_bin_dirs: target directories where host's directories having NVIDIA programs
                           will be mounted
    :param script: the `bash` script contents as a list of string to which the new code is appended
    """
    script.append("# setup binaries")
    for bin_dir in mount_bin_dirs:
        for pattern in _NVIDIA_BIN_PATTERNS:
            # compgen -G fails when nothing matches, so linking is skipped in that case
            script.append(f'bins="$(compgen -G "{bin_dir}/{pattern}")"')
            script.append(
                'if [ "$?" -eq 0 ]; then ln -sf $bins /usr/local/bin/. 2>/dev/null; fi')
330
+
331
+
332
def _process_nvidia_data_files(docker_args: list[str], script: list[str],
                               mount_data_dir_prefix: str) -> None:
    """
    Add `bash` code to given script contents to create symlinks to NVIDIA data files mounted
    from the host environment, and append the corresponding read-only mount options.

    :param docker_args: list of podman/docker arguments to which the options have to be appended
    :param script: the `bash` script contents as a list of string to which the new code is appended
    :param mount_data_dir_prefix: the prefix of the destination directories where the host
                                  data directories have to be mounted
    """
    script.append("# setup data files")
    # directories already mounted, to avoid duplicate mounts when patterns overlap
    nvidia_data_dirs = set[str]()
    idx = 0
    for pat in _NVIDIA_DATA_PATTERNS:
        for path in glob.glob(pat):
            # skip dangling symlinks
            if not os.path.exists(resolved_path := realpath(path)):
                continue
            path_is_dir = os.path.isdir(resolved_path)
            # for plain files, mount the containing directory rather than the file itself
            data_dir = resolved_path if path_is_dir else os.path.dirname(resolved_path)
            if data_dir in nvidia_data_dirs:
                continue
            mount_data_dir = f"{mount_data_dir_prefix}{idx}"
            idx += 1
            add_mount_option(docker_args, data_dir, mount_data_dir, "ro")
            nvidia_data_dirs.add(data_dir)
            path_dir = os.path.dirname(path)
            script.append(f"mkdir -p {path_dir} && chmod 0755 {path_dir} && \\")
            if path_is_dir:
                # links for data directories need to be in the same location as original
                script.append(f"  rm -rf {path} && ln -s {mount_data_dir} {path}")
            else:
                # assume that files inside other directories have the pattern "*nvidia*",
                # so the code avoids hard-coding fully resolved patterns to deal with
                # a case when the data file name changes after driver upgrade
                script.append(f"  ln -sf {mount_data_dir}/*nvidia* {path_dir}/. 2>/dev/null")
ybox/run/logs.py ADDED
@@ -0,0 +1,57 @@
1
+ """
2
+ Code for the `ybox-logs` script that is used to show the podman/docker logs of an active
3
+ ybox container.
4
+ """
5
+
6
+ import argparse
7
+ import sys
8
+
9
+ from ybox.cmd import check_ybox_exists, run_command
10
+ from ybox.env import get_docker_command
11
+ from ybox.print import print_info
12
+
13
+
14
def main() -> None:
    """Entry point for the `ybox-logs` script: delegates to :func:`main_argv`."""
    main_argv(sys.argv[1:])
17
+
18
+
19
def main_argv(argv: list[str]) -> None:
    """
    Main entrypoint of `ybox-logs` that takes a list of arguments which are usually the
    command-line arguments of the `main()` function. Pass ["-h"]/["--help"] to see all the
    available arguments with help message for each.

    :param argv: arguments to the function (main function passes `sys.argv[1:]`)
    """
    args = parse_args(argv)
    docker_cmd = get_docker_command()
    container_name = args.container_name

    # fail early if no such ybox container exists
    check_ybox_exists(docker_cmd, container_name, exit_on_error=True)

    log_cmd = [docker_cmd, "container", "logs"] + (["-f"] if args.follow else [])
    log_cmd.append(container_name)
    try:
        run_command(log_cmd, error_msg=f"showing logs '{container_name}'")
    except KeyboardInterrupt:
        # exit gracefully when the user interrupts a follow or a long log dump
        print()
        print_info("Interrupt")
43
+
44
+
45
def parse_args(argv: list[str]) -> argparse.Namespace:
    """
    Build the argument parser for `ybox-logs` and run it over the given arguments.

    :param argv: the list of arguments to be parsed
    :return: the parsed result as a :class:`argparse.Namespace`
    """
    arg_parser = argparse.ArgumentParser(
        description="Show logs from an active or stopped ybox container")
    arg_parser.add_argument("-f", "--follow", action="store_true",
                            help="follow log output like 'tail -f'")
    arg_parser.add_argument("container_name", type=str, help="name of the running ybox")
    return arg_parser.parse_args(argv)
ybox/run/ls.py ADDED
@@ -0,0 +1,64 @@
1
+ """
2
+ Code for the `ybox-ls` script that is used to show the active or stopped ybox containers.
3
+ """
4
+
5
+ import argparse
6
+ import sys
7
+
8
+ from ybox.cmd import YboxLabel, run_command
9
+ from ybox.env import get_docker_command
10
+
11
+
12
def main() -> None:
    """Entry point for the `ybox-ls` script: delegates to :func:`main_argv`."""
    main_argv(sys.argv[1:])
15
+
16
+
17
def main_argv(argv: list[str]) -> None:
    """
    Main entrypoint of `ybox-ls` that takes a list of arguments which are usually the
    command-line arguments of the `main()` function. Pass ["-h"]/["--help"] to see all the
    available arguments with help message for each.

    :param argv: arguments to the function (main function passes `sys.argv[1:]`)
    """
    args = parse_args(argv)
    docker_cmd = get_docker_command()

    ls_cmd = [docker_cmd, "container", "ls"]
    if args.all:
        # include stopped containers and match all ybox containers (including temporary ones)
        ls_cmd.extend(["--all", f"--filter=label={YboxLabel.CONTAINER_TYPE.value}"])
    else:
        # only primary (non-temporary) active ybox containers
        ls_cmd.append(f"--filter=label={YboxLabel.CONTAINER_PRIMARY.value}")
    for flt in args.filter or []:
        ls_cmd.append(f"--filter={flt}")
    if args.format:
        ls_cmd.append(f"--format={args.format}")
    if args.long_format:
        ls_cmd.append("--no-trunc")
    run_command(ls_cmd, error_msg="listing ybox containers")
42
+
43
+
44
def parse_args(argv: list[str]) -> argparse.Namespace:
    """
    Build the argument parser for `ybox-ls` and run it over the given arguments.

    :param argv: the list of arguments to be parsed
    :return: the parsed result as a :class:`argparse.Namespace`
    """
    arg_parser = argparse.ArgumentParser(description="List ybox containers")
    arg_parser.add_argument("-a", "--all", action="store_true",
                            help="show all containers including stopped and temporary ones; "
                                 "default is to show only active ybox containers and also skip "
                                 "any temporary containers spun by ybox-create")
    arg_parser.add_argument("-f", "--filter", type=str, action="append",
                            help="apply filter to output which is in the <key>=<value> format as "
                                 "accepted by podman/docker (can be specified multiple times)")
    arg_parser.add_argument("-s", "--format", type=str,
                            help="format output using a template as accepted by podman/docker ("
                                 "see https://docs.docker.com/reference/cli/docker/container/ls)")
    arg_parser.add_argument("-l", "--long-format", action="store_true",
                            help="display extended information without truncating fields")
    return arg_parser.parse_args(argv)