ybox 0.9.8.1__py3-none-any.whl → 0.9.11__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (45)
  1. ybox/__init__.py +1 -1
  2. ybox/cmd.py +17 -1
  3. ybox/conf/completions/ybox.fish +2 -0
  4. ybox/conf/distros/arch/init-user.sh +2 -2
  5. ybox/conf/distros/arch/init.sh +1 -0
  6. ybox/conf/distros/arch/pkgdeps.py +2 -0
  7. ybox/conf/distros/deb-generic/pkgdeps.py +2 -1
  8. ybox/conf/profiles/apps.ini +10 -5
  9. ybox/conf/profiles/basic.ini +48 -23
  10. ybox/conf/profiles/dev.ini +4 -6
  11. ybox/conf/resources/entrypoint-cp.sh +1 -1
  12. ybox/conf/resources/entrypoint-root.sh +4 -3
  13. ybox/conf/resources/entrypoint-user.sh +5 -3
  14. ybox/conf/resources/entrypoint.sh +24 -22
  15. ybox/conf/resources/prime-run +0 -2
  16. ybox/conf/resources/run-in-dir +30 -16
  17. ybox/conf/resources/run-user-bash-cmd +17 -1
  18. ybox/conf/resources/ybox-systemd.template +24 -0
  19. ybox/config.py +9 -1
  20. ybox/env.py +18 -7
  21. ybox/migrate/{0.9.0-0.9.7:0.9.8.py → 0.9.0-0.9.10:0.9.11.py} +6 -5
  22. ybox/pkg/clean.py +1 -7
  23. ybox/pkg/info.py +1 -7
  24. ybox/pkg/inst.py +40 -22
  25. ybox/pkg/list.py +1 -6
  26. ybox/pkg/mark.py +1 -1
  27. ybox/pkg/repair.py +4 -0
  28. ybox/pkg/search.py +1 -7
  29. ybox/run/cmd.py +2 -1
  30. ybox/run/control.py +107 -25
  31. ybox/run/create.py +254 -63
  32. ybox/run/destroy.py +89 -4
  33. ybox/run/graphics.py +37 -17
  34. ybox/run/logs.py +2 -1
  35. ybox/run/ls.py +2 -1
  36. ybox/run/pkg.py +49 -7
  37. ybox/state.py +22 -3
  38. ybox/util.py +5 -5
  39. {ybox-0.9.8.1.dist-info → ybox-0.9.11.dist-info}/METADATA +68 -34
  40. ybox-0.9.11.dist-info/RECORD +77 -0
  41. {ybox-0.9.8.1.dist-info → ybox-0.9.11.dist-info}/WHEEL +1 -1
  42. ybox-0.9.8.1.dist-info/RECORD +0 -76
  43. {ybox-0.9.8.1.dist-info → ybox-0.9.11.dist-info}/entry_points.txt +0 -0
  44. {ybox-0.9.8.1.dist-info → ybox-0.9.11.dist-info/licenses}/LICENSE +0 -0
  45. {ybox-0.9.8.1.dist-info → ybox-0.9.11.dist-info}/top_level.txt +0 -0
ybox/run/create.py CHANGED
@@ -18,15 +18,20 @@ from pathlib import Path
 from textwrap import dedent
 from typing import Optional
 
-from ybox.cmd import PkgMgr, RepoCmd, YboxLabel, check_ybox_exists, run_command
+from ybox import __version__ as product_version
+from ybox.cmd import (PkgMgr, RepoCmd, YboxLabel, check_ybox_exists,
+                      parser_version_check, run_command)
 from ybox.config import Consts, StaticConfiguration
 from ybox.env import Environ, NotSupportedError, PathName
 from ybox.filelock import FileLock
 from ybox.pkg.inst import install_package, wrap_container_files
 from ybox.print import (bgcolor, fgcolor, print_color, print_error, print_info,
-                        print_warn)
+                        print_notice, print_warn)
+from ybox.run.destroy import (get_all_containers, remove_orphans_from_db,
+                              ybox_systemd_service_prefix)
 from ybox.run.graphics import (add_env_option, add_mount_option, enable_dri,
-                               enable_nvidia, enable_wayland, enable_x11)
+                               enable_nvidia, enable_wayland, enable_x11,
+                               handle_variable_mount)
 from ybox.run.pkg import parse_args as pkg_parse_args
 from ybox.state import RuntimeConfiguration, YboxStateManagement
 from ybox.util import (EnvInterpolation, config_reader,
@@ -181,19 +186,30 @@ def main_argv(argv: list[str]) -> None:
 
     # set up the final container with all the required arguments
     print_info(f"Initializing container for '{distro}' using '{profile}'")
-    start_container(docker_full_args, current_user, shared_root, shared_root_dirs, conf)
+    run_container(docker_full_args, current_user, shared_root, shared_root_dirs, conf)
     print_info("Waiting for the container to initialize (see "
               f"'ybox-logs -f {box_name}' for detailed progress)")
     # wait for container to initialize while printing out its progress from conf.status_file
-    wait_for_ybox_container(docker_cmd, conf)
+    wait_for_ybox_container(docker_cmd, conf, 600)
 
     # remove distribution specific scripts and restart container the final time
-    print_info(f"Restarting the final container '{box_name}'")
+    print_info(f"Starting the final container '{box_name}'")
     Path(f"{conf.scripts_dir}/{Consts.entrypoint_init_done_file()}").touch(mode=0o644)
-    restart_container(docker_cmd, conf)
-    print_info("Waiting for the container to be ready (see "
-              f"'ybox-logs -f {box_name}' for detailed progress)")
-    wait_for_ybox_container(docker_cmd, conf)
+    wait_msg = ("Waiting for the container to be ready "
+                f"(see ybox-logs -f {box_name}' for detailed progress)")
+    if not args.skip_systemd_service and (sys_path := os.pathsep.join(Consts.sys_bin_dirs())) and (
+            systemctl := shutil.which("systemctl", path=sys_path)) and run_command(
+            [systemctl, "--user", "--quiet", "is-enabled", "default.target"],
+            exit_on_error=False) == 0:
+        create_and_start_service(box_name, env, systemctl, sys_path, wait_msg)
+    else:
+        if not args.skip_systemd_service:
+            print_warn("Skipping user systemd service generation due to missing systemctl in "
+                       f"PATH={os.pathsep.join(Consts.sys_bin_dirs())} or failure in "
+                       "'systemctl --user is-enabled default.target'")
+        start_container(docker_cmd, conf)
+        print_info(wait_msg)
+        wait_for_ybox_container(docker_cmd, conf, 120)
     # truncate the app.list and config.list files so that those actions are skipped if the
     # container is restarted later
     if os.access(conf.app_list, os.W_OK):
@@ -201,11 +217,17 @@ def main_argv(argv: list[str]) -> None:
     if os.access(conf.config_list, os.W_OK):
         truncate_file(conf.config_list)
 
+    # check and remove any dangling container references in state database
+    valid_containers = set(get_all_containers(docker_cmd))
+
     # finally add the state and register the installed packages that were reassigned to this
     # container (because the previously destroyed one has the same configuration and shared root)
     with YboxStateManagement(env) as state:
+        state.begin_transaction()
+        remove_orphans_from_db(valid_containers, state)
         owned_packages = state.register_container(box_name, distro, shared_root, box_conf,
                                                   args.force_own_orphans)
+        state.commit()
         # create wrappers for owned_packages
         if owned_packages:
             list_cmd = pkgmgr[PkgMgr.LIST_FILES.value]
@@ -254,6 +276,12 @@ def parse_args(argv: list[str]) -> argparse.Namespace:
     parser.add_argument("-n", "--name", type=str,
                         help="name of the ybox; default is ybox-<distribution>_<profile> "
                              "if not provided (removing the .ini suffix from <profile> file)")
+    parser.add_argument("-S", "--skip-systemd-service", action="store_true",
+                        help="skip creation of user systemd service file for the ybox container; "
+                             "by default a user systemd service file is created and enabled in "
+                             "~/.config/systemd/user with the name 'ybox-<name>.service' if the "
+                             "<name> does not begin with 'ybox-' prefix else '<name>.service' if "
+                             "it already has 'ybox-' prefix")
     parser.add_argument("-F", "--force-own-orphans", action="store_true",
                         help="force ownership of orphan packages on the same shared root even "
                              "if container configuration does not match, meaning the packages "
@@ -262,7 +290,14 @@ def parse_args(argv: list[str]) -> argparse.Namespace:
                              "container regardless of the container configuration")
     parser.add_argument("-C", "--distribution-config", type=str,
                         help="path to distribution configuration file to use instead of the "
-                             "`distro.ini` from user/system configuration paths")
+                             "'distro.ini' from user/system configuration paths")
+    parser.add_argument("--distribution-image", type=str,
+                        help="custom container image to use that overrides the one specified in "
+                             "the distribution's 'distro.ini'; note that the distribution "
+                             "configuration scripts make assumptions on the available utilities "
+                             "in the image so you should ensure that the provided image is "
+                             "compatible with and a superset of the base image specified in the "
+                             "builtin profile of the distribution in the installed version")
     parser.add_argument("-q", "--quiet", action="store_true",
                         help="proceed without asking any questions using defaults where possible; "
                              "this should usually be used with explicit specification of "
@@ -281,6 +316,7 @@ def parse_args(argv: list[str]) -> argparse.Namespace:
                              "optional and user is presented with a selection menu of the "
                              "available profiles in the user or system profiles directory "
                              "whichever is found (in that order)")
+    parser_version_check(parser, argv)
     return parser.parse_args(argv)
 
 
@@ -363,7 +399,7 @@ def select_profile(args: argparse.Namespace, env: Environ) -> PathName:
     if len(profiles) == 1:
         print_info(f"Using profile '{profiles[0]}'")
         return profiles[0]
-    if len(profiles) == 0:
+    if not profiles:
         print_error(f"No valid profile found in '{profiles_dir}'")
         sys.exit(1)
 
@@ -491,6 +527,12 @@ def read_distribution_config(args: argparse.Namespace,
                                  distro_conf_file, only_sys_conf=True), env_interpolation)
     distro_base_section = distro_config["base"]
     image_name = distro_base_section["image"]  # should always exist
+    if args.distribution_image:
+        print()
+        print_notice(f"Overriding distribution's container image '{image_name}' with the one "
+                     f"provided on the command-line: {args.distribution_image}")
+        print()
+        image_name = args.distribution_image
     shared_root_dirs = distro_base_section["shared_root_dirs"]  # should always exist
     secondary_groups = distro_base_section["secondary_groups"]  # should always exist
     return image_name, shared_root_dirs, secondary_groups, distro_config
@@ -576,6 +618,12 @@ def process_base_section(base_section: SectionProxy, profile: PathName, conf: St
         elif key == "dbus":
             if _get_boolean(val):
                 enable_dbus(docker_args, base_section.getboolean("dbus_sys", fallback=False), env)
+        elif key == "ssh_agent":
+            if _get_boolean(val):
+                enable_ssh_agent(docker_args, env)
+        elif key == "gpg_agent":
+            if _get_boolean(val):
+                enable_gpg_agent(docker_args, env)
         elif key == "dri":
             dri = _get_boolean(val)
         elif key == "nvidia":
@@ -598,6 +646,9 @@ def process_base_section(base_section: SectionProxy, profile: PathName, conf: St
                         (re.match("^--log-opt=path=(.*)/.*$", path) for path in docker_args) if mt]
             for log_dir in log_dirs:
                 os.makedirs(log_dir, mode=Consts.default_directory_mode(), exist_ok=True)
+        elif key == "devices":
+            if val:
+                add_multi_opt(docker_args, "device", val)
         elif key not in ("name", "dbus_sys", "includes"):
             raise NotSupportedError(f"Unknown key '{key}' in the [base] of {profile} "
                                     "or its includes")
@@ -654,21 +705,51 @@ def enable_dbus(docker_args: list[str], sys_enable: bool, env: Environ) -> None:
         to the user dbus message bus
     :param env: an instance of the current :class:`Environ`
     """
-    def replace_target_dir(src: str) -> str:
-        return src.replace(f"{env.xdg_rt_dir}/", f"{env.target_xdg_rt_dir}/")
     if dbus_session := os.environ.get("DBUS_SESSION_BUS_ADDRESS"):
         dbus_user = dbus_session[dbus_session.find("=") + 1:]
         if (dbus_opts_idx := dbus_user.find(",")) != -1:
             dbus_user = dbus_user[:dbus_opts_idx]
-        add_mount_option(docker_args, dbus_user, replace_target_dir(dbus_user))
-        add_env_option(docker_args, "DBUS_SESSION_BUS_ADDRESS", replace_target_dir(dbus_session))
+        add_mount_option(docker_args, dbus_user, _replace_xdg_rt_dir(dbus_user, env))
+        add_env_option(docker_args, "DBUS_SESSION_BUS_ADDRESS",
+                       _replace_xdg_rt_dir(dbus_session, env))
     if sys_enable:
-        dbus_sys = "/run/dbus/system_bus_socket"
-        dbus_sys2 = "/var/run/dbus/system_bus_socket"
-        if os.access(dbus_sys, os.W_OK):
-            add_mount_option(docker_args, dbus_sys, dbus_sys)
-        elif os.access(dbus_sys2, os.W_OK):
-            add_mount_option(docker_args, dbus_sys2, dbus_sys)
+        for dbus_sys in ("/run/dbus/system_bus_socket", "/var/run/dbus/system_bus_socket"):
+            if os.access(dbus_sys, os.W_OK):
+                add_mount_option(docker_args, dbus_sys, dbus_sys)
+                break
+
+
+def enable_ssh_agent(docker_args: list[str], env: Environ) -> None:
+    """
+    Append options to podman/docker arguments to share host machine's ssh agent socket
+    with the new ybox container.
+
+    :param docker_args: list of podman/docker arguments to which the options have to be appended
+    :param env: an instance of the current :class:`Environ`
+    """
+    if ssh_auth_sock := os.environ.get("SSH_AUTH_SOCK"):
+        target_ssh_auth_sock = handle_variable_mount(docker_args, env, ssh_auth_sock)
+        add_env_option(docker_args, "SSH_AUTH_SOCK", target_ssh_auth_sock)
+        add_env_option(docker_args, "SSH_AUTH_SOCK_ORIG", target_ssh_auth_sock)
+
+
+def enable_gpg_agent(docker_args: list[str], env: Environ) -> None:
+    """
+    Append options to podman/docker arguments to share host machine's gpg agent sockets
+    with the new ybox container.
+
+    :param docker_args: list of podman/docker arguments to which the options have to be appended
+    :param env: an instance of the current :class:`Environ`
+    """
+    if gpg_agent_info := os.environ.get("GPG_AGENT_INFO"):
+        target_gpg_agent_info = handle_variable_mount(docker_args, env, gpg_agent_info)
+        add_env_option(docker_args, "GPG_AGENT_INFO", target_gpg_agent_info)
+        add_env_option(docker_args, "GPG_AGENT_INFO_ORIG", target_gpg_agent_info)
+
+
+def _replace_xdg_rt_dir(src: str, env: Environ) -> str:
+    """replace host's $XDG_RUNTIME_DIR in `src` with that of container user's $XDG_RUNTIME_DIR"""
+    return src.replace(env.xdg_rt_dir + "/", env.target_xdg_rt_dir + "/")
 
 
 def add_multi_opt(docker_args: list[str], opt: str, val: Optional[str]) -> None:
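 
Note: the agent-sharing helpers above rewrite socket paths from the host's $XDG_RUNTIME_DIR into the container user's runtime directory. A minimal sketch of the substitution performed by _replace_xdg_rt_dir, using hypothetical host (uid 1000) and container (uid 1001) runtime directories:

    host_addr = "unix:path=/run/user/1000/bus"            # hypothetical DBUS_SESSION_BUS_ADDRESS
    xdg_rt_dir, target_xdg_rt_dir = "/run/user/1000", "/run/user/1001"
    assert host_addr.replace(xdg_rt_dir + "/", target_xdg_rt_dir + "/") == "unix:path=/run/user/1001/bus"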
@@ -681,7 +762,7 @@ def add_multi_opt(docker_args: list[str], opt: str, val: Optional[str]) -> None:
     """
     if val:
         for opt_val in val.split(","):
-            docker_args.append(f"--{opt}={opt_val}")
+            docker_args.append(f"--{opt}={opt_val.strip()}")
 
 
 def process_security_section(sec_section: SectionProxy, profile: PathName,
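 
Note: with the stripping change above, a comma-separated profile value (for example the new [base] `devices` key) expands into one repeated option per entry even when the list contains spaces. A small sketch of the expected expansion; the device paths are only illustrative:

    docker_args: list[str] = []
    add_multi_opt(docker_args, "device", "/dev/kvm, /dev/fuse")
    assert docker_args == ["--device=/dev/kvm", "--device=/dev/fuse"]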
@@ -759,8 +840,7 @@ def process_configs_section(configs_section: SectionProxy, config_hardlinks: boo
     # this is refreshed on every container start
 
     # always recreate the directory to pick up any changes
-    if os.path.exists(conf.configs_dir):
-        shutil.rmtree(conf.configs_dir)
+    shutil.rmtree(conf.configs_dir, ignore_errors=True)
     os.makedirs(conf.configs_dir, mode=Consts.default_directory_mode(), exist_ok=True)
     if config_hardlinks:
         print_info("Creating hard links to paths specified in [configs] ...")
@@ -777,24 +857,38 @@
                 raise NotSupportedError("Incorrect value format in [configs] section for "
                                         f"'{key}'. Required: '{{src}} -> {{dest}}'")
             src_path = os.path.realpath(f_val[:split_idx].strip())
-            dest_path = f"{conf.configs_dir}/{f_val[split_idx + 2:].strip()}"
+            dest_rel_path = f_val[split_idx + 2:].strip()
+            dest_path = f"{conf.configs_dir}/{dest_rel_path}"
             if os.access(src_path, os.R_OK):
-                os.makedirs(os.path.dirname(dest_path),
-                            mode=Consts.default_directory_mode(), exist_ok=True)
+                if os.path.exists(dest_path):
+                    shutil.rmtree(dest_path, ignore_errors=True)
+                else:
+                    os.makedirs(os.path.dirname(dest_path),
+                                mode=Consts.default_directory_mode(), exist_ok=True)
                 if os.path.isdir(src_path):
                     copytree(src_path, dest_path, hardlink=config_hardlinks)
                 else:
                     if config_hardlinks:
-                        os.link(os.path.realpath(src_path), dest_path, follow_symlinks=True)
+                        try:
+                            os.link(os.path.realpath(src_path), dest_path, follow_symlinks=True)
+                        except OSError:
+                            # in case of error (likely due to cross-device link) fallback to copy
+                            shutil.copy2(src_path, dest_path, follow_symlinks=True)
                     else:
                         shutil.copy2(src_path, dest_path, follow_symlinks=True)
-                config_list_fd.write(val)
-                config_list_fd.write("\n")
+                # - if key has ":copy", then indicate creation of copies in the target
+                # - if key has ":dir", then indicate replication of directory structure with links
+                #   for individual files
+                # - else a symlink should be created
+                # handled by "replicate_config_files" function in entrypoint.sh
+                prefix = "COPY" if key.endswith(":copy") else (
+                    "LINK_DIR" if key.endswith(":dir") else "LINK")
+                config_list_fd.write(f"{prefix}:{dest_rel_path}\n")
             else:
                 print_warn(f"Skipping inaccessible configuration path '{src_path}'")
     print_info("DONE.")
     # finally mount the configs directory to corresponding directory in the target container
-    add_mount_option(docker_args, conf.configs_dir, conf.target_configs_dir, "ro")
+    add_mount_option(docker_args, conf.configs_dir, conf.target_configs_dir)
 
 
 def process_env_section(env_section: SectionProxy, docker_args: list[str]) -> None:
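 
Note: each [configs] entry is now written to config.list as a "PREFIX:relative/path" line (COPY, LINK_DIR or LINK) that the replicate_config_files function in entrypoint.sh consumes. A hedged sketch of how such a line could be split by a consumer; the path below is only a hypothetical example and not from the package:

    line = "LINK_DIR:.config/nvim"            # hypothetical config.list entry
    prefix, _, rel_path = line.partition(":")
    assert (prefix, rel_path) == ("LINK_DIR", ".config/nvim")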
@@ -822,7 +916,7 @@ def process_apps_section(apps_section: SectionProxy, conf: StaticConfiguration,
     :return: dictionary of package names mapped to their list of dependencies as specified
              in the `[apps]` section
     """
-    if len(apps_section) == 0:
+    if not apps_section:
         return {}
     quiet_flag = pkgmgr[PkgMgr.QUIET_FLAG.value]
     opt_dep_flag = pkgmgr[PkgMgr.OPT_DEP_FLAG.value]
@@ -859,37 +953,78 @@ def process_apps_section(apps_section: SectionProxy, conf: StaticConfiguration,
 
 
 # The shutil.copytree(...) method does not work correctly for "symlinks=False" (or at least
-# not like 'cp -rL' or 'cp -rlL') where it does not create the source symlinked file rather
-# only the target one in the destination directory.
-# This is a simplified version using os.walk(...) that works correctly that always has:
-# a. follow_symlinks=True, and b. ignore_dangling_symlinks=True
-def copytree(src: str, dest: str, hardlink: bool = False) -> None:
+# not like 'cp -aL' or 'cp -alL') where it does not create the source symlinked file rather
+# only the target one in the destination directory, and neither does it provide the option to
+# create hardlinks.
+#
+# This is a simplified version using recursive os.scandir(...) that works correctly so that the
+# copy will continue to work even if source disappears in all cases but still avoid making copies
+# for all symlinks. So it behaves like follow_symlinks=True if the symlink destination is outside
+# the "src_path" else it is False.
+def copytree(src_path: str, dest: str, hardlink: bool = False,
+             src_root: Optional[str] = None) -> None:
     """
     Copy or create hard links to a source directory tree in the given destination directory.
     Since hard links to directories are not supported, the destination will mirror the directories
     of the source while the files inside will be either copies or hard links to the source.
+    Symlinks are copied as such if the source ones point within the tree, else the target is
+    followed and copied recursively.
 
-    :param src: the source directory tree
-    :param dest: the destination directory which should exist
+    Note: this function only handles regular files and directories (and hard/symbolic links to
+    them) and will skip special files like device files, fifos etc.
+
+    :param src_path: the source directory to be copied (should have been resolved using
+                     `os.path.realpath` or `Path.resolve` if `src_root` argument is not supplied)
+    :param dest: the destination directory which should not already exist (but its parent should)
     :param hardlink: if True then create hard links to the files in the source (so it should
                      be in the same filesystem) else copy the files, defaults to False
+    :param src_root: the resolved root source directory (same as `src_path` if `None` which is
+                     assumed to have been resolved using `os.path.realpath` or `Path.resolve`)
     """
-    for src_dir, _, src_files in os.walk(src, followlinks=True):
-        # substitute 'src' prefix with 'dest'
-        dest_dir = f"{dest}{src_dir[len(src):]}"
-        os.mkdir(dest_dir, mode=stat.S_IMODE(os.stat(src_dir).st_mode))
-        for src_file in src_files:
-            src_path = f"{src_dir}/{src_file}"
-            if os.path.exists(src_path):
-                if hardlink:
-                    try:
-                        os.link(os.path.realpath(src_path), f"{dest_dir}/{src_file}",
-                                follow_symlinks=True)
+    src_root = src_root or src_path
+    src_root = src_root.rstrip("/")
+    os.mkdir(dest, mode=stat.S_IMODE(os.stat(src_path).st_mode))
+    # follow symlink if it leads to outside the "src" tree, else copy as a symlink which
+    # ensures that all destination files are always accessible regardless of source going
+    # away (for example), and also reduces the size with hardlink=False as much as possible
+    with os.scandir(src_path) as src_it:
+        for entry in src_it:
+            entry_path = ""
+            entry_st_mode = 0
+            dest_path = f"{dest}/{entry.name}"
+            try:
+                if entry.is_symlink():
+                    # check if entry is a symlink inside the tree or outside
+                    l_name = os.readlink(entry.path)
+                    if "/" not in l_name:  # shortcut check for links in the same directory
+                        os.symlink(l_name, dest_path)
                         continue
-                    except OSError:
-                        # in case of error (likely due to cross-device link) fallback to copying
-                        pass
-                shutil.copy2(src_path, f"{dest_dir}/{src_file}", follow_symlinks=True)
+                    entry_path = os.path.realpath(entry.path)
+                    if entry_path.startswith(src_root + "/"):
+                        rpath = entry_path[len(src_root) + 1:]
+                        os.symlink(("../" * rpath.count("/")) + rpath, dest_path)
+                        continue
+                    entry_st_mode = os.stat(entry_path).st_mode
+                entry_path = entry_path or entry.path
+                if stat.S_ISREG(entry_st_mode) or (entry_st_mode == 0 and entry.is_file()):
+                    if hardlink:
+                        try:
+                            os.link(entry_path, dest_path)
+                            continue
+                        except OSError:
+                            # in case of error (likely due to cross-device link) fallback to copy
+                            pass
+                    shutil.copy2(entry_path, dest_path)
+                elif stat.S_ISDIR(entry_st_mode) or (entry_st_mode == 0 and entry.is_dir()):
+                    copytree(entry_path, dest_path, hardlink,
+                             entry_path if entry_st_mode else src_root)
+                else:
+                    print_warn(f"Skipping copy/link of special file (fifo/dev/...) '{entry_path}'")
+            except OSError as err:
+                # ignore permission and related errors and continue
+                print_warn(f"Skipping copy/link of '{entry_path}' due to error: {err}")
+    # TODO: SW: check for success in all copytree's else return False, then check at caller
+    # to print a bold warning
 
 
 def setup_ybox_scripts(conf: StaticConfiguration, distro_config: ConfigParser) -> None:
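 
Note: per its docstring, the rewritten copytree expects an already-resolved source path and a destination that does not exist yet (only its parent must). A minimal usage sketch with hypothetical paths, assuming the parent directory of the destination exists:

    src = os.path.realpath(os.path.expanduser("~/.config/someapp"))   # resolve the source first
    copytree(src, "/tmp/ybox-configs/someapp", hardlink=True)          # destination must not exist yet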
@@ -902,7 +1037,7 @@ def setup_ybox_scripts(conf: StaticConfiguration, distro_config: ConfigParser) -
         distribution's `distro.ini`
     """
     # first create local mount directory having entrypoint and other scripts
-    if os.path.exists(conf.scripts_dir):
+    if os.path.isdir(conf.scripts_dir):
         shutil.rmtree(conf.scripts_dir)
     os.makedirs(conf.scripts_dir, exist_ok=True)
     # allow for read/execute permissions for all since non-root user needs access with docker
@@ -1030,8 +1165,8 @@ def remove_image(docker_cmd: str, image_name: str) -> None:
                 error_msg="image remove")
 
 
-def start_container(docker_full_cmd: list[str], current_user: str, shared_root: str,
-                    shared_root_dirs: str, conf: StaticConfiguration) -> None:
+def run_container(docker_full_cmd: list[str], current_user: str, shared_root: str,
+                  shared_root_dirs: str, conf: StaticConfiguration) -> None:
     """
     Create and start the final ybox container applying all the provided configuration.
     The following characteristics of the container are noteworthy:
@@ -1054,9 +1189,8 @@ def start_container(docker_full_cmd: list[str], current_user: str, shared_root:
       programs from less secure containers; the `ybox-pkg` tool provided a convenient high-level
       package manager that users should use for managing packages in the containers which will
       help in exposing packages only in designated containers
-    * systemd user service file can be generated for podman to start the container automatically
-      on user login; docker installations run a background user service in any case which starts
-      up the container without any additional setup
+    * systemd user service file is generated for podman/docker to start the container
+      automatically on user login (in absence of -S/--skip-systemd-service option)
 
     :param docker_full_cmd: the `docker`/`podman run -itd` command with all the options filled
                             in from the container profile specification as a list of string
@@ -1089,8 +1223,10 @@ def start_container(docker_full_cmd: list[str], current_user: str, shared_root:
     if conf.env.uses_podman:
         docker_full_cmd.append(f"--user={user_uid}")
         docker_full_cmd.append("--userns=keep-id")
+        docker_full_cmd.append(f"-e=USER={current_user}")
     else:
         docker_full_cmd.append("--user=0")
+        docker_full_cmd.append("-e=USER=root")
     docker_full_cmd.append(f"-e=YBOX_HOST_UID={user_uid}")
     docker_full_cmd.append(f"-e=YBOX_HOST_GID={user_gid}")
     docker_full_cmd.append(conf.box_image(bool(shared_root)))
@@ -1108,8 +1244,63 @@ def start_container(docker_full_cmd: list[str], current_user: str, shared_root:
         sys.exit(code)
 
 
-def restart_container(docker_cmd: str, conf: StaticConfiguration) -> None:
-    """restart a stopped podman/docker container"""
+def create_and_start_service(box_name: str, env: Environ, systemctl: str, sys_path: str,
+                             wait_msg: str) -> None:
+    """
+    Create, enable and start systemd service for a ybox container.
+
+    :param box_name: name of the ybox container
+    :param env: an instance of the current :class:`Environ`
+    :param systemctl: resolved path to the `systemctl` utility
+    :param sys_path: PATH used for searching system utilities
+    :param wait_msg: message to output before waiting for the service to start
+    """
+    svc_file = env.search_config_path("resources/ybox-systemd.template", only_sys_conf=True)
+    with svc_file.open("r", encoding="utf-8") as svc_fd:
+        svc_tmpl = svc_fd.read()
+    if env.uses_podman:
+        manager_name = "Podman"
+        docker_requires = ""
+    else:
+        manager_name = "Docker"
+        docker_requires = "After=docker.service\nRequires=docker.service\n"
+    systemd_dir = env.systemd_user_conf_dir()
+    ybox_svc_prefix = ybox_systemd_service_prefix(box_name)
+    ybox_svc = f"{ybox_svc_prefix}.service"
+    ybox_env = f".{ybox_svc_prefix}.env"
+    formatted_now = env.now.astimezone().strftime("%a %d %b %Y %H:%M:%S %Z")
+    # get the path of ybox-control and replace $HOME by %h to keep it generic
+    if ybox_ctrl_path := shutil.which("ybox-control"):
+        ybox_bin_dir = os.path.dirname(ybox_ctrl_path)
+        if ybox_bin_dir.startswith(env.home + "/"):
+            ybox_bin_dir = f"%h{ybox_bin_dir[len(env.home):]}"
+    else:
+        ybox_bin_dir = "%h/.local/bin"
+    svc_content = svc_tmpl.format(name=box_name, version=product_version, date=formatted_now,
+                                  manager_name=manager_name, docker_requires=docker_requires,
+                                  sys_path=sys_path, ybox_bin_dir=ybox_bin_dir, env_file=ybox_env)
+    env_content = f"""
+        SLEEP_SECS={{sleep_secs}}
+        # set the container manager to the one configured during ybox-create
+        YBOX_CONTAINER_MANAGER={env.docker_cmd}
+        """
+    os.makedirs(systemd_dir, Consts.default_directory_mode(), exist_ok=True)
+    print_color(f"Generating user systemd service '{ybox_svc}' and reloading daemon", fgcolor.cyan)
+    with open(f"{systemd_dir}/{ybox_svc}", "w", encoding="utf-8") as svc_fd:
+        svc_fd.write(svc_content)
+    with open(f"{systemd_dir}/{ybox_env}", "w", encoding="utf-8") as env_fd:
+        env_fd.write(dedent(env_content.format(sleep_secs=0)))  # don't sleep for the start below
+    run_command([systemctl, "--user", "daemon-reload"], exit_on_error=False)
+    run_command([systemctl, "--user", "enable", ybox_svc], exit_on_error=True)
+    print_info(wait_msg)
+    run_command([systemctl, "--user", "start", ybox_svc], exit_on_error=True)
+    # change SLEEP_SECS to 5 for subsequent starts
+    with open(f"{systemd_dir}/{ybox_env}", "w", encoding="utf-8") as env_fd:
+        env_fd.write(dedent(env_content.format(sleep_secs=5)))
+
+
+def start_container(docker_cmd: str, conf: StaticConfiguration) -> None:
+    """start a stopped podman/docker container"""
     if (code := int(run_command([docker_cmd, "container", "start", conf.box_name],
                                 exit_on_error=False, error_msg="container restart"))) != 0:
         print_error(f"Also check 'ybox-logs {conf.box_name}' for details")
ybox/run/destroy.py CHANGED
@@ -3,11 +3,16 @@ Code for the `ybox-destroy` script that is used to destroy an active or stopped
 """
 
 import argparse
+import os
+import shutil
+import subprocess
 import sys
 
-from ybox.cmd import check_ybox_exists, run_command
+from ybox.cmd import check_ybox_exists, parser_version_check, run_command
+from ybox.config import Consts
 from ybox.env import Environ
-from ybox.print import fgcolor, print_color, print_error, print_warn
+from ybox.print import (fgcolor, print_color, print_error, print_notice,
+                        print_warn)
 from ybox.state import YboxStateManagement
 
 
@@ -31,9 +36,18 @@ def main_argv(argv: list[str]) -> None:
 
     check_ybox_exists(docker_cmd, container_name, exit_on_error=True)
     print_color(f"Stopping ybox container '{container_name}'", fg=fgcolor.cyan)
+    # check if there is a systemd service for the container
+    ybox_svc_prefix = ybox_systemd_service_prefix(container_name)
+    ybox_svc = f"{ybox_svc_prefix}.service"
+    systemctl = check_systemd_service_present(ybox_svc)
+
     # continue even if this fails since the container may already be in stopped state
-    run_command([docker_cmd, "container", "stop", container_name],
-                exit_on_error=False, error_msg=f"stopping '{container_name}'")
+    if systemctl:
+        run_command([systemctl, "--user", "stop", ybox_svc],
+                    exit_on_error=False, error_msg=f"stopping '{container_name}'")
+    else:
+        run_command([docker_cmd, "container", "stop", container_name],
+                    exit_on_error=False, error_msg=f"stopping '{container_name}'")
 
     print_warn(f"Removing ybox container '{container_name}'")
     rm_args = [docker_cmd, "container", "rm"]
@@ -42,12 +56,32 @@ def main_argv(argv: list[str]) -> None:
         rm_args.append(container_name)
     run_command(rm_args, error_msg=f"removing '{container_name}'")
 
+    # remove systemd service file and reload daemon
+    if systemctl:
+        print_color(f"Removing systemd service '{ybox_svc}' and reloading daemon", fg=fgcolor.cyan)
+        run_command([systemctl, "--user", "disable", ybox_svc], exit_on_error=False)
+        systemd_dir = env.systemd_user_conf_dir()
+        try:
+            os.unlink(f"{systemd_dir}/{ybox_svc}")
+        except OSError:
+            pass
+        try:
+            os.unlink(f"{systemd_dir}/.{ybox_svc_prefix}.env")
+        except OSError:
+            pass
+        run_command([systemctl, "--user", "daemon-reload"], exit_on_error=False)
+
+    # check and remove any dangling container references in state database
+    valid_containers = set(get_all_containers(docker_cmd))
+
     # remove the state from the database
     print_warn(f"Clearing ybox state for '{container_name}'")
     with YboxStateManagement(env) as state:
+        state.begin_transaction()
         if not state.unregister_container(container_name):
             print_error(f"No entry found for '{container_name}' in the state database")
             sys.exit(1)
+        remove_orphans_from_db(valid_containers, state)
 
 
 def parse_args(argv: list[str]) -> argparse.Namespace:
@@ -61,4 +95,55 @@ def parse_args(argv: list[str]) -> argparse.Namespace:
     parser.add_argument("-f", "--force", action="store_true",
                         help="force destroy the container using SIGKILL if required")
     parser.add_argument("container_name", type=str, help="name of the active ybox")
+    parser_version_check(parser, argv)
     return parser.parse_args(argv)
+
+
+def ybox_systemd_service_prefix(container_name: str) -> str:
+    """systemd service name prefix for given ybox container name"""
+    return container_name if container_name.startswith("ybox-") else f"ybox-{container_name}"
+
+
+def check_systemd_service_present(user_svc: str) -> str:
+    """
+    Check if the given user systemd service is present and return the PATH of system installed
+    `systemctl` tool if true, else return empty string.
+
+    :param user_svc: name the user systemd service file
+    :return: full path of `systemctl` if installed and user systemd service is available else empty
+    """
+    if (systemctl := shutil.which("systemctl", path=os.pathsep.join(Consts.sys_bin_dirs()))) and \
+            subprocess.run([systemctl, "--user", "--quiet", "list-unit-files", user_svc],
+                           check=False, capture_output=True).returncode == 0:
+        return systemctl
+    return ""
+
+
+def get_all_containers(docker_cmd: str) -> list[str]:
+    """
+    Get all the valid containers as known to the container manager.
+
+    :param docker_cmd: the podman/docker executable to use
+    :return: list of valid container names
+    """
+    result = run_command([docker_cmd, "container", "ls", "--all", "--format={{ .Names }}"],
+                         capture_output=True, exit_on_error=False, error_msg="listing containers")
+    return [] if isinstance(result, int) else result.splitlines()
+
+
+def remove_orphans_from_db(valid_containers: set[str], state: YboxStateManagement) -> None:
+    """
+    Unregister orphan container entries from the state database. This takes the output of
+    :func:`get_all_containers` as argument and should be invoked inside `YboxStateManagement`
+    context manager (i.e. with state database as locked), while the call to `get_all_containers`
+    can be outside the lock.
+
+    :param valid_containers: set of valid container names from :func:`get_all_containers`
+    :param state: instance of `YboxStateManagement` having the state of all ybox containers
+    """
+    if not os.environ.get("YBOX_TESTING"):
+        orphans = set(state.get_containers()) - valid_containers
+        if orphans:
+            print_notice(f"Removing orphan container entries from database: {', '.join(orphans)}")
+            for orphan in orphans:
+                state.unregister_container(orphan)
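 
Note: as the docstring above indicates, the container listing may happen before the state database is locked while the orphan cleanup must run inside the YboxStateManagement context. A brief sketch of the intended call order, using the same names as in the diffs above (this mirrors the pattern in main_argv of both ybox-create and ybox-destroy):

    valid_containers = set(get_all_containers(docker_cmd))   # outside the database lock
    with YboxStateManagement(env) as state:                   # locks the state database
        state.begin_transaction()
        remove_orphans_from_db(valid_containers, state)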