atomicshop 3.3.8-py3-none-any.whl → 3.10.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of atomicshop might be problematic.

Files changed (120)
  1. atomicshop/__init__.py +1 -1
  2. atomicshop/a_mains/get_local_tcp_ports.py +85 -0
  3. atomicshop/a_mains/install_ca_certificate.py +172 -0
  4. atomicshop/a_mains/process_from_port.py +119 -0
  5. atomicshop/a_mains/set_default_dns_gateway.py +90 -0
  6. atomicshop/basics/strings.py +1 -1
  7. atomicshop/certificates.py +2 -2
  8. atomicshop/dns.py +26 -28
  9. atomicshop/etws/traces/trace_tcp.py +1 -2
  10. atomicshop/mitm/centered_settings.py +133 -0
  11. atomicshop/mitm/config_static.py +22 -44
  12. atomicshop/mitm/connection_thread_worker.py +383 -165
  13. atomicshop/mitm/engines/__parent/recorder___parent.py +1 -1
  14. atomicshop/mitm/engines/__parent/requester___parent.py +1 -1
  15. atomicshop/mitm/engines/__parent/responder___parent.py +15 -2
  16. atomicshop/mitm/engines/create_module_template.py +1 -2
  17. atomicshop/mitm/import_config.py +91 -89
  18. atomicshop/mitm/initialize_engines.py +1 -2
  19. atomicshop/mitm/message.py +5 -4
  20. atomicshop/mitm/mitm_main.py +238 -122
  21. atomicshop/mitm/recs_files.py +61 -5
  22. atomicshop/mitm/ssh_tester.py +82 -0
  23. atomicshop/mitm/statistic_analyzer.py +33 -12
  24. atomicshop/mitm/statistic_analyzer_helper/moving_average_helper.py +104 -31
  25. atomicshop/networks.py +160 -92
  26. atomicshop/package_mains_processor.py +84 -0
  27. atomicshop/permissions/ubuntu_permissions.py +47 -0
  28. atomicshop/print_api.py +3 -5
  29. atomicshop/process.py +11 -4
  30. atomicshop/python_functions.py +23 -108
  31. atomicshop/speech_recognize.py +8 -0
  32. atomicshop/ssh_remote.py +140 -164
  33. atomicshop/web.py +63 -22
  34. atomicshop/web_apis/google_llm.py +22 -14
  35. atomicshop/wrappers/ctyping/msi_windows_installer/cabs.py +2 -1
  36. atomicshop/wrappers/ctyping/msi_windows_installer/extract_msi_main.py +2 -1
  37. atomicshop/wrappers/dockerw/dockerw.py +2 -2
  38. atomicshop/wrappers/elasticsearchw/config_basic.py +0 -12
  39. atomicshop/wrappers/elasticsearchw/elastic_infra.py +0 -190
  40. atomicshop/wrappers/factw/install/pre_install_and_install_before_restart.py +5 -5
  41. atomicshop/wrappers/githubw.py +180 -68
  42. atomicshop/wrappers/loggingw/consts.py +1 -1
  43. atomicshop/wrappers/loggingw/handlers.py +1 -1
  44. atomicshop/wrappers/loggingw/loggingw.py +20 -4
  45. atomicshop/wrappers/loggingw/reading.py +18 -0
  46. atomicshop/wrappers/mongodbw/mongo_infra.py +0 -38
  47. atomicshop/wrappers/netshw.py +124 -3
  48. atomicshop/wrappers/playwrightw/scenarios.py +1 -1
  49. atomicshop/wrappers/powershell_networking.py +80 -0
  50. atomicshop/wrappers/psutilw/psutil_networks.py +9 -0
  51. atomicshop/wrappers/pywin32w/win_event_log/fetch.py +174 -0
  52. atomicshop/wrappers/pywin32w/win_event_log/subscribes/process_create.py +3 -105
  53. atomicshop/wrappers/pywin32w/win_event_log/subscribes/process_terminate.py +3 -57
  54. atomicshop/wrappers/pywin32w/wmis/win32_networkadapterconfiguration.py +12 -27
  55. atomicshop/wrappers/pywin32w/wmis/win32networkadapter.py +15 -9
  56. atomicshop/wrappers/socketw/certificator.py +19 -9
  57. atomicshop/wrappers/socketw/creator.py +101 -14
  58. atomicshop/wrappers/socketw/dns_server.py +17 -5
  59. atomicshop/wrappers/socketw/exception_wrapper.py +21 -16
  60. atomicshop/wrappers/socketw/process_getter.py +86 -0
  61. atomicshop/wrappers/socketw/receiver.py +29 -9
  62. atomicshop/wrappers/socketw/sender.py +10 -9
  63. atomicshop/wrappers/socketw/sni.py +31 -10
  64. atomicshop/wrappers/socketw/{base.py → socket_base.py} +33 -1
  65. atomicshop/wrappers/socketw/socket_client.py +11 -10
  66. atomicshop/wrappers/socketw/socket_wrapper.py +125 -32
  67. atomicshop/wrappers/socketw/ssl_base.py +6 -2
  68. atomicshop/wrappers/ubuntu_terminal.py +21 -18
  69. atomicshop/wrappers/win_auditw.py +189 -0
  70. {atomicshop-3.3.8.dist-info → atomicshop-3.10.0.dist-info}/METADATA +25 -30
  71. {atomicshop-3.3.8.dist-info → atomicshop-3.10.0.dist-info}/RECORD +83 -109
  72. atomicshop/_basics_temp.py +0 -101
  73. atomicshop/a_installs/ubuntu/docker_rootless.py +0 -11
  74. atomicshop/a_installs/ubuntu/docker_sudo.py +0 -11
  75. atomicshop/a_installs/ubuntu/elastic_search_and_kibana.py +0 -10
  76. atomicshop/a_installs/ubuntu/mongodb.py +0 -12
  77. atomicshop/a_installs/win/fibratus.py +0 -9
  78. atomicshop/a_installs/win/mongodb.py +0 -9
  79. atomicshop/a_installs/win/wsl_ubuntu_lts.py +0 -10
  80. atomicshop/addons/a_setup_scripts/install_psycopg2_ubuntu.sh +0 -3
  81. atomicshop/addons/package_setup/CreateWheel.cmd +0 -7
  82. atomicshop/addons/package_setup/Setup in Edit mode.cmd +0 -6
  83. atomicshop/addons/package_setup/Setup.cmd +0 -7
  84. atomicshop/archiver/__init__.py +0 -0
  85. atomicshop/archiver/_search_in_zip.py +0 -189
  86. atomicshop/archiver/search_in_archive.py +0 -284
  87. atomicshop/archiver/sevenz_app_w.py +0 -86
  88. atomicshop/archiver/sevenzs.py +0 -73
  89. atomicshop/archiver/shutils.py +0 -34
  90. atomicshop/archiver/zips.py +0 -353
  91. atomicshop/file_types.py +0 -24
  92. atomicshop/pbtkmultifile_argparse.py +0 -88
  93. atomicshop/script_as_string_processor.py +0 -42
  94. atomicshop/ssh_scripts/process_from_ipv4.py +0 -37
  95. atomicshop/ssh_scripts/process_from_port.py +0 -27
  96. atomicshop/wrappers/_process_wrapper_curl.py +0 -27
  97. atomicshop/wrappers/_process_wrapper_tar.py +0 -21
  98. atomicshop/wrappers/dockerw/install_docker.py +0 -449
  99. atomicshop/wrappers/elasticsearchw/install_elastic.py +0 -233
  100. atomicshop/wrappers/ffmpegw.py +0 -125
  101. atomicshop/wrappers/fibratusw/__init__.py +0 -0
  102. atomicshop/wrappers/fibratusw/install.py +0 -80
  103. atomicshop/wrappers/mongodbw/install_mongodb_ubuntu.py +0 -100
  104. atomicshop/wrappers/mongodbw/install_mongodb_win.py +0 -244
  105. atomicshop/wrappers/process_wrapper_pbtk.py +0 -16
  106. atomicshop/wrappers/socketw/get_process.py +0 -123
  107. atomicshop/wrappers/wslw.py +0 -192
  108. atomicshop-3.3.8.dist-info/entry_points.txt +0 -2
  109. /atomicshop/{addons → a_mains/addons}/PlayWrightCodegen.cmd +0 -0
  110. /atomicshop/{addons → a_mains/addons}/ScriptExecution.cmd +0 -0
  111. /atomicshop/{addons → a_mains/addons}/inits/init_to_import_all_modules.py +0 -0
  112. /atomicshop/{addons → a_mains/addons}/process_list/ReadMe.txt +0 -0
  113. /atomicshop/{addons → a_mains/addons}/process_list/compile.cmd +0 -0
  114. /atomicshop/{addons → a_mains/addons}/process_list/compiled/Win10x64/process_list.dll +0 -0
  115. /atomicshop/{addons → a_mains/addons}/process_list/compiled/Win10x64/process_list.exp +0 -0
  116. /atomicshop/{addons → a_mains/addons}/process_list/compiled/Win10x64/process_list.lib +0 -0
  117. /atomicshop/{addons → a_mains/addons}/process_list/process_list.cpp +0 -0
  118. {atomicshop-3.3.8.dist-info → atomicshop-3.10.0.dist-info}/WHEEL +0 -0
  119. {atomicshop-3.3.8.dist-info → atomicshop-3.10.0.dist-info}/licenses/LICENSE.txt +0 -0
  120. {atomicshop-3.3.8.dist-info → atomicshop-3.10.0.dist-info}/top_level.txt +0 -0
--- atomicshop/wrappers/dockerw/install_docker.py
+++ /dev/null
@@ -1,449 +0,0 @@
- import sys
- import os
- import subprocess
- import getpass
- import tempfile
- import textwrap
- from pathlib import Path
-
- from ... import process, filesystem
- from ...permissions import permissions, ubuntu_permissions
- from ...print_api import print_api
- from .. import ubuntu_terminal
-
-
- PREPARATION_OUTPUT_DIR: str = str(Path(__file__).parent / "offline-bundle")
- PREPARATION_OUTPUT_ZIP: str = f"{PREPARATION_OUTPUT_DIR}.zip"
- GET_DOCKER_URL: str = "https://get.docker.com"
-
-
- def is_docker_installed():
-     """
-     The function will check if docker is installed.
-     :return: bool.
-     """
-
-     try:
-         # Run the command 'docker --version'
-         result = subprocess.run(['docker', '--version'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
-
-         # Check if the command was successful
-         if result.returncode == 0:
-             message = f"Docker is installed. Version: {result.stdout.strip()}"
-             print_api(message, color='green')
-             return True
-         else:
-             print_api("Docker is not installed.")
-             return False
-     except FileNotFoundError:
-         print_api("Docker command not found. Docker is not installed.")
-         return False
-
-
- def add_current_user_to_docker_group(print_kwargs: dict = None):
-     """
-     The function will add the current user to the docker group.
-
-     :param print_kwargs: dict, the print arguments.
-     :return:
-     """
-     # Check if current user that executed the script is a sudo user. If not, use the current user.
-     sudo_executer_username: str = ubuntu_permissions.get_sudo_executer_username()
-     if sudo_executer_username:
-         current_user = sudo_executer_username
-     else:
-         current_user = getpass.getuser()
-
-     # Add the current user to the docker group.
-     # subprocess.check_call(['sudo', 'usermod', '-aG', 'docker', current_user])
-     command = f"sudo usermod -aG docker {current_user}"
-     # Execute the command
-     subprocess.run(command, shell=True, capture_output=True, text=True)
-
-     # Check if the user was added to the docker group.
-     result = subprocess.run(['groups', current_user], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
-     if 'docker' in result.stdout:
-         print_api(f"User {current_user} was added to the docker group.", color='green', **(print_kwargs or {}))
-         return True
-     else:
-         print_api(f"User {current_user} was not added to the docker group. Try executing with sudo", color='red',
-                   **(print_kwargs or {}))
-         return False
-
-
- def install_docker_ubuntu(
-         use_docker_installer: bool = True,
-         rootless: bool = False,
-         add_current_user_to_docker_group_bool: bool = False
- ) -> int:
-     """
-     The function will install docker on ubuntu.
-     Note: If you want to install docker in rootless mode, you need to run the script without sudo.
-
-     :param rootless: bool, if True, the rootless installation will be performed.
-         Meaning, you will be able to run the 'docker' command without sudo and you will not need to add the
-         current user to the docker group.
-     :param use_docker_installer: bool, if True, the docker installer will be used.
-         If False, the docker will be installed using the apt package manager, custom repo and keyring.
-     :param add_current_user_to_docker_group_bool: bool, if True, the current user will be added to the docker group.
-         So the user will be able to run the 'docker' command without sudo. If you install docker in rootless mode
-         this is not needed.
-
-     Usage in main.py (run with sudo):
-         import sys
-         from atomicshop.wrappers.dockerw import install_docker
-
-
-         def main():
-             execution_result: int = install_docker.install_docker_ubuntu()
-             return execution_result
-
-
-         if __name__ == '__main__':
-             sys.exit(main())
-     """
-
-     if rootless and permissions.is_admin():
-         print_api('Rootless installation requires running the script without sudo.', color='red')
-         sys.exit()
-
-     if use_docker_installer:
-         if not ubuntu_terminal.is_executable_exists('curl'):
-             print_api('curl is not installed, installing...', color='yellow')
-             ubuntu_terminal.update_system_packages()
-             ubuntu_terminal.install_packages(['curl'])
-
-         # Use the docker installer script.
-         # The script will install docker and add the current user to the docker group.
-         # The script will also install docker-compose and docker-buildx.
-         # process.execute_script('curl -fsSL https://get.docker.com -o get-docker.sh && sh get-docker.sh', shell=True)
-         process.execute_script('curl -fsSL https://get.docker.com | sh', shell=True)
-         # subprocess.run("curl -fsSL https://get.docker.com | sh", shell=True, check=True)
-         # process.execute_script('curl -fsSL https://get.docker.com -o get-docker.sh', shell=True)
-         # process.execute_script('sh get-docker.sh', shell=True)
-         # filesystem.remove_file('get-docker.sh')
-     else:
-         # Remove the existing keyrings, so we will not be asked to overwrite it if it exists.
-         docker_keyring_file_path: str = "/etc/apt/keyrings/docker.gpg"
-         filesystem.remove_file(docker_keyring_file_path)
-
-         script = f"""
- # Step 1: Set up Docker's apt repository
- sudo apt-get update
- sudo apt-get install -y ca-certificates curl gnupg
- sudo install -m 0755 -d /etc/apt/keyrings
- curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg
- sudo chmod a+r /etc/apt/keyrings/docker.gpg
-
- # Add the repository to Apt sources
- echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu $(. /etc/os-release && echo "$VERSION_CODENAME") stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
- sudo apt-get update
-
- # Step 2: Install the Docker packages
- sudo apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin docker-ce-rootless-extras
-
- # Step 3: Verify the installation
- # sudo docker run hello-world
-
- # Add Privileges to run docker without sudo. Add current user to Docker superuser group.
- # sudo usermod -aG docker $USER
- """
-
-         process.execute_script(script, shell=True)
-
-     if rootless:
-         # Install uidmap package.
-         if not ubuntu_terminal.is_package_installed('uidmap'):
-             print_api('uidmap is not installed, installing...', color='yellow')
-             ubuntu_terminal.update_system_packages()
-             ubuntu_terminal.install_packages(['uidmap'])
-
-         with ubuntu_permissions.temporary_regular_permissions():
-             # After 'get-docker.sh' execution, we will install docker in rootless mode.
-             # process.execute_script('dockerd-rootless-setuptool.sh install', shell=True, as_regular_user=True)
-             process.execute_script(
-                 '/usr/bin/dockerd-rootless-setuptool.sh install',
-                 as_regular_user=True,
-                 shell=True,
-                 executable=None)
-
-         # Start and enable the docker service in user mode.
-         docker_start_command = ubuntu_terminal.get_command_execution_as_sudo_executer(
-             'systemctl --user start docker.service')
-         docker_enable_command = ubuntu_terminal.get_command_execution_as_sudo_executer(
-             'systemctl --user enable docker.service')
-         print_api('Starting and enabling the docker service in user mode...')
-         process.execute_script(docker_start_command, shell=True, executable=None)
-         process.execute_script(docker_enable_command, shell=True, executable=None)
-
-         print_api('Executing "loginctl enable-linger" to enable Docker to run when the user is not logged in...')
-         non_sudo_executer = ubuntu_permissions.get_sudo_executer_username()
-         # Enable lingering so Docker runs when the user is not logged in
-         process.execute_script(f'sudo loginctl enable-linger {non_sudo_executer}', shell=True)
-
-         print_api('Adding $HOME/bin to your PATH...')
-         # Add $HOME/bin to your PATH if it's not already there.
-         with ubuntu_permissions.temporary_regular_permissions():
-             ubuntu_terminal.add_path_to_bashrc(as_regular_user=True)
-
-         # Add appropriate permissions to the docker socket, so the user can run docker commands without sudo in python.
-         # with open('/etc/profile.d/docker_vars.sh', 'w') as file:
-         #     file.write('export DOCKER_HOST=unix:///run/user/1000/docker.sock')
-
-         # Since we are installing the rootless mode, this script runs without sudo, so to add the DOCKER_HOST variable
-         # to the environment, we need to add it to the /etc/profile.d/docker_vars.sh file with sudo.
-         command = "echo 'export DOCKER_HOST=unix:///run/user/1000/docker.sock' | sudo tee /etc/profile.d/docker_vars.sh"
-         subprocess.run(command, shell=True, check=True)
-
-         # ubuntu_terminal.add_line_to_bashrc(
-         #     'export DOCKER_HOST=unix:///run/user/1000/docker.sock', as_regular_user=True)
-         # process.execute_script('export DOCKER_HOST=unix:///run/user/1000/docker.sock', shell=True)
-         # Restart shell.
-         # process.execute_script('source ~/.bashrc', shell=True)
-
-     if add_current_user_to_docker_group_bool:
-         # Check if current user that executed the script is a sudo user. If not, use the current user.
-         # Add the current user to the docker group.
-         add_current_user_to_docker_group()
-
-         # Verify the installation.
-         result: list = process.execute_with_live_output('sudo docker run hello-world')
-     else:
-         result: list = process.execute_with_live_output('docker run hello-world')
-
-     print_api('\n'.join(result))
-
-     if 'Hello from Docker!' in '\n'.join(result):
-         print_api('Docker installed successfully.', color='green')
-         return 0
-     else:
-         print_api('Docker installation failed.', color='red')
-         print_api('Please check the logs above for more information.', color='red')
-         return 1
-
-
- def prepare_offline_installation_bundle():
-     # The Bash script in a single triple-quoted string - this is to easier copy-paste it if needed to run directly.
-     bash_script = textwrap.dedent(r"""#!/usr/bin/env bash
- #
- # Build an offline-install bundle for Docker Engine on Ubuntu 24.04 LTS.
- # The package list is auto-discovered from `get.docker.com --dry-run`.
- #
- # sudo ./prepare_docker_offline.sh [/path/to/output_dir]
- #
- set -Eeuo pipefail
-
- ################################################################################
- # CLI PARAMETERS
- # $1 → OUTDIR (already supported: where to build the bundle)
- # $2 → GET_DOCKER_URL (defaults to https://get.docker.com)
- # $3 → OUTPUT_ZIP (defaults to "$OUTDIR.zip")
- ################################################################################
- OUTDIR="${1:-"$PWD/offline-bundle"}"
- GET_DOCKER_URL="${2:-https://get.docker.com}"
- OUTPUT_ZIP="${3:-$OUTDIR.zip}"
-
- die() { echo "ERROR: $*" >&2; exit 1; }
- need_root() { [[ $EUID -eq 0 ]] || die "Run as root (use sudo)"; }
- need_cmd() {
- local cmd=$1
- local pkg=${2:-$1} # default package == command
- if ! command -v "$cmd" &>/dev/null; then
- echo "[*] $cmd not found – installing $pkg ..."
- apt-get update -qq
- DEBIAN_FRONTEND=noninteractive \
- apt-get install -y --no-install-recommends "$pkg" || \
- die "Unable to install required package: $pkg"
- fi
- }
-
- need_root
- need_cmd curl
-
- echo "[*] Discovering package list via get.docker.com --dry-run ..."
- DRY_LOG=$(curl -fsSL "$GET_DOCKER_URL" | bash -s -- --dry-run)
-
- echo "[*] Determining package list via --dry-run ..."
- PKGS=$(printf '%s\n' "$DRY_LOG" | sed -n 's/.* install \(.*\) >\/dev\/null.*/\1/p')
-
- if ! grep -q '\S' <<< "$PKGS"; then
- echo "No packages detected in dry-run output – aborting." >&2
- exit 1
- fi
-
- echo "[*] Install Docker before preparing the offline bundle."
- curl -fsSL "$GET_DOCKER_URL" | sh
-
- mkdir -p "$OUTDIR"/packages
- echo "[*] Output directory: $OUTDIR"
-
- echo "Packages to install:"
- echo "$PKGS"
-
- echo "[*] Downloading packages and all dependencies …"
- apt-get update -qq
- apt-get clean
- mkdir -p /var/cache/apt/archives/partial
- apt-get -y --download-only --reinstall install $PKGS
- cp -v /var/cache/apt/archives/*.deb "$OUTDIR/packages/"
- echo "[*] $(ls "$OUTDIR/packages" | wc -l) .deb files written to packages/"
-
- echo "[*] Building local Packages.gz index …"
- pushd "$OUTDIR/packages" >/dev/null
- for deb in *.deb; do
- dpkg-deb -f "$deb" Package
- done | awk '{printf "%s\tmisc\toptional\n",$1}' > override
- apt-ftparchive packages . override | tee Packages | gzip -9c > Packages.gz
- popd >/dev/null
-
-
- echo ">> Checking for Docker ..."
- command -v docker >/dev/null 2>&1 || { echo "Docker not found."; exit 1; }
-
- # Pack final bundle
- echo "[*] Creating a zip archive ..."
- parent_dir=$(dirname "$OUTDIR")
- base_name=$(basename "$OUTDIR")
-
- # Create new shell, cd into the directory, and zip the contents. So that the zip file will not contain the full path.
- (
- cd "$parent_dir"
- zip -rq "$OUTPUT_ZIP" "$base_name"
- )
-
- rm -rf "$OUTDIR"
- echo "Docker offline bundle created at $OUTPUT_ZIP"
- echo
- echo "Copy the zip file and the offline installation python script to the target machine and execute."
- """)
-
-     # Write it to a secure temporary file.
-     with tempfile.NamedTemporaryFile('w', delete=False, suffix='.sh') as f:
-         f.write(bash_script)
-         temp_path = f.name
-     os.chmod(temp_path, 0o755) # make it executable
-
-     cmd = [
-         "sudo", "bash", temp_path,
-         PREPARATION_OUTPUT_DIR,
-         GET_DOCKER_URL,
-         PREPARATION_OUTPUT_ZIP,
-     ]
-
-     # Run it and stream output live.
-     try:
-         subprocess.run(cmd, check=True)
-     finally:
-         # 5. Clean up the temp file unless you want to inspect it.
-         os.remove(temp_path)
-
-
- def install_offline_installation_bundle():
-     bash_script = textwrap.dedent(r"""#!/usr/bin/env bash
- # Offline installer for the Docker bundle produced by prepare_docker_offline.sh
- set -euo pipefail
-
- die() { echo "ERROR: $*" >&2; exit 1; }
- need_root() { [[ $EUID -eq 0 ]] || die "Run as root (use sudo)"; }
-
- need_root
-
- # ------------------------------------------------------------------------------
- # Paths
- # ------------------------------------------------------------------------------
- BUNDLE_ZIP="${1:-"$PWD/offline-bundle.zip"}"
-
- BUNDLE_DIR="${BUNDLE_ZIP%.zip}" # remove .zip suffix
- REPO_DIR="$BUNDLE_DIR/packages" # contains *.deb + Packages
- OFFLINE_LIST="/etc/apt/sources.list.d/docker-offline.list"
-
- # Extract zip archive if it exists
- if [[ -f "$BUNDLE_ZIP" ]]; then
- echo "[*] Extracting offline bundle from $BUNDLE_ZIP ..."
- mkdir -p "$BUNDLE_DIR"
- unzip -q "$BUNDLE_ZIP" -d "."
- else
- die "Bundle zip file '$BUNDLE_ZIP' not found. Provide a valid path."
- fi
-
- TEMP_PARTS="$(mktemp -d)" # empty dir ⇒ no extra lists
-
- # ------------------------------------------------------------------------------
- # Helper to clean up even if the script aborts
- # ------------------------------------------------------------------------------
- cleanup() {
- sudo rm -f "$OFFLINE_LIST"
- sudo rm -rf "$TEMP_PARTS"
- }
- trap cleanup EXIT
-
- # ------------------------------------------------------------------------------
- # 1. Add the local repository (trusted) as the *only* source we will use
- # ------------------------------------------------------------------------------
- echo "[*] Adding temporary APT source for the offline bundle …"
- echo "deb [trusted=yes] file:$REPO_DIR ./" | sudo tee "$OFFLINE_LIST" >/dev/null
-
- # Ensure plain index exists (APT always understands the un-compressed form)
- if [[ ! -f "$REPO_DIR/Packages" && -f "$REPO_DIR/Packages.gz" ]]; then
- gunzip -c "$REPO_DIR/Packages.gz" > "$REPO_DIR/Packages"
- fi
-
- # ------------------------------------------------------------------------------
- # 2. Update metadata – but ONLY from our offline list
- # ------------------------------------------------------------------------------
- echo "[*] Updating APT metadata – offline only …"
- sudo apt-get -o Dir::Etc::sourcelist="$OFFLINE_LIST" \
- -o Dir::Etc::sourceparts="$TEMP_PARTS" \
- -o APT::Get::List-Cleanup="0" \
- update -qq
-
- # ------------------------------------------------------------------------------
- # 3. Figure out which packages are inside the bundle
- # ------------------------------------------------------------------------------
- PKGS=$(awk '/^Package: /{print $2}' "$REPO_DIR/Packages")
-
- echo "[*] Installing:"
- printf ' • %s\n' $PKGS
-
- # ------------------------------------------------------------------------------
- # 4. Install them, again restricting APT to the offline repo only
- # ------------------------------------------------------------------------------
- sudo apt-get -y \
- -o Dir::Etc::sourcelist="$OFFLINE_LIST" \
- -o Dir::Etc::sourceparts="$TEMP_PARTS" \
- install $PKGS
-
- echo "[✓] Docker installed completely offline!"
-
- usage() {
- echo "Usage: $0 <image-archive.tar.gz>"
- exit 1
- }
-
- echo ">> Checking for Docker ..."
- command -v docker >/dev/null 2>&1 || {
- echo "Docker is not installed; install Docker and try again."
- exit 1
- }
-
- echo "Removing extracted files..."
- rm -rf "$BUNDLE_DIR"
- """)
-
-     # Write it to a secure temporary file.
-     with tempfile.NamedTemporaryFile('w', delete=False, suffix='.sh') as f:
-         f.write(bash_script)
-         temp_path = f.name
-     os.chmod(temp_path, 0o755) # make it executable
-
-     cmd = [
-         "sudo", "bash", temp_path,
-         PREPARATION_OUTPUT_ZIP, # $1 BUNDLE_ZIP
-     ]
-
-     # 4. Run it and stream output live.
-     try:
-         subprocess.run(cmd, check=True)
-     finally:
-         # 5. Clean up the temp file unless you want to inspect it.
-         os.remove(temp_path)
--- atomicshop/wrappers/elasticsearchw/install_elastic.py
+++ /dev/null
@@ -1,233 +0,0 @@
- import sys
-
- from ...print_api import print_api
- from ... import process
- from ...permissions import permissions
- from .. import ubuntu_terminal
- from . import config_basic, elastic_infra
-
-
- def install_elastic_kibana_ubuntu(install_elastic: bool = True, install_kibana: bool = True):
-     """
-     The function will install docker on ubuntu.
-
-     :param install_elastic: bool, if True, install Elasticsearch.
-     :param install_kibana: bool, if True, install Kibana.
-
-     Usage in main.py (run with sudo):
-         from atomicshop.wrappers.elasticw import install_elastic
-
-
-         def main():
-             install_elastic.install_elastic_ubuntu()
-
-
-         if __name__ == '__main__':
-             main()
-     """
-
-     # This is pure bash script.
-     """
- #!/bin/bash
-
- # Color text in red.
- echo_red() {
- local color="\e[31m" # Red color
- local reset="\e[0m" # Reset formatting
- echo -e "${color}$1${reset}"
- }
-
- # Function to check if a service is running
- check_service_running() {
- local service_name=$1
- local status=$(systemctl is-active "$service_name")
-
- if [ "$status" == "active" ]; then
- echo "$service_name service is active and running."
- return 0
- else
- echo_red "$service_name service is not running or has failed. Status: $service_status, Failed: $service_failed"
- return 1
- fi
- }
-
- # Update and upgrade system packages
- sudo apt-get update && sudo apt-get upgrade -y
-
- # Install necessary dependencies
- sudo apt-get install apt-transport-https openjdk-11-jdk wget -y
-
- # Download and install the GPG signing key
- wget -qO - https://artifacts.elastic.co/GPG-KEY-elasticsearch | gpg --dearmor | sudo tee /usr/share/keyrings/elasticsearch-keyring.gpg > /dev/null
-
- # Add the Elastic repository to the system
- echo "deb [signed-by=/usr/share/keyrings/elasticsearch-keyring.gpg] https://artifacts.elastic.co/packages/8.x/apt stable main" | sudo tee /etc/apt/sources.list.d/elastic-8.x.list
-
- # Update package index
- sudo apt-get update
-
- # Install Elasticsearch
- sudo apt-get install elasticsearch -y
-
- # Path to the Elasticsearch configuration file
- CONFIG_FILE="/etc/elasticsearch/elasticsearch.yml"
-
- # Check if the configuration file exists
- if [ ! -f "$CONFIG_FILE" ]; then
- echo "Configuration file does not exist at $CONFIG_FILE."
- exit 1
- fi
-
- # Function to check the setting in the configuration file
- check_setting() {
- if grep -q "^xpack.security.enabled: false" "$CONFIG_FILE"; then
- echo "The setting is confirmed to be 'xpack.security.enabled: false'."
- else
- echo "Failed to set 'xpack.security.enabled: false'."
- exit 1
- fi
- }
-
- # Check if 'xpack.security.enabled' is set to 'false'
- if grep -q "^xpack.security.enabled: false" "$CONFIG_FILE"; then
- echo "The setting is already set to false."
- elif grep -q "^xpack.security.enabled: true" "$CONFIG_FILE"; then
- # If the setting is true, change it to false
- sudo sed -i 's/^xpack.security.enabled: true/xpack.security.enabled: false/' "$CONFIG_FILE"
- echo "Changed xpack.security.enabled to false."
- check_setting
- else
- # If the setting doesn't exist, add it
- echo "xpack.security.enabled: false" | sudo tee -a "$CONFIG_FILE" > /dev/null
- echo "Added xpack.security.enabled: false to the configuration."
- check_setting
- fi
-
- # Start and enable Elasticsearch service
- sudo systemctl start elasticsearch
- sudo systemctl enable elasticsearch
-
- echo "Waiting 30 seconds for program to start before availability check..."
- sleep 30
-
- # Check if Elasticsearch service is running
- if ! check_service_running "elasticsearch"; then
- echo "Elasticsearch service failed to start. Exiting."
- exit 1
- fi
-
- # Function to check if Elasticsearch is up and running
- check_elasticsearch() {
- max_attempts=5
- wait_seconds=10
-
- for ((i=1; i<=max_attempts; i++)); do
- echo "Checking if Elasticsearch is running (Attempt $i/$max_attempts)..."
-
- # Using curl to get the HTTP status code
- status=$(curl --write-out %{http_code} --silent --output /dev/null http://localhost:9200)
-
- if [ "$status" -eq 200 ]; then
- echo "Elasticsearch is up and running."
- return 0
- else
- echo "Elasticsearch is not running. Status code: $status"
- fi
-
- echo "Waiting for Elasticsearch to start..."
- sleep $wait_seconds
- done
-
- echo "Elasticsearch did not start within the expected time."
- return 1
- }
-
- # Check if Elasticsearch is running
- if ! check_elasticsearch; then
- echo "Elasticsearch failed to start. Exiting."
- exit 1
- fi
-
- # Install Kibana
- sudo apt-get install kibana -y
-
- # Start and enable Kibana service
- sudo systemctl start kibana
- sudo systemctl enable kibana
-
- echo "Waiting 30 seconds for program to start before availability check..."
- sleep 30
-
- # Check if Kibana service is running
- if ! check_service_running "kibana"; then
- echo "Kibana service failed to start. Exiting."
- exit 1
- fi
-
- # Print status
- echo "Elasticsearch and Kibana installation completed."
- echo "Elasticsearch is running on http://localhost:9200"
- echo "Kibana is running on http://localhost:5601"
- """
-
-     if not install_elastic and not install_kibana:
-         raise ValueError("At least one of the services (Elasticsearch or Kibana) must be installed.")
-
-     # Update and upgrade system packages.
-     ubuntu_terminal.update_system_packages()
-     ubuntu_terminal.upgrade_system_packages()
-
-     # Install necessary dependencies.
-     ubuntu_terminal.install_packages(config_basic.UBUNTU_DEPENDENCY_PACKAGES)
-
-     # Install the GPG key and add elastic repository.
-     script = f"""
- # Download and install the GPG signing key
- wget -qO - https://artifacts.elastic.co/GPG-KEY-elasticsearch | gpg --dearmor | sudo tee /usr/share/keyrings/elasticsearch-keyring.gpg > /dev/null
-
- # Add the Elastic repository to the system
- echo "deb [signed-by=/usr/share/keyrings/elasticsearch-keyring.gpg] https://artifacts.elastic.co/packages/8.x/apt stable main" | sudo tee /etc/apt/sources.list.d/elastic-8.x.list
- """
-     process.execute_script(script, shell=True)
-
-     # Update system with elastic search packages.
-     ubuntu_terminal.update_system_packages()
-
-     if install_elastic:
-         # Install Elasticsearch.
-         ubuntu_terminal.install_packages([config_basic.UBUNTU_ELASTIC_PACKAGE_NAME])
-
-         if not permissions.is_admin():
-             print_api("This script requires root privileges...", color='red')
-             sys.exit(1)
-
-         # Check if the configuration file exists.
-         elastic_infra.is_elastic_config_file_exists(exit_on_error=True, output_message=True)
-
-         # Check if the specific setting exists or not and set it to false.
-         elastic_infra.modify_xpack_security_setting(setting=False, output_message=True)
-
-         # Check if the setting was really set to false.
-         if elastic_infra.check_xpack_security_setting() is False:
-             print_api(f"The setting is confirmed to be [{config_basic.XPACK_SECURITY_SETTING_NAME}: false].")
-         else:
-             print_api(f"Failed to set [{config_basic.XPACK_SECURITY_SETTING_NAME}: false].")
-             sys.exit(1)
-
-         elastic_infra.start_elastic_and_check_service_availability()
-
-         print_api("Creating custom JVM options file with 4GB memory usage.")
-         elastic_infra.create_jvm_options_custom_4gb_memory_heap_file()
-
-     if install_kibana:
-         # Install Kibana.
-         ubuntu_terminal.install_packages([config_basic.UBUNTU_KIBANA_PACKAGE_NAME])
-
-         # Start and enable Kibana service.
-         elastic_infra.start_kibana_and_check_service_availability()
-
-     print_api("Installation completed.", color='green')
-     if install_elastic:
-         print_api(f"Default Elasticsearch on {config_basic.DEFAULT_ELASTIC_URL}")
-     if install_kibana:
-         print_api(f"Default Kibana on {config_basic.DEFAULT_KIBANA_URL}")