atomicshop 3.3.8__py3-none-any.whl → 3.10.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of atomicshop might be problematic.
- atomicshop/__init__.py +1 -1
- atomicshop/a_mains/get_local_tcp_ports.py +85 -0
- atomicshop/a_mains/install_ca_certificate.py +172 -0
- atomicshop/a_mains/process_from_port.py +119 -0
- atomicshop/a_mains/set_default_dns_gateway.py +90 -0
- atomicshop/basics/strings.py +1 -1
- atomicshop/certificates.py +2 -2
- atomicshop/dns.py +26 -28
- atomicshop/etws/traces/trace_tcp.py +1 -2
- atomicshop/mitm/centered_settings.py +133 -0
- atomicshop/mitm/config_static.py +22 -44
- atomicshop/mitm/connection_thread_worker.py +383 -165
- atomicshop/mitm/engines/__parent/recorder___parent.py +1 -1
- atomicshop/mitm/engines/__parent/requester___parent.py +1 -1
- atomicshop/mitm/engines/__parent/responder___parent.py +15 -2
- atomicshop/mitm/engines/create_module_template.py +1 -2
- atomicshop/mitm/import_config.py +91 -89
- atomicshop/mitm/initialize_engines.py +1 -2
- atomicshop/mitm/message.py +5 -4
- atomicshop/mitm/mitm_main.py +238 -122
- atomicshop/mitm/recs_files.py +61 -5
- atomicshop/mitm/ssh_tester.py +82 -0
- atomicshop/mitm/statistic_analyzer.py +33 -12
- atomicshop/mitm/statistic_analyzer_helper/moving_average_helper.py +104 -31
- atomicshop/networks.py +160 -92
- atomicshop/package_mains_processor.py +84 -0
- atomicshop/permissions/ubuntu_permissions.py +47 -0
- atomicshop/print_api.py +3 -5
- atomicshop/process.py +11 -4
- atomicshop/python_functions.py +23 -108
- atomicshop/speech_recognize.py +8 -0
- atomicshop/ssh_remote.py +140 -164
- atomicshop/web.py +63 -22
- atomicshop/web_apis/google_llm.py +22 -14
- atomicshop/wrappers/ctyping/msi_windows_installer/cabs.py +2 -1
- atomicshop/wrappers/ctyping/msi_windows_installer/extract_msi_main.py +2 -1
- atomicshop/wrappers/dockerw/dockerw.py +2 -2
- atomicshop/wrappers/elasticsearchw/config_basic.py +0 -12
- atomicshop/wrappers/elasticsearchw/elastic_infra.py +0 -190
- atomicshop/wrappers/factw/install/pre_install_and_install_before_restart.py +5 -5
- atomicshop/wrappers/githubw.py +180 -68
- atomicshop/wrappers/loggingw/consts.py +1 -1
- atomicshop/wrappers/loggingw/handlers.py +1 -1
- atomicshop/wrappers/loggingw/loggingw.py +20 -4
- atomicshop/wrappers/loggingw/reading.py +18 -0
- atomicshop/wrappers/mongodbw/mongo_infra.py +0 -38
- atomicshop/wrappers/netshw.py +124 -3
- atomicshop/wrappers/playwrightw/scenarios.py +1 -1
- atomicshop/wrappers/powershell_networking.py +80 -0
- atomicshop/wrappers/psutilw/psutil_networks.py +9 -0
- atomicshop/wrappers/pywin32w/win_event_log/fetch.py +174 -0
- atomicshop/wrappers/pywin32w/win_event_log/subscribes/process_create.py +3 -105
- atomicshop/wrappers/pywin32w/win_event_log/subscribes/process_terminate.py +3 -57
- atomicshop/wrappers/pywin32w/wmis/win32_networkadapterconfiguration.py +12 -27
- atomicshop/wrappers/pywin32w/wmis/win32networkadapter.py +15 -9
- atomicshop/wrappers/socketw/certificator.py +19 -9
- atomicshop/wrappers/socketw/creator.py +101 -14
- atomicshop/wrappers/socketw/dns_server.py +17 -5
- atomicshop/wrappers/socketw/exception_wrapper.py +21 -16
- atomicshop/wrappers/socketw/process_getter.py +86 -0
- atomicshop/wrappers/socketw/receiver.py +29 -9
- atomicshop/wrappers/socketw/sender.py +10 -9
- atomicshop/wrappers/socketw/sni.py +31 -10
- atomicshop/wrappers/socketw/{base.py → socket_base.py} +33 -1
- atomicshop/wrappers/socketw/socket_client.py +11 -10
- atomicshop/wrappers/socketw/socket_wrapper.py +125 -32
- atomicshop/wrappers/socketw/ssl_base.py +6 -2
- atomicshop/wrappers/ubuntu_terminal.py +21 -18
- atomicshop/wrappers/win_auditw.py +189 -0
- {atomicshop-3.3.8.dist-info → atomicshop-3.10.0.dist-info}/METADATA +25 -30
- {atomicshop-3.3.8.dist-info → atomicshop-3.10.0.dist-info}/RECORD +83 -109
- atomicshop/_basics_temp.py +0 -101
- atomicshop/a_installs/ubuntu/docker_rootless.py +0 -11
- atomicshop/a_installs/ubuntu/docker_sudo.py +0 -11
- atomicshop/a_installs/ubuntu/elastic_search_and_kibana.py +0 -10
- atomicshop/a_installs/ubuntu/mongodb.py +0 -12
- atomicshop/a_installs/win/fibratus.py +0 -9
- atomicshop/a_installs/win/mongodb.py +0 -9
- atomicshop/a_installs/win/wsl_ubuntu_lts.py +0 -10
- atomicshop/addons/a_setup_scripts/install_psycopg2_ubuntu.sh +0 -3
- atomicshop/addons/package_setup/CreateWheel.cmd +0 -7
- atomicshop/addons/package_setup/Setup in Edit mode.cmd +0 -6
- atomicshop/addons/package_setup/Setup.cmd +0 -7
- atomicshop/archiver/__init__.py +0 -0
- atomicshop/archiver/_search_in_zip.py +0 -189
- atomicshop/archiver/search_in_archive.py +0 -284
- atomicshop/archiver/sevenz_app_w.py +0 -86
- atomicshop/archiver/sevenzs.py +0 -73
- atomicshop/archiver/shutils.py +0 -34
- atomicshop/archiver/zips.py +0 -353
- atomicshop/file_types.py +0 -24
- atomicshop/pbtkmultifile_argparse.py +0 -88
- atomicshop/script_as_string_processor.py +0 -42
- atomicshop/ssh_scripts/process_from_ipv4.py +0 -37
- atomicshop/ssh_scripts/process_from_port.py +0 -27
- atomicshop/wrappers/_process_wrapper_curl.py +0 -27
- atomicshop/wrappers/_process_wrapper_tar.py +0 -21
- atomicshop/wrappers/dockerw/install_docker.py +0 -449
- atomicshop/wrappers/elasticsearchw/install_elastic.py +0 -233
- atomicshop/wrappers/ffmpegw.py +0 -125
- atomicshop/wrappers/fibratusw/__init__.py +0 -0
- atomicshop/wrappers/fibratusw/install.py +0 -80
- atomicshop/wrappers/mongodbw/install_mongodb_ubuntu.py +0 -100
- atomicshop/wrappers/mongodbw/install_mongodb_win.py +0 -244
- atomicshop/wrappers/process_wrapper_pbtk.py +0 -16
- atomicshop/wrappers/socketw/get_process.py +0 -123
- atomicshop/wrappers/wslw.py +0 -192
- atomicshop-3.3.8.dist-info/entry_points.txt +0 -2
- /atomicshop/{addons → a_mains/addons}/PlayWrightCodegen.cmd +0 -0
- /atomicshop/{addons → a_mains/addons}/ScriptExecution.cmd +0 -0
- /atomicshop/{addons → a_mains/addons}/inits/init_to_import_all_modules.py +0 -0
- /atomicshop/{addons → a_mains/addons}/process_list/ReadMe.txt +0 -0
- /atomicshop/{addons → a_mains/addons}/process_list/compile.cmd +0 -0
- /atomicshop/{addons → a_mains/addons}/process_list/compiled/Win10x64/process_list.dll +0 -0
- /atomicshop/{addons → a_mains/addons}/process_list/compiled/Win10x64/process_list.exp +0 -0
- /atomicshop/{addons → a_mains/addons}/process_list/compiled/Win10x64/process_list.lib +0 -0
- /atomicshop/{addons → a_mains/addons}/process_list/process_list.cpp +0 -0
- {atomicshop-3.3.8.dist-info → atomicshop-3.10.0.dist-info}/WHEEL +0 -0
- {atomicshop-3.3.8.dist-info → atomicshop-3.10.0.dist-info}/licenses/LICENSE.txt +0 -0
- {atomicshop-3.3.8.dist-info → atomicshop-3.10.0.dist-info}/top_level.txt +0 -0
atomicshop/archiver/zips.py
DELETED
@@ -1,353 +0,0 @@
-import os
-import time
-import zipfile
-from io import BytesIO
-from typing import Union, Literal
-
-from .. import filesystem, print_api
-
-
-def is_zip_zipfile(file_object: Union[str, bytes]) -> bool:
-    """
-    Function checks if the file is a zip file.
-    :param file_object: can be two types:
-        string, full path to the file.
-        bytes or BytesIO, the bytes of the file.
-    :return: boolean.
-    """
-
-    try:
-        if isinstance(file_object, bytes):
-            with BytesIO(file_object) as file_object:
-                with zipfile.ZipFile(file_object) as zip_object:
-                    zip_object.testzip()
-                    return True
-        elif isinstance(file_object, str):
-            with zipfile.ZipFile(file_object) as zip_object:
-                zip_object.testzip()
-                return True
-    except zipfile.BadZipFile:
-        return False
-
-
-def is_zip_magic_number(file_path: str) -> bool:
-    """
-    Function checks if the file is a zip file using magic number.
-    :param file_path: string, full path to the file.
-    :return: boolean.
-
-    50 4B 03 04: This is the most common signature, found at the beginning of a ZIP file.
-        It signifies the start of a file within the ZIP archive and is present in almost all ZIP files.
-        Each file within the ZIP archive starts with this signature.
-    50 4B 05 06: This is the end of central directory record signature.
-        It's found at the end of a ZIP file and is essential for identifying the structure of the ZIP archive,
-        especially in cases where the file is split or is a multipart archive.
-    50 4B 07 08: This signature is used for spanned ZIP archives (also known as split or multi-volume ZIP archives).
-        It's found in the end of central directory locator for ZIP files that are split across multiple volumes.
-    """
-
-    with open(file_path, 'rb') as file:
-        # Read the first 4 bytes of the file
-        signature = file.read(4)
-
-    # Check if the signature matches any of the ZIP signatures
-    return signature in [b'PK\x03\x04', b'PK\x05\x06', b'PK\x07\x08']
-
-
-def extract_archive_with_zipfile(
-        archive_path: str,
-        extract_directory: str = None,
-        files_without_directories: bool = False,
-        remove_first_directory: bool = False,
-        print_kwargs: dict = None
-) -> str:
-    """
-    Function will extract the archive using standard library 'zipfile'.
-    This method preserves original date and time of the files inside the archive.
-
-    :param archive_path: string, full path to archived file.
-    :param extract_directory: string, full path to directory that the files will be extracted to.
-        If not specified, the files will be extracted to the same directory as the archived file, using the file name
-        without extension as the directory name.
-    :param files_without_directories: boolean, default 'False'.
-        'True': All the files in the archive will be extracted without subdirectories hierarchy.
-            Meaning, that if there are duplicate file names, the latest file with the same file name will overwrite
-            all the rest of the files with the same name.
-        'False': Subdirectory hierarchy will be preserved as it is currently in the archived file.
-    :param remove_first_directory: boolean, default is 'False'.
-        'True': all the files will be extracted without first directory in the hierarchy.
-        Example: package_some_name_1.1.1_build/subdir1/file.exe
-        Will be extracted as: subdir/file.exe
-    :param print_kwargs: dict, kwargs for print_api.
-
-    :return: string, full path to directory that the files were extracted to.
-    """
-
-    if print_kwargs is None:
-        print_kwargs = dict()
-
-    # If 'extract_directory' is not specified, extract to the same directory as the archived file.
-    if extract_directory is None:
-        extract_directory = (
-            filesystem.get_file_directory(archive_path) + os.sep +
-            filesystem.get_file_name_without_extension(archive_path))
-
-    print_api.print_api(f'Extracting to directory: {extract_directory}', **print_kwargs)
-
-    # initiating the archived file path as 'zipfile.ZipFile' object.
-    with zipfile.ZipFile(archive_path) as zip_object:
-        # '.infolist()' method of the object contains all the directories and files that are in the archive including
-        # information about each one, like date and time of archiving.
-        for zip_info in zip_object.infolist():
-            # '.filename' attribute of the 'infolist()' method is relative path to each directory and file.
-            # If 'filename' ends with '/' it is a directory (it doesn't matter if it is windows or *nix)
-            # If so, skip current iteration.
-            if zip_info.filename[-1] == '/':
-                continue
-
-            if files_without_directories:
-                # Put into 'filename' the string that contains only the filename without subdirectories.
-                zip_info.filename = os.path.basename(zip_info.filename)
-            elif remove_first_directory:
-                # Cut the first directory from the filename.
-                zip_info.filename = zip_info.filename.split('/', maxsplit=1)[1]
-
-            print_api.print_api(f'Extracting: {zip_info.filename}', **print_kwargs)
-
-            # Extract current file from the archive using 'zip_info' of the current file with 'filename' that we
-            # updated under specified parameters to specified directory.
-            zip_object.extract(zip_info, extract_directory)
-
-            # === Change the date and time of extracted file from current time to the time specified in 'zip_info'.
-            # Get full path to extracted file.
-            extracted_file_path: str = extract_directory + os.sep + zip_info.filename
-            # Create needed datetime object with original archived datetime from 'zip_info.date_time'.
-            date_time = time.mktime(zip_info.date_time + (0, 0, -1))
-            # Using 'os' library, changed the datetime of the file to the object created in previous step.
-            os.utime(extracted_file_path, (date_time, date_time))
-    print_api.print_api('Extraction done.', color="green", **print_kwargs)
-
-    return extract_directory
-
-
-def get_file_list_from_zip(file_path: str) -> list:
-    """
-    Function returns the list of file names and their relative directories inside the zip file.
-    :param file_path: string, full path to the zip file.
-    :return: list of strings.
-    """
-
-    with zipfile.ZipFile(file_path, 'r') as zip_object:
-        return zip_object.namelist()
-
-
-def archive_directory(
-        directory_path: str,
-        compression: Literal[
-            'store',
-            'deflate',
-            'bzip2',
-            'lzma'] = 'deflate',
-        include_root_directory: bool = True,
-        remove_original: bool = False
-) -> str:
-    """
-    Function archives the directory.
-    :param directory_path: string, full path to the directory.
-    :param compression: string, default is 'deflate'.
-        'store': No compression.
-        'deflate': Standard ZIP compression.
-        'bzip2': BZIP2 compression.
-            Provides better compression than Deflate but is typically slower. This method might not be supported by
-            all ZIP utilities.
-        'lzma': LZMA compression.
-            high compression ratios but is also slower compared to Deflate. This method is less commonly used and
-            may not be supported by all ZIP utilities.
-    :param include_root_directory: boolean, default is 'True'.
-        'True': The root directory will be included in the archive.
-        'False': The root directory will not be included in the archive.
-        True is usually the case in most archiving utilities.
-    :param remove_original: boolean, default is 'False'. If 'True', the original directory will be removed.
-    :return: string, full path to the archived file.
-    """
-
-    if compression == 'store':
-        compression_method = zipfile.ZIP_STORED
-    elif compression == 'deflate':
-        compression_method = zipfile.ZIP_DEFLATED
-    elif compression == 'bzip2':
-        compression_method = zipfile.ZIP_BZIP2
-    elif compression == 'lzma':
-        compression_method = zipfile.ZIP_LZMA
-    else:
-        raise ValueError(f"Unsupported compression method: {compression}")
-
-    archive_path: str = directory_path + '.zip'
-    with zipfile.ZipFile(archive_path, 'w', compression_method) as zip_object:
-        for root, _, files in os.walk(directory_path):
-            for file in files:
-                file_path = os.path.join(root, file)
-
-                # If including the root directory, use the relative path from the parent directory of the root
-                if include_root_directory:
-                    arcname = os.path.relpath(file_path, os.path.dirname(directory_path))
-                else:
-                    arcname = os.path.relpath(file_path, directory_path)
-
-                zip_object.write(file_path, arcname)
-
-    if remove_original:
-        filesystem.remove_directory(directory_path)
-
-    return archive_path
-
-
-# def search_file_in_zip(
-#         file_path: str = None,
-#         file_bytes: bytes = None,
-#         file_names_to_search: list[str] = None,
-#         case_sensitive: bool = True,
-#         return_first_only: bool = False,
-#         return_empty_list_per_file_name: bool = False,
-#         recursive: bool = False,
-#         callback_functions: list = None,
-#         extract_file_to_path: str = None
-# ) -> dict[str, list[bytes]]:
-#     """
-#     Function searches for the file names inside the zip file and returns a dictionary where the keys are the
-#     names of the callback functions and the values are lists of found file bytes.
-#     :param file_path: string, full path to the zip file.
-#     :param file_bytes: bytes, the bytes of the zip file.
-#     :param file_names_to_search: list of strings, the names of the files to search.
-#     :param case_sensitive: boolean, default is 'True'. Determines if file name search should be case sensitive.
-#     :param return_first_only: boolean, default is 'False'. Return only the first found file for each file name.
-#     :param return_empty_list_per_file_name: boolean, default is 'False'.
-#         True: Return empty list for each file name that wasn't found.
-#         False: Don't return empty list for each file name that wasn't found.
-#     :param recursive: boolean, default is 'False'. If True, search for file names recursively in nested zip files.
-#     :param callback_functions: list of callables, default is None. Each function takes a file name and should return a
-#         boolean that will tell the main function if this file is 'found' or not.
-#     :param extract_file_to_path: string, full path to the directory where the found files should be extracted.
-#     :return: dictionary of lists of bytes.
-#     """
-#
-#     def get_unique_filename(directory, filename):
-#         """
-#         Generates a unique filename by appending a number if the file already exists.
-#         """
-#         name, ext = os.path.splitext(filename)
-#         counter = 1
-#         unique_filename = filename
-#         while os.path.exists(os.path.join(directory, unique_filename)):
-#             unique_filename = f"{name}_{counter}{ext}"
-#             counter += 1
-#         return unique_filename
-#
-#     def is_zip_file(file, zip_obj):
-#         try:
-#             with zip_obj.open(file) as file_data:
-#                 with zipfile.ZipFile(BytesIO(file_data.read())) as zip_file:
-#                     if zip_file.testzip() is None:  # No errors found
-#                         return True
-#         except zipfile.BadZipFile:
-#             return False
-#         return False
-#
-#     def match_file_name(target, current):
-#         if case_sensitive:
-#             return current.endswith(target)
-#         else:
-#             return current.lower().endswith(target.lower())
-#
-#     def search_in_zip(zip_obj, file_names, results, found_set):
-#         for item in zip_obj.infolist():
-#             if item.filename.endswith('/'):  # Skip directories
-#                 continue
-#             is_nested_zip = recursive and is_zip_file(item.filename, zip_obj)
-#
-#             with zip_obj.open(item) as file_data:
-#                 archived_file_bytes = file_data.read()
-#
-#             # This is needed to know if the file should be extracted to directory or not.
-#             should_extract = False
-#
-#             name_matched = False
-#             if file_names is not None:
-#                 name_matched = any(match_file_name(file_name, item.filename) for file_name in file_names)
-#                 if name_matched:
-#                     should_extract = True
-#
-#             callback_matched = False
-#             if callback_functions:
-#                 for callback in callback_functions:
-#                     callback_result = callback(archived_file_bytes)
-#                     if callback_result:
-#                         callback_matched = True
-#                         # Initialize key for callback function name if not present
-#                         if callback.__name__ not in results:
-#                             results[callback.__name__] = []
-#                         file_info = {
-#                             'bytes': archived_file_bytes,
-#                             'name': item.filename,
-#                             'size': item.file_size,
-#                             'modified_time': item.date_time
-#                         }
-#                         results[callback.__name__].append(file_info)
-#                         if return_first_only:
-#                             found_set.add(item.filename)
-#
-#                         should_extract = True
-#                         break  # Stop checking other callbacks if one has found it
-#
-#             if should_extract and extract_file_to_path:
-#                 unique_filename = get_unique_filename(extract_file_to_path, os.path.basename(item.filename))
-#                 with open(os.path.join(extract_file_to_path, unique_filename), 'wb') as f:
-#                     f.write(archived_file_bytes)
-#
-#             if not callback_matched:
-#                 if is_nested_zip:
-#                     # If the file is a nested ZIP and hasn't matched a callback, search recursively
-#                     nested_zip_bytes = BytesIO(archived_file_bytes)
-#                     with zipfile.ZipFile(nested_zip_bytes) as nested_zip:
-#                         search_in_zip(nested_zip, file_names, results, found_set)
-#                 elif name_matched:
-#                     # Handle name match when no callbacks are provided or no callback matched
-#                     if item.filename not in results:
-#                         results[item.filename] = []
-#                     file_info = {
-#                         'bytes': archived_file_bytes,
-#                         'name': item.filename,
-#                         'size': item.file_size,
-#                         'modified_time': item.date_time
-#                     }
-#                     results[item.filename].append(file_info)
-#                     if return_first_only:
-#                         found_set.add(item.filename)  # Mark as found
-#
-#             if file_names is not None and len(found_set) == len(file_names):
-#                 return  # All files found, stop searching
-#
-#     if file_names_to_search is None and callback_functions is None:
-#         raise ValueError("Either file_names_to_search or callback_functions must be provided.")
-#
-#     # Initialize results dictionary.
-#     if callback_functions:
-#         results = {callback.__name__: [] for callback in callback_functions}
-#     else:
-#         results = {}
-#
-#     found_set = set()
-#     if file_bytes is not None:
-#         with zipfile.ZipFile(BytesIO(file_bytes), 'r') as zip_ref:
-#             search_in_zip(zip_ref, file_names_to_search, results, found_set)
-#     elif file_path is not None:
-#         with zipfile.ZipFile(file_path, 'r') as zip_ref:
-#             search_in_zip(zip_ref, file_names_to_search, results, found_set)
-#     else:
-#         raise ValueError("Either file_path or file_bytes must be provided.")
-#
-#     if not return_empty_list_per_file_name:
-#         # Filter out keys with empty lists
-#         results = {key: value for key, value in results.items() if value}
-#
-#     return results
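The docstring of the removed is_zip_magic_number() above lists the three ZIP signatures (local file header, end-of-central-directory record, spanned-archive locator). For reference, the same check can be reproduced with the standard library alone; this is an illustrative sketch, not part of atomicshop, and the names are mine:

    import zipfile

    # The three signatures described in the removed docstring: local file header,
    # end-of-central-directory record, and the spanned (multi-volume) archive locator.
    ZIP_SIGNATURES = (b'PK\x03\x04', b'PK\x05\x06', b'PK\x07\x08')

    def looks_like_zip(file_path: str) -> bool:
        # Cheap magic-number check: only the first four bytes are read.
        with open(file_path, 'rb') as file_object:
            return file_object.read(4) in ZIP_SIGNATURES

    def is_valid_zip(file_path: str) -> bool:
        # Stricter check: the stdlib parses the end-of-central-directory record.
        return zipfile.is_zipfile(file_path)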
atomicshop/file_types.py
DELETED
@@ -1,24 +0,0 @@
-from typing import Union
-
-import magic
-
-
-def get_mime_type(file_object: Union[str, bytes]):
-    """
-    Determine the MIME type of the given input.
-    The input can be a file path (string) or a bytes object.
-
-    :param file_object: File path as a string or bytes object.
-    :return: MIME type as a string.
-    """
-    mime = magic.Magic(mime=True)
-
-    # Check if input is a file path (str) or bytes
-    if isinstance(file_object, str):
-        # Assuming input_data is a file path
-        return mime.from_file(file_object)
-    elif isinstance(file_object, bytes):
-        # Assuming input_data is bytes
-        return mime.from_buffer(file_object)
-    else:
-        raise TypeError("Input must be a file path (str) or bytes object.")
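The removed file_types.get_mime_type() was a thin wrapper around the third-party python-magic package; callers that still need it can inline the same two calls. A minimal sketch, assuming python-magic and the libmagic system library are installed (the function name here is mine):

    import magic  # third-party 'python-magic', backed by the libmagic system library

    def mime_type_of(file_object) -> str:
        # Same dispatch as the removed helper: path string vs. raw bytes.
        mime = magic.Magic(mime=True)
        if isinstance(file_object, str):
            return mime.from_file(file_object)
        if isinstance(file_object, bytes):
            return mime.from_buffer(file_object)
        raise TypeError("Input must be a file path (str) or bytes object.")

    # mime_type_of(b'PK\x03\x04\x14\x00') typically reports 'application/zip'.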
atomicshop/pbtkmultifile_argparse.py
DELETED
@@ -1,88 +0,0 @@
-# v1.0.1 - 26.03.2023 23:50
-import sys
-import argparse
-from argparse import RawTextHelpFormatter
-
-
-class ArgparseWrapper:
-    """
-    # Usage in the main:
-    args = ArgparseWrapper().parser_arguments
-    # Defining variables to each argument
-    input_path: str = args.input
-    output_path: str = args.output
-    """
-
-    def __init__(self):
-        self.application_short: str = 'pbtkMultiFile'
-        self.application_full: str = 'pbtk Multi File wrapper'
-        self.version: str = '1.0.0'
-        self.description: str = 'Find ".proto" files in directory of binaries.'
-        self.description_full: str = f'{self.application_full} v{self.version}\n' \
-                                     f'Description: {self.description}'
-        self.usage_variable: str = "%(prog)s [-h] -in folder_with_binary_files -out full_path_to_output_files\n" \
-                                   "Input or Output path shouldn't end with separator. Example: '\\'."
-        self.parser_arguments = None
-
-        # Execute argparse.
-        self.define_argparse()
-
-    # Function to define argument parser
-    def define_argparse(self):
-        # Create the parser
-        # formatter_class=RawTextHelpFormatter: shows raw text and not the default argparse text parsing.
-        parser = argparse.ArgumentParser(description=self.description_full,
-                                         usage=self.usage_variable,
-                                         formatter_class=RawTextHelpFormatter)
-
-        # Add arguments
-        parser.add_argument('-in', '--input',
-                            action='store', type=str, metavar='PATH_TO_FOLDER_WITH_BINARY_FILES',
-                            required=True,
-                            help='Provide full path to folder that contains binary files.')
-        parser.add_argument('-out', '--output', action='store', type=str, metavar='PATH_TO_SAVE_EXPORTED_FILES',
-                            required=True,
-                            help='Provide full path where you want to store exported file.')
-
-        # A problem before executing 'parse_args()'.
-        # If we get directory path as argument, on windows we can get a path that ends with backslash:
-        # C:\Users\user\documents\
-        # This is the default behaviour of windows when copying path of only the directory.
-        # When the path contains spaces, we need to pass it with double quotes:
-        # "C:\Users\user\documents\some folder name\another\"
-        # When python receives the arguments from CMD they get already parsed, meaning python can do nothing about it.
-        # From input:
-        # python_script.py -in "C:\some folder name\another\" -out "C:\some folder name\another1\"
-        # You will get output:
-        # ['python_script.py',
-        # '-in',
-        # 'C:\some folder name\another" -out C:\some',
-        # 'folder',
-        # 'name\another1"]
-        # 'parse_args()' gets its input from 'sys.argv'. Meaning, you will need to do some manipulations on that
-        # Before executing the argparse argument parsing.
-        # Probably the fix should be individual for each case.
-        # The simplest solution though is to tell the user not to use backslash in the end of directory in case
-        # of exception.
-
-        try:
-            # Execute parse_args()
-            parsed_arguments = parser.parse_args()
-        # The only thing that you can catch on without modifying Argparse code is 'SystemExit' exception.
-        # You can also provide just 'except' without anything, which isn't the best practice.
-        # Another fix would be to use
-        # argparse.ArgumentParser(exit_on_error=False)
-        # But as of python 3.10.8 it is not working yet.
-        except SystemExit as exception_object:
-            print('======================================')
-            print('[*] Info: Error in provided arguments.')
-            print('[*] Tip: Check if you have backslash "\\" in the end of folder path, if so remove it.')
-            print('======================================')
-            sys.exit()
-
-        # if the folder path argument in the middle will have backslash "\" it will cause an exception.
-        # If the backslash will be in the end, it will not cause exception, but the string will end with double quotes.
-        parsed_arguments.input = parsed_arguments.input.replace('"', '')
-        parsed_arguments.output = parsed_arguments.output.replace('"', '')

-        self.parser_arguments = parsed_arguments
atomicshop/script_as_string_processor.py
DELETED
@@ -1,42 +0,0 @@
-"""Loading resources using stdlib importlib.resources APIs (Python 3.7+)
-https://docs.python.org/3/library/importlib.html#module-importlib.resources"""
-import importlib.resources
-from typing import Literal
-
-from .print_api import print_api
-
-
-class ScriptAsStringProcessor:
-    def __init__(self):
-        self.resources_directory_name: str = 'ssh_scripts'
-
-        # string variable that is going to be exchanged with variable from main script.
-        self.exchange_input_variable_string: str = "exchange_input_variable"
-        self.script_string: str = str()
-
-    def read_script_to_string(
-            self,
-            script_file_name: Literal['process_from_port', 'process_from_ipv4']
-    ):
-        self.script_string = importlib.resources.read_text(
-            f'{__package__}.{self.resources_directory_name}',
-            f'{script_file_name}.py')
-
-        return self
-
-    def put_variable_into_script_string(self, input_variable: any, print_kwargs: dict = None):
-        # Defining variables
-        function_result: str = str()
-
-        if self.exchange_input_variable_string in self.script_string:
-            # string.replace(old, new, count)
-            # old – old substring you want to replace.
-            # new – new substring which would replace the old substring.
-            # count – the number of times you want to replace the old substring with the new substring. (Optional)
-            # We want to replace our string only one time in the beginning.
-            function_result = self.script_string.replace(self.exchange_input_variable_string, str(input_variable), 1)
-        else:
-            message = f"The script string provided doesn't contain {self.exchange_input_variable_string}"
-            print_api(message, error_type=True, logger_method='error', **print_kwargs)
-
-        return function_result
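ScriptAsStringProcessor only loaded a script bundled inside the package and substituted the exchange_input_variable token once before the result was pushed to a remote interpreter. The pattern is reproducible in a few lines; a sketch using the importlib.resources.files() API (Python 3.9+), with hypothetical package and resource names:

    import importlib.resources

    PLACEHOLDER = "exchange_input_variable"  # token the removed class replaced

    def render_bundled_script(package: str, resource: str, value) -> str:
        # Read the script text that ships inside 'package' and substitute the
        # placeholder exactly once, mirroring put_variable_into_script_string().
        script = importlib.resources.files(package).joinpath(resource).read_text()
        if PLACEHOLDER not in script:
            raise ValueError(f"{resource!r} does not contain {PLACEHOLDER!r}")
        return script.replace(PLACEHOLDER, str(value), 1)

    # Example call (hypothetical resource): render_bundled_script('mypkg.scripts', 'probe.py', 8080)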
atomicshop/ssh_scripts/process_from_ipv4.py
DELETED
@@ -1,37 +0,0 @@
-# importing the psutil library to get the source ports and get the process full command line from it.
-import psutil
-# 'psutil.Process(connection.pid).cmdline()' returns list of full command line parts, it is needed to reassemble
-# these parts to regular command line string.
-import shlex
-
-
-# User defined exception.
-class StopAllIterations(Exception):
-    pass
-
-
-# 'input_variable' will be string exchanged in the real script. It is the first line, so it won't take time to find the
-# line for the main script.
-# noinspection PyUnresolvedReferences
-remote_ipv4_list = exchange_input_variable
-
-
-try:
-    # for iteration in range(100):
-    # Iterating through all the connections on the computer.
-    for connection in psutil.net_connections(kind='all'):
-        # 'connection.raddr' is a tuple consisting of IPv4 address [0] and the port [1].
-        # Sometimes, if there's no remote address, "raddr" will be empty and since it's a tuple, we need to check that
-        # before getting the first [0] index.
-        if connection.raddr:
-            for address in remote_ipv4_list:
-                if connection.raddr[0] == address:
-                    # Get the command line from the connection PID.
-                    command_line = psutil.Process(connection.pid).cmdline()
-                    # Command line object is returned as list of parameters. We need 'shlex.join' to join the iterables
-                    # to regular, readable string.
-                    print(shlex.join(command_line))
-                    # Break the loops, when first match is found.
-                    raise StopAllIterations
-except StopAllIterations:
-    pass
atomicshop/ssh_scripts/process_from_port.py
DELETED
@@ -1,27 +0,0 @@
-# importing the psutil library to get the source ports and get the process full command line from it.
-import psutil
-# 'psutil.Process(connection.pid).cmdline()' returns list of full command line parts, it is needed to reassemble
-# these parts to regular command line string.
-import shlex
-
-# 'input_variable' will be string exchanged in the real script. It is the first line, so it won't take time to find the
-# line for the main script.
-# noinspection PyUnresolvedReferences
-source_port = exchange_input_variable
-
-# Iterating through all the connections on the computer.
-for connection in psutil.net_connections():
-    # 'connection.laddr' is a tuple consisting of IPv4 address [0] and the port [1].
-    if connection.laddr[1] == source_port:
-        # Get the command line from the connection PID.
-        command_line = psutil.Process(connection.pid).cmdline()
-        # Command line object is returned as list of parameters. We need 'shlex.join' to join the iterables
-        # to regular, readable string.
-        result = shlex.join(command_line)
-        # If the result is still a PID, we'll try to get process name.
-        if result.isnumeric():
-            # Get the process name from the connection PID.
-            result = psutil.Process(connection.pid).name()
-        print(result)
-        # Break the loop, when first match is found.
-        break
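Both removed ssh_scripts snippets are small psutil lookups that were sent to a remote host over SSH (a new a_mains/process_from_port.py appears in this release). For reference, a local sketch of the same port-to-process lookup, assuming psutil is installed and the caller has enough privileges to inspect other users' connections (the function name is mine):

    import shlex
    from typing import Optional

    import psutil  # third-party

    def command_line_from_local_port(source_port: int) -> Optional[str]:
        # Find the first connection whose local port matches and return the
        # owning process's command line, falling back to the process name.
        for connection in psutil.net_connections(kind='inet'):
            if connection.laddr and connection.laddr.port == source_port and connection.pid:
                proc = psutil.Process(connection.pid)
                cmdline = proc.cmdline()
                return shlex.join(cmdline) if cmdline else proc.name()
        return None

    # command_line_from_local_port(22) would return the listener's command line, privileges permitting.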
atomicshop/wrappers/_process_wrapper_curl.py
DELETED
@@ -1,27 +0,0 @@
-# v1.0.2 - 21.03.2023 13:40
-import sys
-import shlex
-
-from .. import process
-from .. import web
-
-
-def download_file_with_curl(file_url: str, target_directory: str) -> None:
-    """
-    The function receives url and target filesystem directory to download the file.
-
-    :param file_url: full URL to download the file.
-    :param target_directory: The directory on the filesystem to save the file to.
-    """
-
-    # Get only the filename from URL.
-    file_name = web.get_filename_from_url(file_url=file_url)
-
-    cmd: str = f'curl -L {file_url} --output "{target_directory}"'
-    cmd_list: list = shlex.split(cmd)
-
-    output_list: list = process.execute_with_live_output(cmd=cmd_list)
-    # If there was error in curl.
-    if 'curl: ' in output_list[-1]:
-        print('Curl error. Exiting...')
-        sys.exit()
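The removed curl wrapper shelled out to the curl binary through process.execute_with_live_output. A dependency-free sketch of a comparable download-into-directory behaviour using only the standard library, assuming the URL path ends with the file name (function name is mine, not atomicshop's replacement):

    import os
    import urllib.request
    from urllib.parse import urlparse

    def download_file(file_url: str, target_directory: str) -> str:
        # Derive the file name from the URL path and stream the body to disk.
        file_name = os.path.basename(urlparse(file_url).path) or 'download.bin'
        target_path = os.path.join(target_directory, file_name)
        with urllib.request.urlopen(file_url) as response, open(target_path, 'wb') as output_file:
            output_file.write(response.read())
        return target_path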
atomicshop/wrappers/_process_wrapper_tar.py
DELETED
@@ -1,21 +0,0 @@
-# v1.0.2 - 21.03.2023 18:30
-import shlex
-
-from .. import process
-
-
-def extract_archive_with_tar(file_path: str, target_directory: str) -> None:
-    """
-    Function extracts the archive to target directory.
-
-    :param file_path: Full file path to archived file to extract.
-    :param target_directory: The directory on the filesystem to extract the file to.
-    :return: None
-    """
-
-    # -v: Verbose, shows list of extracted files.
-    # -C: Output directory.
-    cmd: str = f'tar -xzvf "{file_path}" -C "{target_directory}"'
-    cmd_list: list = shlex.split(cmd)
-
-    output_list = process.execute_with_live_output(cmd=cmd_list)