atomicshop 2.21.1__py3-none-any.whl → 3.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of atomicshop might be problematic.

Files changed (29)
  1. atomicshop/__init__.py +1 -1
  2. atomicshop/basics/multiprocesses.py +228 -30
  3. atomicshop/dns.py +2 -0
  4. atomicshop/mitm/config_static.py +2 -1
  5. atomicshop/mitm/engines/create_module_template.py +2 -7
  6. atomicshop/mitm/import_config.py +30 -26
  7. atomicshop/mitm/initialize_engines.py +9 -24
  8. atomicshop/mitm/mitm_main.py +187 -59
  9. atomicshop/networks.py +448 -0
  10. atomicshop/wrappers/ctyping/setup_device.py +466 -0
  11. atomicshop/wrappers/dockerw/dockerw.py +17 -21
  12. atomicshop/wrappers/mongodbw/mongodbw.py +1 -0
  13. atomicshop/wrappers/psutilw/{networks.py → psutil_networks.py} +3 -1
  14. atomicshop/wrappers/pywin32w/wmis/msft_netipaddress.py +76 -0
  15. atomicshop/wrappers/pywin32w/wmis/win32_networkadapterconfiguration.py +262 -0
  16. atomicshop/wrappers/pywin32w/wmis/win32networkadapter.py +51 -82
  17. atomicshop/wrappers/pywin32w/wmis/wmi_helpers.py +235 -0
  18. atomicshop/wrappers/socketw/accepter.py +15 -1
  19. atomicshop/wrappers/socketw/creator.py +7 -1
  20. atomicshop/wrappers/socketw/dns_server.py +33 -39
  21. atomicshop/wrappers/socketw/exception_wrapper.py +20 -11
  22. atomicshop/wrappers/socketw/socket_wrapper.py +29 -78
  23. atomicshop/wrappers/winregw/winreg_network.py +20 -0
  24. {atomicshop-2.21.1.dist-info → atomicshop-3.0.0.dist-info}/METADATA +2 -1
  25. {atomicshop-2.21.1.dist-info → atomicshop-3.0.0.dist-info}/RECORD +28 -24
  26. atomicshop/wrappers/pywin32w/wmis/helpers.py +0 -131
  27. {atomicshop-2.21.1.dist-info → atomicshop-3.0.0.dist-info}/LICENSE.txt +0 -0
  28. {atomicshop-2.21.1.dist-info → atomicshop-3.0.0.dist-info}/WHEEL +0 -0
  29. {atomicshop-2.21.1.dist-info → atomicshop-3.0.0.dist-info}/top_level.txt +0 -0
atomicshop/__init__.py CHANGED
@@ -1,4 +1,4 @@
  """Atomic Basic functions and classes to make developer life easier"""
 
  __author__ = "Den Kras"
- __version__ = '2.21.1'
+ __version__ = '3.0.0'
atomicshop/basics/multiprocesses.py CHANGED
@@ -1,14 +1,17 @@
  import multiprocessing
  import multiprocessing.managers
+ import os
  import queue
+ import threading
  import concurrent.futures
  from concurrent.futures import ProcessPoolExecutor, as_completed
+ from collections import deque
  from typing import Callable
 
  from ..import system_resources
 
 
- def process_wrap_queue(function_reference, *args, **kwargs):
+ def process_wrap_queue(function_reference: Callable, *args, **kwargs):
      """
      The function receives function reference and arguments, and executes the function in a thread.
      "_queue" means that a queue.put() is used to store the result of the function and queue.get() to output it.
@@ -154,38 +157,63 @@ class MultiProcessorRecursive:
          self.async_results: list = []
 
      def run_process(self):
-         while self.input_list:
-             new_input_list = []
-             for item in self.input_list:
-                 # Check system resources before processing each item
-                 system_resources.wait_for_resource_availability(
-                     cpu_percent_max=self.cpu_percent_max,
-                     memory_percent_max=self.memory_percent_max,
-                     wait_time=self.wait_time,
-                     system_monitor_manager_dict=self.system_monitor_manager_dict)
+         """
+         Start with the items currently in self.input_list, but whenever a task
+         finishes schedule the children it returns *right away*.
+         The loop ends when there are no more outstanding tasks.
+         """
+         # ---------- internal helpers ----------
+         outstanding = 0  # tasks that have been submitted but not yet finished
+         done_event = threading.Event()  # let the main thread wait until work is over
+
+         def _submit(item):
+             nonlocal outstanding
+             # Wait for resources *before* submitting a new job
+             system_resources.wait_for_resource_availability(
+                 cpu_percent_max=self.cpu_percent_max,
+                 memory_percent_max=self.memory_percent_max,
+                 wait_time=self.wait_time,
+                 system_monitor_manager_dict=self.system_monitor_manager_dict
+             )
+             outstanding += 1
+             self.pool.apply_async(
+                 self.process_function,
+                 (item,),
+                 callback=_on_finish,  # called in the main process when result is ready
+                 error_callback=_on_error
+             )
 
-                 # Process the item
-                 async_result = self.pool.apply_async(self.process_function, (item,))
-                 self.async_results.append(async_result)
+         def _on_finish(result):
+             """Pool calls this in the parent process thread when a job completes."""
+             nonlocal outstanding
+             outstanding -= 1
 
-             # Reset input_list for next round of processing
-             self.input_list = []
+             # The worker returned a list of new items – submit them immediately
+             if result:
+                 for child in result:
+                     _submit(child)
+
+             # If no work left, release the waiter
+             if outstanding == 0:
+                 done_event.set()
 
-             # Collect results as they complete
-             for async_result in self.async_results:
-                 try:
-                     result = async_result.get()
-                     # Assuming process_function returns a list, extend new_input_list
-                     new_input_list.extend(result)
-                 except Exception:
-                     raise
-
-             # Update the input_list for the next iteration
-             self.input_list = new_input_list
-             # Clear the async_results for the next iteration
-             self.async_results.clear()
-
-     def shutdown_pool(self):
+         def _on_error(exc):
+             """Propagate the first exception and stop everything cleanly."""
+             done_event.set()
+             raise exc  # let your code deal with it – you can customise this
+
+         # ---------- kick‑off ----------
+         # Schedule the items we already have
+         for item in self.input_list:
+             _submit(item)
+
+         # Clear the input list; after this point everything is driven by callbacks
+         self.input_list.clear()
+
+         # Wait until all recursively spawned work is finished
+         done_event.wait()
+
+     def shutdown(self):
          """Shuts down the pool gracefully."""
          if self.pool:
              self.pool.close()  # Stop accepting new tasks
@@ -193,6 +221,176 @@ class MultiProcessorRecursive:
          self.pool = None
 
 
+ class _MultiProcessorRecursiveWithProcessPoolExecutor:
+     def __init__(
+             self,
+             process_function: Callable,
+             input_list: list,
+             max_workers: int = None,
+             cpu_percent_max: int = 80,
+             memory_percent_max: int = 80,
+             wait_time: float = 5,
+             system_monitor_manager_dict: multiprocessing.managers.DictProxy = None
+     ):
+         """
+         THIS CLASS USES THE concurrent.futures.ProcessPoolExecutor to achieve parallelism.
+         For some reason I got freezes on exceptions without the exception output after the run_process() method finished
+         and the pool remained open. So, using the MultiProcessorRecursive instead.
+
+         MultiProcessor class. Used to execute functions in parallel. The result of each execution is fed back
+         to the provided function. Making it sort of recursive execution.
+         :param process_function: function, function to execute on the input list.
+         :param input_list: list, list of inputs to process.
+         :param max_workers: integer, number of workers to execute functions in parallel. Default is None, which
+             is the number of CPUs that will be counted automatically by the multiprocessing module.
+         :param cpu_percent_max: integer, maximum CPU percentage. Above that usage, we will wait before starting new
+             execution.
+         :param memory_percent_max: integer, maximum memory percentage. Above that usage, we will wait, before starting
+             new execution.
+         :param wait_time: float, time to wait if the CPU or memory usage is above the maximum percentage.
+         :param system_monitor_manager_dict: multiprocessing.managers.DictProxy, shared manager dict for
+             system monitoring. The object is the output of atomicshop.system_resource_monitor.
+             If you are already running this monitor, you can pass the manager_dict to both the system monitor and this
+             class to share the system resources data.
+             If this is used, the system resources will be checked before starting each new execution from this
+             shared dict instead of performing new checks.
+
+         Usage Examples:
+             def unpack_file(file_path):
+                 # Process the file at file_path and unpack it.
+                 # Return a list of new file paths that were extracted from the provided path.
+                 return [new_file_path1, new_file_path2]  # Example return value
+
+             # List of file paths to process
+             file_paths = ["path1", "path2", "path3"]
+
+             # Note: unpack_file Callable is passed to init without parentheses.
+
+             1. Providing the list directly to process at once:
+             # Initialize the processor.
+             processor = MultiProcessor(
+                 process_function=unpack_file,
+                 input_list=file_paths,
+                 max_workers=4,  # Number of parallel workers
+                 cpu_percent_max=80,  # Max CPU usage percentage
+                 memory_percent_max=80,  # Max memory usage percentage
+                 wait_time=5  # Time to wait if resources are overused
+             )
+
+             # Process the list of files at once.
+             processor.run_process()
+             # Shutdown the pool processes after processing.
+             processor.shutdown_pool()
+
+             2. Processing each file in the list differently then adding to the list of the multiprocessing instance then executing.
+             # Initialize the processor once, before the loop, with empty input_list.
+             processor = MultiProcessor(
+                 process_function=unpack_file,
+                 input_list=[],
+                 max_workers=4,  # Number of parallel workers
+                 cpu_percent_max=80,  # Max CPU usage percentage
+                 memory_percent_max=80,  # Max memory usage percentage
+                 wait_time=5  # Time to wait if resources are overused
+             )
+
+             for file_path in file_paths:
+                 # <Process each file>.
+                 # Add the result to the input_list of the processor.
+                 processor.input_list.append(file_path)
+
+             # Process the list of files at once.
+             processor.run_process()
+             # Shutdown the pool processes after processing.
+             processor.shutdown_pool()
+
+             3. Processing each file in the list separately, since we're using an unpacking function that
+                will create more files, but the context for this operation is different for extraction
+                of each main file inside the list:
+
+             # Initialize the processor once, before the loop, with empty input_list.
+             processor = MultiProcessor(
+                 process_function=unpack_file,
+                 input_list=[],
+                 max_workers=4,  # Number of parallel workers
+                 cpu_percent_max=80,  # Max CPU usage percentage
+                 memory_percent_max=80,  # Max memory usage percentage
+                 wait_time=5  # Time to wait if resources are overused
+             )
+
+             for file_path in file_paths:
+                 # <Process each file>.
+                 # Add the result to the input_list of the processor.
+                 processor.input_list.append(file_path)
+                 # Process the added file path separately.
+                 processor.run_process()
+
+             # Shutdown the pool processes after processing.
+             processor.shutdown_pool()
+         """
+
+         self.process_function: Callable = process_function
+         self.input_list: list = input_list
+         self.cpu_percent_max: int = cpu_percent_max
+         self.memory_percent_max: int = memory_percent_max
+         self.wait_time: float = wait_time
+         self.system_monitor_manager_dict: multiprocessing.managers.DictProxy = system_monitor_manager_dict
+
+         if max_workers is None:
+             max_workers = os.cpu_count()
+         self.max_workers: int = max_workers
+
+         # Create the executor once and reuse it.
+         # noinspection PyTypeChecker
+         self.executor: ProcessPoolExecutor = None
+
+     def _ensure_executor(self):
+         """Create a new pool if we do not have one or if the old one was shut."""
+         if self.executor is None or getattr(self.executor, '_shutdown', False):
+             self.executor = ProcessPoolExecutor(max_workers=self.max_workers)
+
+     def run_process(self):
+         # Make sure we have a live executor
+         self._ensure_executor()
+
+         work_q = deque(self.input_list)  # breadth‑first queue
+         self.input_list.clear()
+         futures = set()
+
+         # helper to submit jobs up to the concurrency limit
+         def _fill():
+             while work_q and len(futures) < self.max_workers:
+                 item = work_q.popleft()
+                 system_resources.wait_for_resource_availability(
+                     cpu_percent_max=self.cpu_percent_max,
+                     memory_percent_max=self.memory_percent_max,
+                     wait_time=self.wait_time,
+                     system_monitor_manager_dict=self.system_monitor_manager_dict
+                 )
+                 futures.add(self.executor.submit(self.process_function, item))
+
+         _fill()  # start the first wave
+
+         while futures:
+             for fut in as_completed(futures):
+                 futures.remove(fut)  # a slot just freed up
+
+                 # propagate worker exceptions immediately
+                 children = fut.result()
+
+                 # schedule the newly discovered items
+                 if children:
+                     work_q.extend(children)
+
+                 _fill()  # keep the pool saturated
+                 break  # leave the for‑loop so as_completed resets
+
+     def shutdown(self):
+         """Shuts down the executor gracefully."""
+         if self.executor:
+             self.executor.shutdown(wait=True)  # blocks until all tasks complete
+             self.executor = None
+
+
  class ConcurrentProcessorRecursive:
      def __init__(
          self,
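
The reworked run_process() above drives the recursion through pool callbacks instead of processing the input list in fixed rounds, and the public shutdown method is now shutdown() rather than shutdown_pool(). A minimal usage sketch, assuming the MultiProcessorRecursive constructor still accepts the same parameters as the executor-based variant shown above (its __init__ is not part of this hunk):

from atomicshop.basics.multiprocesses import MultiProcessorRecursive


def unpack_file(file_path: str) -> list:
    # Worker: unpack one file and return the paths it produced.
    # An empty list ends this branch of the recursion.
    return []


if __name__ == '__main__':
    processor = MultiProcessorRecursive(
        process_function=unpack_file,
        input_list=['archive_1.zip', 'archive_2.zip'],  # hypothetical inputs
        max_workers=4,
        cpu_percent_max=80,
        memory_percent_max=80,
        wait_time=5
    )
    # Returns only after every recursively spawned child item has finished
    # (tracked internally via the outstanding counter and done_event).
    processor.run_process()
    # 3.0.0 renames shutdown_pool() to shutdown().
    processor.shutdown()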
atomicshop/dns.py CHANGED
@@ -174,3 +174,5 @@ def default_dns_gateway_main() -> int:
      elif args.dynamic:
          set_connection_dns_gateway_dynamic(
              connection_name=args.connection_name, use_default_connection=args.connection_default)
+
+     return 0
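
With default_dns_gateway_main() now returning 0 on success, a console script can hand the value straight to sys.exit(). A small sketch; the wrapper below is illustrative and not part of the package:

import sys

from atomicshop import dns


def main() -> None:
    # Use the function's integer result as the process exit code.
    sys.exit(dns.default_dns_gateway_main())


if __name__ == '__main__':
    main()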
atomicshop/mitm/config_static.py CHANGED
@@ -1,5 +1,6 @@
  import os
  from dataclasses import dataclass
+ from typing import Literal
 
  from . import import_config
 
@@ -157,7 +158,7 @@ class ProcessName:
      ssh_user: str
      ssh_pass: str
 
-     ssh_script_to_execute = 'process_from_port'
+     ssh_script_to_execute: Literal['process_from_port', 'process_from_ipv4'] = 'process_from_port'
 
 
  def load_config(config_toml_file_path: str):
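
The new Literal annotation documents the two accepted script names and lets a static type checker (mypy, pyright) reject anything else; it adds no runtime enforcement. A tiny sketch of the effect, using a simplified stand-in for the dataclass:

from typing import Literal


class ProcessNameSketch:
    # Simplified stand-in for config_static.ProcessName.
    ssh_script_to_execute: Literal['process_from_port', 'process_from_ipv4'] = 'process_from_port'


ProcessNameSketch.ssh_script_to_execute = 'process_from_ipv4'  # fine for a type checker
ProcessNameSketch.ssh_script_to_execute = 'process_from_path'  # flagged by mypy/pyright, still runs at runtime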
atomicshop/mitm/engines/create_module_template.py CHANGED
@@ -30,7 +30,7 @@ class CreateModuleTemplate:
      def __init__(self):
          # === Get input variables. ===
          self.engine_name: str = parse_arguments().engine_name
-         self.domains: list = ['example.com']
+         self.domains: list = ['example.com:443', 'example.org:80']
 
          # New engine's directory.
          self.new_engine_directory: str = ENGINES_DIRECTORY_PATH + os.sep + self.engine_name
@@ -76,15 +76,10 @@ class CreateModuleTemplate:
 
          config_lines_list.append('[engine]')
          config_lines_list.append(f'domains = [{", ".join(domains_with_quotes)}]')
-         config_lines_list.append('dns_target = "127.0.0.1"')
-         config_lines_list.append('tcp_listening_address_list = ["0.0.0.0:443"]\n')
+         config_lines_list.append('localhost = 0\n')
          # config_lines_list.append(f'\n')
          config_lines_list.append('[mtls]')
          config_lines_list.append('# "subdomain.domain.com" = "file_name_in_current_dir.pem"\n')
-         # config_lines_list.append(f'\n')
-         config_lines_list.append('[no_sni]')
-         config_lines_list.append('get_from_dns = 1 # Blocking, the accept function will wait until the domain is received from DNS.')
-         config_lines_list.append('serve_domain_on_address = {0 = [{"example.com" = "127.0.0.2:443"}]}')
 
          config_file_path = self.new_engine_directory + os.sep + CONFIG_FILE_NAME
 
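
Under the new template an engine config.toml carries only the [engine] and [mtls] sections; dns_target, tcp_listening_address_list and the whole [no_sni] block are gone, domains now carry their port, and localhost is a 0/1 flag. A sketch of reading such a file with the standard-library tomllib (Python 3.11+); the literal below is an assumption based on the lines appended above:

import tomllib

# Roughly what the template writer above produces for a new engine.
example_config = '''
[engine]
domains = ["example.com:443", "example.org:80"]
localhost = 0

[mtls]
# "subdomain.domain.com" = "file_name_in_current_dir.pem"
'''

config = tomllib.loads(example_config)
print(config['engine']['domains'])          # ['example.com:443', 'example.org:80']
print(bool(config['engine']['localhost']))  # False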
atomicshop/mitm/import_config.py CHANGED
@@ -151,37 +151,41 @@ def check_configurations() -> int:
          print_api(error_message, color="red")
          return 1
 
+     is_localhost: bool | None = None
      for engine in config_static.ENGINES_LIST:
-         if engine.no_sni.get_from_dns and engine.no_sni.serve_domain_on_address_enable:
-             message = (
-                 f"Both [get_from_dns] and [serve_domain_on_address] are enabled in [no_sni] section of the engine.\n"
-                 f"Only one Can be True.")
-             print_api(message, color="red")
-             return 1
-         if not engine.no_sni.get_from_dns and not engine.no_sni.serve_domain_on_address_enable:
-             message = (
-                 f"Both [get_from_dns] and [serve_domain_on_address] are disabled in [no_sni] section of the engine.\n"
-                 f"Only one Can be True.")
-             print_api(message, color="red")
-             return 1
-
-         if engine.no_sni.serve_domain_on_address_enable:
-             # Check if the domains in no_sni are the same as in the engine. They should not be.
-             # Same goes for the address.
-             for domain, address_ip_port in engine.no_sni.serve_domain_on_address_dict.items():
-                 if domain in engine.domain_list:
+         for domain_port in engine.domain_list:
+             # Check if the domains has port.
+             if ':' not in domain_port:
+                 message = (
+                     f"[*] Domain [{domain_port}] doesn't have a port.\n"
+                     f"Please check your engine configuration file.")
+                 print_api(message, color="red")
+                 return 1
+             else:
+                 # Split the domain and port.
+                 domain, port = domain_port.split(':')
+                 # Check if the port is a number.
+                 if not port.isdigit():
                      message = (
-                         f"[*] No SNI setting: The domain [{domain}] is in the engine domains list [{engine.domain_list}].\n"
-                         f"The point of the no_sni section is to serve specific domains on separate addresses.\n")
+                         f"[*] Port [{port}] is not a number.\n"
+                         f"Please check your engine configuration file.")
                      print_api(message, color="red")
                      return 1
 
-                 if address_ip_port in engine.tcp_listening_address_list:
-                     message = (
-                         f"[*] No SNI setting: The address [{address_ip_port}] is in the engine listening interfaces list [{engine.tcp_listening_address_list}].\n"
-                         f"The point of the no_sni section is to serve specific domains on separate addresses.\n")
-                     print_api(message, color="red")
-                     return 1
+         # Check if 'localhost' is set in all the engines, or not.
+         # There can't be mixed engines where local host is set and not set.
+         # It can be all engines will be localhost or none of them.
+         if is_localhost is None:
+             is_localhost = engine.is_localhost
+         else:
+             if is_localhost != engine.is_localhost:
+                 message = (
+                     f"[*] Mixed [localhost] setting in the engines found.\n"
+                     f"[*] Some engines are set to [localhost] and some are not.\n"
+                     f"[*] This is not allowed. All engines must be set to [localhost = 1] or All engines must be set to [localhost = 0].\n"
+                     f"Please check your engine configuration files.")
+                 print_api(message, color="red")
+                 return 1
 
      # Check admin right if on localhost ============================================================================
      # If any of the DNS IP target addresses is localhost loopback, then we need to check if the script
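
The rewritten checks reject any engine domain entry that lacks a numeric port and refuse a mix of localhost and non-localhost engines. A standalone illustration of the domain/port rule, mirroring the split used above (the helper name is hypothetical):

def has_valid_port(domain_port: str) -> bool:
    # Same 'domain:port' split that check_configurations() performs.
    if ':' not in domain_port:
        return False
    _domain, port = domain_port.split(':')
    return port.isdigit()


print(has_valid_port('example.com:443'))    # True
print(has_valid_port('example.com'))        # False
print(has_valid_port('example.com:https'))  # False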
atomicshop/mitm/initialize_engines.py CHANGED
@@ -7,23 +7,15 @@ from .engines.__reference_general import parser___reference_general, responder__
      recorder___reference_general
 
 
- class NoSNI:
-     def __init__(self):
-         self.get_from_dns: bool = False
-         self.serve_domain_on_address_enable: bool = False
-         self.serve_domain_on_address_dict: dict = dict()
-
-
  class ModuleCategory:
      def __init__(self, script_directory: str):
          self.engine_name: str = str()
          self.script_directory: str = script_directory
 
          self.domain_list: list = list()
-         self.dns_target: str = str()
-         self.tcp_listening_address_list: list = list()
+         self.domain_target_dict: dict = dict()
+         self.is_localhost: bool = bool()
          self.mtls: dict = dict()
-         self.no_sni: NoSNI = NoSNI()
 
          self.parser_file_path: str = str()
          self.responder_file_path: str = str()
@@ -51,24 +43,11 @@ class ModuleCategory:
 
          # Getting the parameters from engine config file
          self.domain_list = configuration_data['engine']['domains']
-         self.dns_target = configuration_data['engine']['dns_target']
-         self.tcp_listening_address_list = configuration_data['engine']['tcp_listening_address_list']
+         self.is_localhost = bool(configuration_data['engine']['localhost'])
 
          if 'mtls' in configuration_data:
              self.mtls = configuration_data['mtls']
 
-         self.no_sni.get_from_dns = bool(configuration_data['no_sni']['get_from_dns'])
-
-         for enable_bool, address_list in configuration_data['no_sni']['serve_domain_on_address'].items():
-             if enable_bool in ['0', '1']:
-                 self.no_sni.serve_domain_on_address_enable = bool(int(enable_bool))
-             else:
-                 raise ValueError(f"Error: no_sni -> serve_domain_on_address -> key must be 0 or 1.")
-
-             for address in address_list:
-                 for domain, address_ip_port in address.items():
-                     self.no_sni.serve_domain_on_address_dict = {domain: address_ip_port}
-
          # If there's module configuration file, but no domains in it, there's no point to continue.
          # Since, each engine is based on domains.
          if not self.domain_list or self.domain_list[0] == '':
@@ -86,6 +65,12 @@ class ModuleCategory:
          self.responder_file_path = f"{engine_directory_path}{os.sep}responder{file_name_suffix}.py"
          self.recorder_file_path = f"{engine_directory_path}{os.sep}recorder{file_name_suffix}.py"
 
+         for domain_index, domain_port_string in enumerate(self.domain_list):
+             # Splitting the domain and port
+             domain, port = domain_port_string.split(':')
+
+             self.domain_target_dict[domain] = {'ip': None, 'port': port}
+
          for subdomain, file_name in self.mtls.items():
              self.mtls[subdomain] = f'{engine_directory_path}{os.sep}{file_name}'
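
The new domain_target_dict replaces dns_target and tcp_listening_address_list: every configured 'domain:port' entry becomes a key whose IP is resolved later. A standalone sketch of the same transformation performed in the loop above:

# Hypothetical domain list in the new 'domain:port' format.
domain_list = ['example.com:443', 'example.org:80']

domain_target_dict: dict = {}
for domain_port_string in domain_list:
    # Same split that ModuleCategory uses while loading the engine config.
    domain, port = domain_port_string.split(':')
    domain_target_dict[domain] = {'ip': None, 'port': port}

print(domain_target_dict)
# {'example.com': {'ip': None, 'port': '443'}, 'example.org': {'ip': None, 'port': '80'}}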