atomicshop 2.19.7__py3-none-any.whl → 2.19.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of atomicshop might be problematic. Click here for more details.

atomicshop/__init__.py CHANGED
@@ -1,4 +1,4 @@
1
1
  """Atomic Basic functions and classes to make developer life easier"""
2
2
 
3
3
  __author__ = "Den Kras"
4
- __version__ = '2.19.7'
4
+ __version__ = '2.19.9'
@@ -32,6 +32,9 @@ def main():
32
32
  print_api("PIP Installing Robocorp-Recognition.")
33
33
  subprocess.check_call(["pip", "install", "--upgrade", "rpaframework-recognition"])
34
34
 
35
+ print_api("PIP Installing pynput.")
36
+ subprocess.check_call(["pip", "install", "--upgrade", "pynput"])
37
+
35
38
  print_api("Installing Playwright browsers.")
36
39
  subprocess.check_call(["playwright", "install"])
37
40
 
@@ -0,0 +1,11 @@
1
+ import sys
2
+
3
+ from atomicshop.wrappers import githubw
4
+
5
+
6
+ def main():
7
+ githubw.github_wrapper_main_with_args()
8
+
9
+
10
+ if __name__ == "__main__":
11
+ sys.exit(main())
@@ -0,0 +1,17 @@
1
+ """
2
+ Need to fix the
3
+ etw.etw
4
+ file, in the
5
+ EventConsumer._unpackSimpleType
6
+ function.
7
+ """
8
+ """
9
+ data = formatted_data.value
10
+ # Convert the formatted data if necessary
11
+ if isinstance(data, str):
12
+ if data.endswith(' '):
13
+ data = data[:-1]
14
+ else:
15
+ if out_type in tdh.TDH_CONVERTER_LOOKUP and type(data) != tdh.TDH_CONVERTER_LOOKUP[out_type]:
16
+ data = tdh.TDH_CONVERTER_LOOKUP[out_type](data)
17
+ """
atomicshop/etws/trace.py CHANGED
@@ -1,6 +1,7 @@
1
1
  import queue
2
2
  import sys
3
3
  import time
4
+ import multiprocessing.managers
4
5
 
5
6
  # Import FireEye Event Tracing library.
6
7
  import etw
@@ -11,19 +12,16 @@ from ..process_poller import simple_process_pool
11
12
  from ..wrappers.psutilw import psutilw
12
13
 
13
14
 
14
- WAIT_FOR_PROCESS_POLLER_PID_SECONDS: int = 3
15
- WAIT_FOR_PROCESS_POLLER_PID_COUNTS: int = WAIT_FOR_PROCESS_POLLER_PID_SECONDS * 10
16
-
17
-
18
15
  class EventTrace(etw.ETW):
19
16
  def __init__(
20
17
  self,
21
18
  providers: list,
22
- event_callback=None,
19
+ event_callback: callable = None,
23
20
  event_id_filters: list = None,
24
21
  session_name: str = None,
25
22
  close_existing_session_name: bool = True,
26
- enable_process_poller: bool = False
23
+ enable_process_poller: bool = False,
24
+ process_pool_shared_dict_proxy: multiprocessing.managers.DictProxy = None
27
25
  ):
28
26
  """
29
27
  :param providers: List of tuples with provider name and provider GUID.
@@ -39,6 +37,13 @@ class EventTrace(etw.ETW):
39
37
  :param enable_process_poller: Boolean to enable process poller. Gets the process PID, Name and CommandLine.
40
38
  Since the DNS events doesn't contain the process name and command line, only PID.
41
39
  Then DNS events will be enriched with the process name and command line from the process poller.
40
+ :param process_pool_shared_dict_proxy: multiprocessing.managers.DictProxy,
41
+ multiprocessing shared dict proxy that contains current processes.
42
+ Check the 'atomicshop\process_poller\simple_process_pool.py' SimpleProcessPool class for more information.
43
+
44
+ If None, the process poller will create a new shared dict proxy.
45
+ If provided, then the provided shared dict proxy will be used.
46
+ Of course, valid only if 'enable_process_poller' is True.
42
47
 
43
48
  ------------------------------------------
44
49
 
@@ -62,6 +67,7 @@ class EventTrace(etw.ETW):
62
67
  self.event_queue = queue.Queue()
63
68
  self.close_existing_session_name: bool = close_existing_session_name
64
69
  self.enable_process_poller: bool = enable_process_poller
70
+ self.process_pool_shared_dict_proxy: multiprocessing.managers.DictProxy = process_pool_shared_dict_proxy
65
71
 
66
72
  # If no callback function is provided, we will use the default one, which will put the event in the queue.
67
73
  if not event_callback:
@@ -75,8 +81,16 @@ class EventTrace(etw.ETW):
75
81
  for provider in providers:
76
82
  etw_format_providers.append(etw.ProviderInfo(provider[0], etw.GUID(provider[1])))
77
83
 
84
+ self.self_hosted_poller: bool = False
78
85
  if self.enable_process_poller:
79
- self.process_poller = simple_process_pool.SimpleProcessPool()
86
+ if not self.process_pool_shared_dict_proxy:
87
+ self.self_hosted_poller = True
88
+ self.process_poller = simple_process_pool.SimpleProcessPool()
89
+ self.multiprocessing_manager: multiprocessing.managers.SyncManager = multiprocessing.Manager()
90
+ self.process_pool_shared_dict_proxy = self.multiprocessing_manager.dict()
91
+
92
+ self.pid_process_converter = simple_process_pool.PidProcessConverter(
93
+ process_pool_shared_dict_proxy=self.process_pool_shared_dict_proxy)
80
94
 
81
95
  super().__init__(
82
96
  providers=etw_format_providers, event_callback=function_callable, event_id_filters=event_id_filters,
@@ -112,6 +126,9 @@ class EventTrace(etw.ETW):
112
126
  if self.enable_process_poller:
113
127
  self.process_poller.stop()
114
128
 
129
+ if self.self_hosted_poller:
130
+ self.multiprocessing_manager.shutdown()
131
+
115
132
  def emit(self):
116
133
  """
117
134
  The Function will return the next event from the queue.
@@ -143,21 +160,9 @@ class EventTrace(etw.ETW):
143
160
  }
144
161
 
145
162
  if self.enable_process_poller:
146
- processes = self.process_poller.get_processes()
147
- if event_dict['pid'] not in processes:
148
- counter = 0
149
- while counter < WAIT_FOR_PROCESS_POLLER_PID_COUNTS:
150
- processes = self.process_poller.get_processes()
151
- if event_dict['pid'] not in processes:
152
- time.sleep(0.1)
153
- counter += 1
154
- else:
155
- break
156
-
157
- if counter == WAIT_FOR_PROCESS_POLLER_PID_COUNTS:
158
- print_api(f"Error: Couldn't get the process name for PID: {event_dict['pid']}.", color='red')
159
-
160
- event_dict = psutilw.cross_single_connection_with_processes(event_dict, processes)
163
+ process_info: dict = self.pid_process_converter.get_process_by_pid(event_dict['pid'])
164
+ event_dict['name'] = process_info['name']
165
+ event_dict['cmdline'] = process_info['cmdline']
161
166
 
162
167
  return event_dict
163
168
 
@@ -1,6 +1,8 @@
1
+ import multiprocessing.managers
2
+
1
3
  from .. import trace, const
2
4
  from ...basics import dicts
3
- from ... import dns
5
+ from ... import dns, ip_addresses
4
6
 
5
7
 
6
8
  ETW_DEFAULT_SESSION_NAME: str = 'AtomicShopDnsTrace'
@@ -9,9 +11,6 @@ PROVIDER_NAME: str = const.ETW_DNS['provider_name']
9
11
  PROVIDER_GUID: str = const.ETW_DNS['provider_guid']
10
12
  REQUEST_RESP_EVENT_ID: int = const.ETW_DNS['event_ids']['dns_request_response']
11
13
 
12
- WAIT_FOR_PROCESS_POLLER_PID_SECONDS: int = 3
13
- WAIT_FOR_PROCESS_POLLER_PID_COUNTS: int = WAIT_FOR_PROCESS_POLLER_PID_SECONDS * 10
14
-
15
14
 
16
15
  class DnsRequestResponseTrace:
17
16
  """DnsTrace class use to trace DNS events from Windows Event Tracing for EventId 3008."""
@@ -20,7 +19,8 @@ class DnsRequestResponseTrace:
20
19
  attrs: list = None,
21
20
  session_name: str = None,
22
21
  close_existing_session_name: bool = True,
23
- skip_record_list: list = None
22
+ skip_record_list: list = None,
23
+ process_pool_shared_dict_proxy: multiprocessing.managers.DictProxy = None
24
24
  ):
25
25
  """
26
26
  :param attrs: List of attributes to return. If None, all attributes will be returned.
@@ -32,6 +32,13 @@ class DnsRequestResponseTrace:
32
32
  created. Instead, the existing session will be used. If there is a buffer from the previous session,
33
33
  you will get the events from the buffer.
34
34
  :param skip_record_list: List of DNS Records to skip emitting. Example: ['PTR', 'SRV']
35
+ :param process_pool_shared_dict_proxy: multiprocessing.managers.DictProxy, multiprocessing shared dict proxy
36
+ that contains current processes.
37
+ Check the 'atomicshop\process_poller\simple_process_pool.py' SimpleProcessPool class for more information.
38
+
39
+ For this specific class it means that you can run the process poller outside of this class and pass the
40
+ 'process_pool_shared_dict_proxy' to this class. Then you can get the process name and command line for
41
+ the DNS events from the 'process_pool_shared_dict_proxy' and use it also in other classes.
35
42
 
36
43
  -------------------------------------------------
37
44
 
@@ -53,6 +60,7 @@ class DnsRequestResponseTrace:
53
60
  """
54
61
 
55
62
  self.attrs = attrs
63
+ self.process_pool_shared_dict_proxy: multiprocessing.managers.DictProxy = process_pool_shared_dict_proxy
56
64
 
57
65
  if skip_record_list:
58
66
  self.skip_record_list: list = skip_record_list
@@ -68,7 +76,8 @@ class DnsRequestResponseTrace:
68
76
  event_id_filters=[REQUEST_RESP_EVENT_ID],
69
77
  session_name=session_name,
70
78
  close_existing_session_name=close_existing_session_name,
71
- enable_process_poller=True
79
+ enable_process_poller=True,
80
+ process_pool_shared_dict_proxy=self.process_pool_shared_dict_proxy
72
81
  )
73
82
 
74
83
  def start(self):
@@ -90,13 +99,51 @@ class DnsRequestResponseTrace:
90
99
  :return: Dictionary with the event data.
91
100
  """
92
101
 
102
+ # Get the event from ETW as is.
93
103
  event = self.event_trace.emit()
94
104
 
105
+ # Get the raw query results string from the event.
106
+ query_results: str = event['EventHeader']['QueryResults']
107
+
108
+ if query_results != '':
109
+ query_results_list: list = query_results.split(';')
110
+
111
+ addresses_ips: list = list()
112
+ addresses_cnames: list = list()
113
+ for query_result in query_results_list:
114
+ # If there is a type in the query result, it means it is a cname (domain).
115
+ if 'type' in query_result:
116
+ query_result = query_result.split(' ')[-1]
117
+
118
+ # But we'll still make sure that the query result is an IP address or not.
119
+ if ip_addresses.is_ip_address(query_result):
120
+ addresses_ips.append(query_result)
121
+ # If it is not empty, then it is a cname.
122
+ elif query_result != '':
123
+ addresses_cnames.append(query_result)
124
+ # if the query results are empty, then we'll just set the addresses to empty lists.
125
+ else:
126
+ addresses_ips: list = list()
127
+ addresses_cnames: list = list()
128
+
129
+ status_id: str = str(event['EventHeader']['QueryStatus'])
130
+
131
+ # Getting the 'QueryStatus' key. If DNS Query Status is '0' then it was executed successfully.
132
+ # And if not, it means there was an error. The 'QueryStatus' indicate what number of an error it is.
133
+ if status_id == '0':
134
+ status = 'Success'
135
+ else:
136
+ status = 'Error'
137
+
95
138
  event_dict: dict = {
96
139
  'event_id': event['EventId'],
97
- 'domain': event['EventHeader']['QueryName'],
140
+ 'query': event['EventHeader']['QueryName'],
98
141
  'query_type_id': str(event['EventHeader']['QueryType']),
99
142
  'query_type': dns.TYPES_DICT[str(event['EventHeader']['QueryType'])],
143
+ 'result_ips': ','.join(addresses_ips),
144
+ 'result_cnames': ','.join(addresses_cnames),
145
+ 'status_id': status_id,
146
+ 'status': status,
100
147
  'pid': event['pid'],
101
148
  'name': event['name'],
102
149
  'cmdline': event['cmdline']
@@ -107,42 +154,6 @@ class DnsRequestResponseTrace:
107
154
  if event_dict['query_type'] in self.skip_record_list:
108
155
  return self.emit()
109
156
 
110
- # Defining list if ips and other answers, which aren't IPs.
111
- list_of_ips = list()
112
- list_of_other_domains = list()
113
- # Parse DNS results, only if 'QueryResults' key isn't empty, since many of the events are, mostly due errors.
114
- if event['EventHeader']['QueryResults']:
115
- # 'QueryResults' key contains a string with all the 'Answers' divided by type and ';' character.
116
- # Basically, we can parse each type out of string, but we need only IPs and other answers.
117
- list_of_parameters = event['EventHeader']['QueryResults'].split(';')
118
-
119
- # Iterating through all the parameters that we got from 'QueryResults' key.
120
- for parameter in list_of_parameters:
121
- # If 'type' string is present it means that entry is a domain;
122
- if 'type' in parameter:
123
- # Remove the 'type' string and get the domain name.
124
- current_iteration_parameter = parameter.rsplit(' ', maxsplit=1)[1]
125
- # Add the variable to the list of other answers.
126
- list_of_other_domains.append(current_iteration_parameter)
127
- # If 'type' string is not present it means that entry is an IP.
128
- else:
129
- # Sometimes the last parameter in the 'QueryResults' key after ';' character will be empty, skip it.
130
- if parameter:
131
- list_of_ips.append(parameter)
132
-
133
- event_dict['ips'] = list_of_ips
134
- event_dict['other_domains'] = list_of_other_domains
135
-
136
- # Getting the 'QueryStatus' key.
137
- event_dict['status_id'] = event['EventHeader']['QueryStatus']
138
-
139
- # Getting the 'QueryStatus' key. If DNS Query Status is '0' then it was executed successfully.
140
- # And if not, it means there was an error. The 'QueryStatus' indicate what number of an error it is.
141
- if event['EventHeader']['QueryStatus'] == '0':
142
- event_dict['status'] = 'Success'
143
- else:
144
- event_dict['status'] = 'Error'
145
-
146
157
  if self.attrs:
147
158
  event_dict = dicts.reorder_keys(
148
159
  event_dict, self.attrs, skip_keys_not_in_list=True)
@@ -25,7 +25,7 @@ class DnsCheck:
25
25
 
26
26
  self.fetch_engine: trace_dns.DnsRequestResponseTrace = (
27
27
  trace_dns.DnsRequestResponseTrace(
28
- attrs=['name', 'cmdline', 'domain', 'query_type'],
28
+ attrs=['name', 'cmdline', 'query', 'query_type'],
29
29
  session_name=self.etw_session_name,
30
30
  close_existing_session_name=True,
31
31
  skip_record_list=self.settings['skip_record_list'],
@@ -1,14 +1,19 @@
1
1
  import threading
2
2
  from pathlib import Path
3
3
  import time
4
+ import multiprocessing.managers
4
5
 
5
6
  from ..wrappers.pywin32w.win_event_log.subscribes import process_create, process_terminate
6
- from .. import get_process_list
7
+ from .. import get_process_list, get_process_name_cmd_dll
8
+ from ..print_api import print_api
7
9
 
8
10
 
9
11
  WAIT_BEFORE_PROCESS_TERMINATION_CHECK_SECONDS: float = 3
10
12
  WAIT_BEFORE_PROCESS_TERMINATION_CHECK_COUNTS: float = WAIT_BEFORE_PROCESS_TERMINATION_CHECK_SECONDS * 10
11
13
 
14
+ WAIT_FOR_PROCESS_POLLER_PID_SECONDS: int = 3
15
+ WAIT_FOR_PROCESS_POLLER_PID_COUNTS: int = WAIT_FOR_PROCESS_POLLER_PID_SECONDS * 10
16
+
12
17
 
13
18
  class SimpleProcessPool:
14
19
  """
@@ -22,14 +27,48 @@ class SimpleProcessPool:
22
27
 
23
28
  def __init__(
24
29
  self,
25
- wait_before_pid_remove_seconds: float = 5
30
+ wait_before_pid_remove_seconds: float = 5,
31
+ process_pool_shared_dict_proxy: multiprocessing.managers.DictProxy = None
26
32
  ):
27
33
  """
28
34
  :param wait_before_pid_remove_seconds: float, how many seconds to wait before the process is removed from
29
35
  the pool after process termination event is received for that pid.
36
+ :param process_pool_shared_dict_proxy: multiprocessing.managers.DictProxy, shared dict proxy to update
37
+ the process pool.
38
+ If you run a function from other multiprocessing.Process, you can pass the shared_dict_proxy to the function
39
+ and update the process pool from that function.
40
+
41
+ Example:
42
+ import multiprocessing.managers
43
+
44
+ manager: multiprocessing.managers.SyncManager = multiprocessing.Manager()
45
+ multiprocess_dict_proxy: multiprocessing.managers.DictProxy = manager.dict()
46
+
47
+ process_poller = SimpleProcessPool()
48
+ process_poller.start()
49
+
50
+ while True:
51
+ #============================
52
+ # your function where you get info with pid
53
+ # result = {
54
+ # 'pid': 1234,
55
+ # 'info': 'some info'
56
+ # }
57
+ #============================
58
+ info_with_process_details = {
59
+ 'pid': result['pid'],
60
+ 'info': result['info'],
61
+ 'process_details': shared_dict_proxy[result['pid']]
62
+ }
63
+
64
+ break
65
+
66
+ process_poller.stop()
67
+ manager.shutdown()
30
68
  """
31
69
 
32
70
  self.wait_before_pid_remove_seconds: float = wait_before_pid_remove_seconds
71
+ self.process_pool_shared_dict_proxy: multiprocessing.managers.DictProxy = process_pool_shared_dict_proxy
33
72
 
34
73
  self._processes: dict = dict()
35
74
  self._running: bool = False
@@ -69,6 +108,11 @@ class SimpleProcessPool:
69
108
  'cmdline': command_line
70
109
  }
71
110
 
111
+ # Update the multiprocessing shared dict proxy.
112
+ if self.process_pool_shared_dict_proxy:
113
+ self.process_pool_shared_dict_proxy.clear()
114
+ self.process_pool_shared_dict_proxy.update(self._processes)
115
+
72
116
  # print_api(f'Process [{process_id}] added to the pool.', color='blue')
73
117
 
74
118
  def _thread_process_termination(self):
@@ -109,3 +153,56 @@ class SimpleProcessPool:
109
153
  time.sleep(self.wait_before_pid_remove_seconds)
110
154
  _ = self._processes.pop(process_id, None)
111
155
  # print_api(f'Process [{process_id}] removed from the pool.', color='yellow')
156
+
157
+
158
+ class PidProcessConverter:
159
+ """
160
+ This class is used to get the process details by PID from the process pool shared dict proxy.
161
+ """
162
+
163
+ def __init__(
164
+ self,
165
+ process_pool_shared_dict_proxy: multiprocessing.managers.DictProxy
166
+ ):
167
+ """
168
+ :param process_pool_shared_dict_proxy: multiprocessing.managers.DictProxy, multiprocessing shared dict proxy.
169
+ """
170
+
171
+ self.process_pool_shared_dict_proxy: multiprocessing.managers.DictProxy = process_pool_shared_dict_proxy
172
+
173
+ self.get_process_with_dll_instance = get_process_name_cmd_dll.ProcessNameCmdline(load_dll=True)
174
+
175
+ def get_process_by_pid(self, pid: int):
176
+ """
177
+ THIS FUNCTION WILL RUN OUTSIDE THE PROCESS POOL PROCESS. Inside the function that needs
178
+ the pid to process conversion.
179
+ Get the process details by PID from the process pool shared dict proxy.
180
+
181
+ :param pid: int, the process ID.
182
+ :return: dict, the process details.
183
+ """
184
+
185
+ counter = 0
186
+ process_dict: dict = dict()
187
+ while counter < WAIT_FOR_PROCESS_POLLER_PID_COUNTS:
188
+ if pid not in self.process_pool_shared_dict_proxy:
189
+ time.sleep(0.1)
190
+ counter += 1
191
+ else:
192
+ process_dict = self.process_pool_shared_dict_proxy[pid]
193
+ break
194
+
195
+ if counter == WAIT_FOR_PROCESS_POLLER_PID_COUNTS and not process_dict:
196
+ # Last resort, try to get the process name by current process snapshot.
197
+ processes = self.get_process_with_dll_instance.get_process_details(as_dict=True)
198
+ if pid not in processes:
199
+ print_api(f"Error: Couldn't get the process name for PID: {pid}.", color='red')
200
+ process_dict = {
201
+ 'name': pid,
202
+ 'cmdline': ''
203
+ }
204
+ else:
205
+ process_dict = processes[pid]
206
+
207
+ return process_dict
208
+
atomicshop/web.py CHANGED
@@ -153,6 +153,7 @@ def download(
153
153
  file_url: str,
154
154
  target_directory: str = None,
155
155
  file_name: str = None,
156
+ headers: dict = None,
156
157
  **kwargs
157
158
  ) -> str:
158
159
  """
@@ -163,6 +164,7 @@ def download(
163
164
  If not specified, temporary directory will be used.
164
165
  :param file_name: string, file name (example: file.zip) that you want the downloaded file to be saved as.
165
166
  If not specified, the default filename from 'file_url' will be used.
167
+ :param headers: dictionary, HTTP headers to use when downloading the file.
166
168
  :return: string, full file path of downloaded file. If download failed, 'None' will be returned.
167
169
  """
168
170
 
@@ -194,7 +196,10 @@ def download(
194
196
  # In order to use 'urllib.request', it is not enough to 'import urllib', you need to 'import urllib.request'.
195
197
  # Open the URL for data gathering with SSL context from certifi
196
198
  ssl_context = ssl.create_default_context(cafile=certifi.where())
197
- file_to_download = urllib.request.urlopen(file_url, context=ssl_context)
199
+
200
+ # Build a Request object with headers if provided.
201
+ req = urllib.request.Request(file_url, headers=headers or {})
202
+ file_to_download = urllib.request.urlopen(req, context=ssl_context)
198
203
 
199
204
  # Check status of url.
200
205
  if not is_status_ok(status_code=file_to_download.status, **kwargs):
@@ -225,6 +230,7 @@ def download(
225
230
  else:
226
231
  print_to_console()
227
232
  break
233
+
228
234
  if aggregated_bytes_int == file_size_bytes_int:
229
235
  print_api.print_api(f'Successfully Downloaded to: {file_path}', color="green", **kwargs)
230
236
  else:
@@ -236,8 +242,13 @@ def download(
236
242
 
237
243
 
238
244
  def download_and_extract_file(
239
- file_url: str, target_directory: str, file_name: str = str(), archive_remove_first_directory: bool = False,
240
- **kwargs):
245
+ file_url: str,
246
+ target_directory: str,
247
+ file_name: str = str(),
248
+ archive_remove_first_directory: bool = False,
249
+ headers: dict = None,
250
+ **kwargs
251
+ ):
241
252
  """
242
253
  This function will download the branch file from GitHub, extract the file and remove the file, leaving
243
254
  only the extracted folder.
@@ -248,12 +259,13 @@ def download_and_extract_file(
248
259
  :param target_directory: string, target directory where to save the file.
249
260
  :param archive_remove_first_directory: boolean, sets if archive extract function will extract the archive without
250
261
  first directory in the archive. Check reference in the 'extract_archive_with_zipfile' function.
262
+ :param headers: dictionary, HTTP headers to use when downloading the file.
251
263
  :return:
252
264
  """
253
265
 
254
266
  # Download the repo to current working directory and return full file path of downloaded file.
255
267
  file_path = download(
256
- file_url=file_url, target_directory=target_directory, file_name=file_name, **kwargs)
268
+ file_url=file_url, target_directory=target_directory, file_name=file_name, headers=headers, **kwargs)
257
269
 
258
270
  # Extract the archive and remove the first directory.
259
271
  zips.extract_archive_with_zipfile(
@@ -33,11 +33,21 @@ class GoogleLLM:
33
33
  os.environ["API_KEY"] = llm_api_key
34
34
  genai.configure(api_key=os.environ["API_KEY"])
35
35
 
36
- def get_current_models(self) -> list[str]:
37
- """ Function to get the current models available in the Gemini API """
38
- result_list: list[str] = []
36
+ def get_current_models(
37
+ self,
38
+ full_info: bool = False
39
+ ) -> list:
40
+ """
41
+ Function to get the current models available in the Gemini API
42
+
43
+ :param full_info: bool, if True, returns the full information about the models, otherwise only the names for API usage.
44
+ """
45
+ result_list: list = []
39
46
  for model in self.genai.list_models():
40
- result_list.append(model.name)
47
+ if full_info:
48
+ result_list.append(model)
49
+ else:
50
+ result_list.append(model.name)
41
51
 
42
52
  return result_list
43
53
 
@@ -57,7 +67,7 @@ class GoogleLLM:
57
67
  number_of_characters_per_link: int = 15000,
58
68
  temperature: float = 0,
59
69
  max_output_tokens: int = 4096,
60
- model_name: str = 'gemini-pro'
70
+ model_name: str = 'gemini-2.0-flash-thinking-exp-01-21'
61
71
  ) -> str:
62
72
  """
63
73
  Function to get the answer to a question by searching Google Custom Console API and processing the content using Gemini API.
@@ -3,7 +3,6 @@ import fnmatch
3
3
 
4
4
  from .. import web, urls
5
5
  from ..print_api import print_api
6
- from ..basics import strings
7
6
 
8
7
 
9
8
  class MoreThanOneReleaseFoundError(Exception):
@@ -14,13 +13,14 @@ class NoReleaseFoundError(Exception):
14
13
 
15
14
 
16
15
  class GitHubWrapper:
17
- # You also can use '.tar.gz' as extension.
18
16
  def __init__(
19
17
  self,
20
18
  user_name: str = None,
21
19
  repo_name: str = None,
22
20
  repo_url: str = None,
23
21
  branch: str = 'master',
22
+ path: str = None,
23
+ pat: str = None,
24
24
  branch_file_extension: str = '.zip'
25
25
  ):
26
26
  """
@@ -34,7 +34,11 @@ class GitHubWrapper:
34
34
  You can provide the full url to the repository directly and then extract the user_name and repo_name from it
35
35
  with the 'build_links_from_repo_url' function.
36
36
  :param branch: str, the branch name. The default is 'master'.
37
+ :param path: str, the path to the file/folder inside the repo that we'll do certain actions on.
38
+ Actions example: get_latest_commit_comment, download_path_from_branch.
39
+ :param pat: str, the personal access token to the repo.
37
40
  :param branch_file_extension: str, the branch file extension. The default is '.zip'.
41
+ You also can use '.tar.gz' as extension.
38
42
 
39
43
  ================================================================================================================
40
44
  Usage to download the 'master' branch file:
@@ -84,6 +88,8 @@ class GitHubWrapper:
84
88
  self.repo_name: str = repo_name
85
89
  self.repo_url: str = repo_url
86
90
  self.branch: str = branch
91
+ self.path: str = path
92
+ self.pat: str = pat
87
93
  self.branch_file_extension: str = branch_file_extension
88
94
 
89
95
  # Default variables.
@@ -94,7 +100,10 @@ class GitHubWrapper:
94
100
  # Initialize variables.
95
101
  self.branch_download_link: str = str()
96
102
  self.branch_downloaded_folder_name: str = str()
103
+ self.api_url: str = str()
97
104
  self.latest_release_json_url: str = str()
105
+ self.commits_url: str = str()
106
+ self.contents_url: str = str()
98
107
 
99
108
  if self.user_name and self.repo_name and not self.repo_url:
100
109
  self.build_links_from_user_and_repo()
@@ -102,21 +111,33 @@ class GitHubWrapper:
102
111
  if self.repo_url and not self.user_name and not self.repo_name:
103
112
  self.build_links_from_repo_url()
104
113
 
114
+ def _get_headers(self) -> dict:
115
+ """
116
+ Returns headers for the GitHub API requests. If a personal access token (PAT) is provided, it adds the
117
+ 'Authorization' header.
118
+ """
119
+ headers = {}
120
+ if self.pat:
121
+ headers['Authorization'] = f'token {self.pat}'
122
+ return headers
123
+
105
124
  def build_links_from_user_and_repo(self, **kwargs):
106
125
  if not self.user_name or not self.repo_name:
107
- message = "'user_name' or 'repo_name' is empty."
108
- print_api(message, color="red", error_type=True, **kwargs)
126
+ raise ValueError("'user_name' or 'repo_name' is empty.")
109
127
 
110
128
  self.repo_url = f'https://{self.domain}/{self.user_name}/{self.repo_name}'
111
129
  self.branch_download_link = f'{self.repo_url}/{self.archive_directory}/{self.branch_file_name}'
112
130
  self.branch_downloaded_folder_name = f'{self.repo_name}-{self.branch}'
113
- self.latest_release_json_url: str = \
114
- f'https://api.{self.domain}/repos/{self.user_name}/{self.repo_name}/releases/latest'
131
+
132
+ self.api_url = f'https://api.{self.domain}/repos/{self.user_name}/{self.repo_name}'
133
+
134
+ self.latest_release_json_url: str = f'{self.api_url}/releases/latest'
135
+ self.commits_url: str = f'{self.api_url}/commits'
136
+ self.contents_url: str = f'{self.api_url}/contents'
115
137
 
116
138
  def build_links_from_repo_url(self, **kwargs):
117
139
  if not self.repo_url:
118
- message = "'repo_url' is empty."
119
- print_api(message, color="red", error_type=True, **kwargs)
140
+ raise ValueError("'repo_url' is empty.")
120
141
 
121
142
  repo_url_parsed = urls.url_parser(self.repo_url)
122
143
  self.check_github_domain(repo_url_parsed['netloc'])
@@ -134,25 +155,70 @@ class GitHubWrapper:
134
155
  self,
135
156
  target_directory: str,
136
157
  archive_remove_first_directory: bool = False,
158
+ download_each_file: bool = False,
137
159
  **kwargs
138
160
  ):
139
161
  """
140
162
  This function will download the branch file from GitHub, extract the file and remove the file, leaving
141
163
  only the extracted folder.
164
+ If the 'path' was specified during the initialization of the class, only the path will be downloaded.
142
165
 
143
- :param target_directory:
144
- :param archive_remove_first_directory: boolean, sets if archive extract function will extract the archive
166
+ :param target_directory: str, the target directory to download the branch/path.
167
+ :param archive_remove_first_directory: boolean, available only if 'path' was not specified during the initialization
168
+ Sets if archive extract function will extract the archive
145
169
  without first directory in the archive. Check reference in the
146
170
  'archiver.zip.extract_archive_with_zipfile' function.
171
+ :param download_each_file: bool, available only if 'path' was specified during the initialization of the class.
172
+ Sets if each file will be downloaded separately.
173
+
174
+ True: Meaning the directory '/home/user/Downloads/files/' will be created and each file will be downloaded
175
+ ('file1.txt', 'file2.txt', 'file3.txt') separately to this directory.
176
+ False: The branch file will be downloaded to temp directory then the provided path
177
+ will be extracted from there, then the downloaded branch directory will be removed.
147
178
  :return:
148
179
  """
149
180
 
150
- # Download the repo to current working directory, extract and remove the archive.
151
- web.download_and_extract_file(
152
- file_url=self.branch_download_link,
153
- target_directory=target_directory,
154
- archive_remove_first_directory=archive_remove_first_directory,
155
- **kwargs)
181
+ headers: dict = self._get_headers()
182
+
183
+ if not download_each_file:
184
+ # Download the repo to current working directory, extract and remove the archive.
185
+ web.download_and_extract_file(
186
+ file_url=self.branch_download_link,
187
+ target_directory=target_directory,
188
+ archive_remove_first_directory=archive_remove_first_directory,
189
+ headers=headers,
190
+ **kwargs)
191
+ else:
192
+ # Build the URL for the contents API
193
+ contents_url = f"{self.contents_url}/{self.path}"
194
+ params = {'ref': self.branch}
195
+
196
+ response = requests.get(contents_url, headers=headers, params=params)
197
+ response.raise_for_status()
198
+
199
+ items = response.json()
200
+
201
+ # Ensure the target directory exists.
202
+ os.makedirs(target_directory, exist_ok=True)
203
+
204
+ for item in items:
205
+ item_path = os.path.join(target_directory, item['name'])
206
+ if item['type'] == 'file':
207
+ # Download the file using the provided download URL.
208
+ file_url = item['download_url']
209
+ # You can reuse your download function here, passing the headers.
210
+ download(
211
+ file_url=file_url,
212
+ target_directory=target_directory,
213
+ file_name=item['name'],
214
+ headers=headers
215
+ )
216
+ elif item['type'] == 'dir':
217
+ # Recursively download subdirectories.
218
+ self.download_folder_contents(
219
+ folder_path=os.path.join(folder_path, item['name']),
220
+ target_directory=item_path
221
+ )
156
222
 
157
223
  def get_latest_release_url(
158
224
  self,
@@ -212,10 +278,13 @@ class GitHubWrapper:
212
278
  :return:
213
279
  """
214
280
 
281
+ headers: dict = self._get_headers()
282
+
215
283
  # Get the latest release url.
216
284
  found_url = self.get_latest_release_url(string_pattern=string_pattern, exclude_string=exclude_string, **kwargs)
217
285
 
218
- downloaded_file_path = web.download(file_url=found_url, target_directory=target_directory, **kwargs)
286
+ downloaded_file_path = web.download(
287
+ file_url=found_url, target_directory=target_directory, headers=headers, **kwargs)
219
288
  return downloaded_file_path
220
289
 
221
290
  def download_and_extract_latest_release(
@@ -238,6 +307,8 @@ class GitHubWrapper:
238
307
  :return:
239
308
  """
240
309
 
310
+ headers: dict = self._get_headers()
311
+
241
312
  # Get the latest release url.
242
313
  found_url = self.get_latest_release_url(string_pattern=string_pattern, exclude_string=exclude_string, **kwargs)
243
314
 
@@ -245,6 +316,7 @@ class GitHubWrapper:
245
316
  file_url=found_url,
246
317
  target_directory=target_directory,
247
318
  archive_remove_first_directory=archive_remove_first_directory,
319
+ headers=headers,
248
320
  **kwargs)
249
321
 
250
322
  def get_the_latest_release_json(self):
@@ -252,7 +324,10 @@ class GitHubWrapper:
252
324
  This function will get the latest releases json.
253
325
  :return:
254
326
  """
255
- response = requests.get(self.latest_release_json_url)
327
+
328
+ headers: dict = self._get_headers()
329
+
330
+ response = requests.get(self.latest_release_json_url, headers=headers)
256
331
  return response.json()
257
332
 
258
333
  def get_the_latest_release_version_number(self):
@@ -261,3 +336,114 @@ class GitHubWrapper:
261
336
  :return:
262
337
  """
263
338
  return self.get_the_latest_release_json()['tag_name']
339
+
340
+ def get_latest_commit_comment(self):
341
+ """
342
+ This function retrieves the commit message (comment) of the latest commit on the specified branch.
343
+ It uses the GitHub API endpoint for commits.
344
+
345
+ :return: str, the commit message of the latest commit.
346
+ """
347
+
348
+ headers: dict = self._get_headers()
349
+
350
+ # Use query parameters to filter commits by branch (sha) and limit results to 1
351
+ params: dict = {
352
+ 'sha': self.branch,
353
+ 'per_page': 1
354
+ }
355
+
356
+ if self.path:
357
+ params['path'] = self.path
358
+
359
+ response = requests.get(self.commits_url, headers=headers, params=params)
360
+ response.raise_for_status() # Raises an HTTPError if the HTTP request returned an unsuccessful status code.
361
+
362
+ commits = response.json()
363
+ if not commits:
364
+ return None
365
+
366
+ commit_message = commits[0].get("commit", {}).get("message", "")
367
+ return commit_message
368
+
369
+
370
+ def parse_github_args():
371
+ import argparse
372
+
373
+ parser = argparse.ArgumentParser(description='GitHub Wrapper')
374
+ parser.add_argument(
375
+ '-u', '--repo_url', type=str, required=True,
376
+ help='The repository url. Example: https://github.com/{user_name}/{repo_name}')
377
+ parser.add_argument(
378
+ '-b', '--branch', type=str, required=True,
379
+ help='The branch name. The specific branch from the repo you want to operate on.')
380
+ parser.add_argument(
381
+ '-p', '--path', type=str, default=None,
382
+ help="The path to the file/folder inside the repo that we'll do certain actions on.\n"
383
+ "Available actions: get_latest_commit_comment, download_path_from_branch.")
384
+ parser.add_argument(
385
+ '-t', '--target_directory', type=str, default=None,
386
+ help='The target directory to download the file/folder.'
387
+ )
388
+ parser.add_argument(
389
+ '--pat', type=str, default=None,
390
+ help='The personal access token to the repo.')
391
+ parser.add_argument(
392
+ '-glcc', '--get_latest_commit_comment', action='store_true', default=False,
393
+ help='Sets if the latest commit comment will be printed.')
394
+ parser.add_argument(
395
+ '-db', '--download_branch', action='store_true', default=False,
396
+ help='Sets if the branch will be downloaded. In conjunction with path, only the path will be downloaded.')
397
+
398
+ return parser.parse_args()
399
+
400
+
401
+ def github_wrapper_main(
402
+ repo_url: str,
403
+ branch: str,
404
+ path: str = None,
405
+ target_directory: str = None,
406
+ pat: str = None,
407
+ get_latest_commit_comment: bool = False,
408
+ download_branch: bool = False
409
+ ):
410
+ """
411
+ This function is the main function for the GitHubWrapper class.
412
+ :param repo_url: str, the repository url.
413
+ Example: https://github.com/{user_name}/{repo_name}
414
+ :param branch: str, the branch name. The specific branch from the repo you want to operate on.
415
+ :param path: str, the path to the file/folder for which the commit message should be retrieved.
416
+ :param target_directory: str, the target directory to download the file/folder.
417
+ :param pat: str, the personal access token to the repo.
418
+ :param get_latest_commit_comment: bool, sets if the latest commit comment will be printed.
419
+ :param download_branch: bool, sets if the branch will be downloaded. In conjunction with path, only the path will be
420
+ downloaded.
421
+ :return:
422
+ """
423
+
424
+ git_wrapper = GitHubWrapper(repo_url=repo_url, branch=branch, path=path, pat=pat)
425
+
426
+ if get_latest_commit_comment:
427
+ commit_comment = git_wrapper.get_latest_commit_comment()
428
+ print_api(commit_comment)
429
+ return 0
430
+
431
+ if download_branch:
432
+ git_wrapper.download_and_extract_branch(
433
+ target_directory=target_directory, download_each_file=True, download_branch_and_extract=False,
434
+ archive_remove_first_directory=True)
435
+
436
+ return 0
437
+
438
+
439
+ def github_wrapper_main_with_args():
440
+ args = parse_github_args()
441
+
442
+ return github_wrapper_main(
443
+ repo_url=args.repo_url,
444
+ branch=args.branch,
445
+ path=args.path,
446
+ target_directory=args.target_directory,
447
+ pat=args.pat,
448
+ get_latest_commit_comment=args.get_latest_commit_comment
449
+ )
@@ -404,6 +404,33 @@ class MongoDBWrapper:
404
404
 
405
405
  return count
406
406
 
407
+ def aggregate_entries_in_collection(
408
+ self,
409
+ collection_name: str,
410
+ pipeline: list[dict]
411
+ ) -> list[dict]:
412
+ """
413
+ Aggregate entries in a MongoDB collection by query.
414
+
415
+ :param collection_name: str, the name of the collection.
416
+ :param pipeline: list of dictionaries, the pipeline to search for.
417
+ Example, search for all entries with column name 'name' equal to 'John':
418
+ pipeline = [{'$match': {'name': 'John'}}]
419
+ Example, return all entries from collection:
420
+ pipeline = []
421
+
422
+ :return: list of dictionaries, the list of entries that match the query.
423
+ """
424
+
425
+ self.connect()
426
+
427
+ aggregation: list[dict] = aggregate_entries_in_collection(
428
+ database=self.db, collection_name=collection_name,
429
+ pipeline=pipeline, mongo_client=self.client, close_client=False)
430
+
431
+ return aggregation
432
+
433
+
407
434
  def get_client(self):
408
435
  return self.client
409
436
 
@@ -1148,6 +1175,60 @@ def count_entries_in_collection(
1148
1175
  return count
1149
1176
 
1150
1177
 
1178
+ def aggregate_entries_in_collection(
1179
+ database: Union[str, pymongo.database.Database],
1180
+ collection_name: str,
1181
+ pipeline: list,
1182
+ mongo_client: pymongo.MongoClient = None,
1183
+ close_client: bool = False
1184
+ ) -> list:
1185
+ """
1186
+ Perform an aggregation pipeline operation on a MongoDB collection.
1187
+ For example, we count the number of entries with the same 'sha256' value that is provided in a list:
1188
+ pipeline = [
1189
+ {"$match": {"sha256": {"$in": ["hash1", "hash2"]}}},
1190
+ {"$group": {"_id": "$sha256", "count": {"$sum": 1}}}
1191
+ ]
1192
+ And we will get the result:
1193
+ [
1194
+ {"_id": "hash1", "count": 1},
1195
+ {"_id": "hash2", "count": 1}
1196
+ ]
1197
+ Meaning we will get separate counts for each 'sha256' value in the list.
1198
+
1199
+ :param database: String or the database object.
1200
+ str - the name of the database. In this case the database object will be created.
1201
+ pymongo.database.Database - the database object that will be used instead of creating a new one.
1202
+ :param collection_name: str, the name of the collection.
1203
+ :param pipeline: list, the aggregation pipeline to execute.
1204
+ Example:
1205
+ pipeline = [
1206
+ {"$match": {"sha256": {"$in": ["hash1", "hash2"]}}},
1207
+ {"$group": {"_id": "$sha256", "count": {"$sum": 1}}}
1208
+ ]
1209
+ :param mongo_client: pymongo.MongoClient, the connection object.
1210
+ If None, a new connection will be created to default URI.
1211
+ :param close_client: bool, if True, the connection will be closed after the operation.
1212
+
1213
+ :return: list, the results of the aggregation pipeline.
1214
+ """
1215
+ if not mongo_client:
1216
+ mongo_client = connect()
1217
+ close_client = True
1218
+
1219
+ db = _get_pymongo_db_from_string_or_pymongo_db(database, mongo_client)
1220
+ collection = db[collection_name]
1221
+
1222
+ # Perform aggregation
1223
+ results = collection.aggregate(pipeline)
1224
+
1225
+ if close_client:
1226
+ mongo_client.close()
1227
+
1228
+ # Return the results as a list
1229
+ return list(results)
1230
+
1231
+
1151
1232
  def delete_all_entries_from_collection(
1152
1233
  database: Union[str, pymongo.database.Database],
1153
1234
  collection_name: str,
@@ -69,7 +69,7 @@ def enable_audit_process_creation(print_kwargs: dict = None):
69
69
  :param print_kwargs: Optional keyword arguments for the print function.
70
70
  """
71
71
  if is_audit_process_creation_enabled():
72
- print_api("Audit Process Creation is already enabled.", color='yellow', **(print_kwargs or {}))
72
+ print_api("Audit Process Creation is already enabled.", color='blue', **(print_kwargs or {}))
73
73
  return
74
74
 
75
75
  # Enable "Audit Process Creation" policy
@@ -124,7 +124,7 @@ def enable_command_line_auditing(print_kwargs: dict = None):
124
124
 
125
125
  if is_command_line_auditing_enabled():
126
126
  print_api(
127
- "'Include command line in process creation events' is already enabled.", color='yellow',
127
+ "[Include command line in process creation events] is already enabled.", color='blue',
128
128
  **(print_kwargs or {}))
129
129
  return
130
130
 
@@ -135,11 +135,11 @@ def enable_command_line_auditing(print_kwargs: dict = None):
135
135
  winreg.SetValueEx(reg_key, PROCESS_CREATION_INCLUDE_CMDLINE_VALUE, 0, winreg.REG_DWORD, 1)
136
136
 
137
137
  print_api(
138
- "Successfully enabled 'Include command line in process creation events'.",
138
+ "Successfully enabled [Include command line in process creation events].",
139
139
  color='green', **(print_kwargs or {}))
140
140
  except WindowsError as e:
141
141
  print_api(
142
- f"Failed to enable 'Include command line in process creation events': {e}", error_type=True,
142
+ f"Failed to enable [Include command line in process creation events]: {e}", error_type=True,
143
143
  color='red', **(print_kwargs or {}))
144
144
 
145
145
 
@@ -58,7 +58,7 @@ def enable_audit_process_termination(print_kwargs: dict = None):
58
58
  :param print_kwargs: Optional keyword arguments for the print function.
59
59
  """
60
60
  if is_audit_process_termination_enabled():
61
- print_api("Audit Process Termination is already enabled.", color='yellow', **(print_kwargs or {}))
61
+ print_api("Audit Process Termination is already enabled.", color='blue', **(print_kwargs or {}))
62
62
  return
63
63
 
64
64
  audit_policy_command = [
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: atomicshop
3
- Version: 2.19.7
3
+ Version: 2.19.9
4
4
  Summary: Atomic functions and classes to make developer life easier
5
5
  Author: Denis Kras
6
6
  License: MIT License
@@ -1,4 +1,4 @@
1
- atomicshop/__init__.py,sha256=mP4RKVYs6CFRxy4SuFUKdwycVZS11TlGgfMVUl8GYy4,123
1
+ atomicshop/__init__.py,sha256=miL9aLeTH24dv_rrq3_DNotYp3c4Vv78Z3CWjZQyXrA,123
2
2
  atomicshop/_basics_temp.py,sha256=6cu2dd6r2dLrd1BRNcVDKTHlsHs_26Gpw8QS6v32lQ0,3699
3
3
  atomicshop/_create_pdf_demo.py,sha256=Yi-PGZuMg0RKvQmLqVeLIZYadqEZwUm-4A9JxBl_vYA,3713
4
4
  atomicshop/_patch_import.py,sha256=ENp55sKVJ0e6-4lBvZnpz9PQCt3Otbur7F6aXDlyje4,6334
@@ -45,7 +45,7 @@ atomicshop/urls.py,sha256=aJ0NGS9qqaKeqjkkWBs80jaBBg6MYBiPuLIyPGxscVc,1557
45
45
  atomicshop/uuids.py,sha256=JSQdm3ZTJiwPQ1gYe6kU0TKS_7suwVrHc8JZDGYlydM,2214
46
46
  atomicshop/venvs.py,sha256=D9lwOoObkYoRx-weuoAmbvN-RdSHhVm4DE9TVl-utAs,903
47
47
  atomicshop/virtualization.py,sha256=LPP4vjE0Vr10R6DA4lqhfX_WaNdDGRAZUW0Am6VeGco,494
48
- atomicshop/web.py,sha256=GLdTXgMxg1_0UQaXC4bOvARVyuFg7SPIeJdsCHV8rNE,11662
48
+ atomicshop/web.py,sha256=No3hExQis7cmMay0GdHULG67RMLd5txg-3a-C7CWMd0,12052
49
49
  atomicshop/websocket_parse.py,sha256=aLHWyKqaYqEn_MRBWm2L6rIl6QPmqbVrjEXE_rBzwCw,16711
50
50
  atomicshop/a_installs/ubuntu/docker_rootless.py,sha256=9IPNtGZYjfy1_n6ZRt7gWz9KZgR6XCgevjqq02xk-o0,281
51
51
  atomicshop/a_installs/ubuntu/docker_sudo.py,sha256=JzayxeyKDtiuT4Icp2L2LyFRbx4wvpyN_bHLfZ-yX5E,281
@@ -57,9 +57,10 @@ atomicshop/a_installs/win/fibratus.py,sha256=TU4e9gdZ_zI73C40uueJ59pD3qmN-UFGdX5
57
57
  atomicshop/a_installs/win/mongodb.py,sha256=AqyItXu19aaoe49pppDxtEkXey6PMy0PoT2Y_RmPpPE,179
58
58
  atomicshop/a_installs/win/nodejs.py,sha256=U519Dyt4bsQPbEg_PwnZL5tsbfqDr1BbhxwoQFZsSKo,200
59
59
  atomicshop/a_installs/win/pycharm.py,sha256=j_RSd7aDOyC3yDd-_GUTMLlQTmDrqtVFG--oUfGLiZk,140
60
- atomicshop/a_installs/win/robocorp.py,sha256=tCUrBHFynAZK81To8vRBvchOwY6BWc4LhBgTxXb0az4,2132
60
+ atomicshop/a_installs/win/robocorp.py,sha256=2E28iaRlAZROoxmXwiXv8rqTjVcdBT2UJ3B8nxrtmkc,2245
61
61
  atomicshop/a_installs/win/wsl_ubuntu_lts.py,sha256=dZbPRLNKFeMd6MotjkE6UDY9cOiIaaclIdR1kGYWI50,139
62
62
  atomicshop/a_mains/dns_gateway_setting.py,sha256=ncc2rFQCChxlNP59UshwmTonLqC6MWblrVAzbbz-13M,149
63
+ atomicshop/a_mains/github_wrapper.py,sha256=F-PoZknVCxWPN0PTO6l7ZNiaYvo7OVFKFI_zlPt56ps,169
63
64
  atomicshop/a_mains/msi_unpacker.py,sha256=5hrkqETYt9HIqR_3PMf32_q06kCrIcsdm_RJV9oY438,188
64
65
  atomicshop/a_mains/search_for_hyperlinks_in_docx.py,sha256=HkIdo_Sz9nPbbbJf1mwfwFkyI7vkvpH8qiIkuYopN4w,529
65
66
  atomicshop/a_mains/FACT/factw_fact_extractor_docker_image_main_sudo.py,sha256=DDKX3Wp2SmzMCEtCIEOUbEKMob2ZQ7VEQGLEf9uYXrs,320
@@ -112,12 +113,13 @@ atomicshop/basics/threads.py,sha256=xvgdDJdmgN0wmmARoZ-H7Kvl1GOcEbvgaeGL4M3Hcx8,
112
113
  atomicshop/basics/timeit_template.py,sha256=fYLrk-X_dhdVtnPU22tarrhhvlggeW6FdKCXM8zkX68,405
113
114
  atomicshop/basics/tracebacks.py,sha256=pMdnTUTYKdY9SVtMPNBUKL_4uSKulc54NNQjOIHjKxE,506
114
115
  atomicshop/etws/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
116
+ atomicshop/etws/_pywintrace_fix.py,sha256=nHrtnAb796eOZ6FlCqcsuRh_TSqSPp6JXLN6TBIz-Us,418
115
117
  atomicshop/etws/const.py,sha256=v3x_IdCYeSKbCGywiZFOZln80ldpwKW5nuMDuUe51Jg,1257
116
118
  atomicshop/etws/providers.py,sha256=CXNx8pYdjtpLIpA66IwrnE64XhY4U5ExnFBMLEb8Uzk,547
117
119
  atomicshop/etws/sessions.py,sha256=b_KeiOvgOBJezJokN81TRlrvJiQNJlIWN4Z6UVjuxP0,1335
118
- atomicshop/etws/trace.py,sha256=QcB_NYP-KPOaafYd6jCFdPPQsBu4jzac4QicJdWJAoE,7359
120
+ atomicshop/etws/trace.py,sha256=LC5b_WGwk5LTVggnQ55F4Iklis7B7hVFWusOt1jEHnI,8068
119
121
  atomicshop/etws/traces/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
120
- atomicshop/etws/traces/trace_dns.py,sha256=WvOZm7KNdP4r6ofkZhUGi9WjtYlkV3mUp_yxita3Qg4,6399
122
+ atomicshop/etws/traces/trace_dns.py,sha256=0a8zoq59NyJqy6Z6lf1qkvxrHcrCSnyYEAfLfBdJIuI,6880
121
123
  atomicshop/etws/traces/trace_sysmon_process_creation.py,sha256=OM-bkK38uYMwWLZKNOTDa0Xdk3sO6sqsxoMUIiPvm5g,4656
122
124
  atomicshop/file_io/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
123
125
  atomicshop/file_io/csvs.py,sha256=zv0kKjRT-ZWRi0WpMIUQ_FKyP9Dt0f5Bc98Qsj6ClPU,9495
@@ -155,7 +157,7 @@ atomicshop/mitm/statistic_analyzer_helper/moving_average_helper.py,sha256=UnnY_F
155
157
  atomicshop/monitor/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
156
158
  atomicshop/monitor/change_monitor.py,sha256=K5NlVp99XIDDPnQQMdru4BDmua_DtcDIhVAzkTOvD5s,7673
157
159
  atomicshop/monitor/checks/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
158
- atomicshop/monitor/checks/dns.py,sha256=GHBQ2GEPaU3hAmK6l2vM2PKTcBNzDDF9ThZixqnl9Qk,7108
160
+ atomicshop/monitor/checks/dns.py,sha256=714NedT1ULW7DFk_LM5S4VXYALurKybWXKhVOiEzZjs,7107
159
161
  atomicshop/monitor/checks/file.py,sha256=2tIDSlX2KZNc_9i9ji1tcOqupbFTIOj7cKXLyBEDWMk,3263
160
162
  atomicshop/monitor/checks/network.py,sha256=CGZWl4WlQrxayZeVF9JspJXwYA-zWx8ECWTVGSlXc98,3825
161
163
  atomicshop/monitor/checks/process_running.py,sha256=x66wd6-l466r8sbRQaIli0yswyGt1dH2DVXkGDL6O0Q,1891
@@ -166,7 +168,7 @@ atomicshop/permissions/ubuntu_permissions.py,sha256=n8z1vcIXDts4zLVue33dtJiTopjg
166
168
  atomicshop/permissions/win_permissions.py,sha256=eDQm1jfK9x_hkbLqIJjFTwfqinAWQ0iSr0kW3XrF1BE,1272
167
169
  atomicshop/process_poller/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
168
170
  atomicshop/process_poller/process_pool.py,sha256=4Qs427qd7OcBxu5PMFU5PTmyuxRy0vgj2GLsRt0IoEw,9565
169
- atomicshop/process_poller/simple_process_pool.py,sha256=zHSxNlY2RTIXdqIyq8pg6kbXrTK7nqrz8iVWvyGWIE8,4505
171
+ atomicshop/process_poller/simple_process_pool.py,sha256=tbeuw30MvNFbore1YooEra1ozZvFR8maKbcGNlBpBKc,8484
170
172
  atomicshop/process_poller/tracer_base.py,sha256=IOiHcnmF-MccOSCErixN5mve9RifZ9cPnGVHCIRchrs,1091
171
173
  atomicshop/process_poller/pollers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
172
174
  atomicshop/process_poller/pollers/psutil_pywin32wmi_dll.py,sha256=XRRfOIy62iOYU8IKRcyECWiL0rqQ35DeYbPsv_SHDVM,4510
@@ -181,7 +183,7 @@ atomicshop/startup/win/startup_folder.py,sha256=2RZEyF-Mf8eWPlt_-OaoGKKnMs6YhELE
181
183
  atomicshop/startup/win/task_scheduler.py,sha256=qALe-8sfthYxsdCViH2r8OsH3x-WauDqteg5RzElPdk,4348
182
184
  atomicshop/web_apis/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
183
185
  atomicshop/web_apis/google_custom_search.py,sha256=R1BnUmBFWZIWkfizSRWoSYoZTdPEjLJ28F_sS2g1jGQ,1558
184
- atomicshop/web_apis/google_llm.py,sha256=X_sG3leUvskPCPryN6YszDFih_X2Ne0OSMA3UbDMKIg,6741
186
+ atomicshop/web_apis/google_llm.py,sha256=JcWkorK0Fu5C3SkZLSzI0TO4CwDscjFsWu1bDhfGYww,7056
185
187
  atomicshop/wrappers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
186
188
  atomicshop/wrappers/_process_wrapper_curl.py,sha256=XkZZXYl7D0Q6UfdWqy-18AvpU0yVp9i2BVD2qRcXlkk,841
187
189
  atomicshop/wrappers/_process_wrapper_tar.py,sha256=WUMZFKNrlG4nJP9tWZ51W7BR1j_pIjsjgyAStmWjRGs,655
@@ -189,7 +191,7 @@ atomicshop/wrappers/astw.py,sha256=VkYfkfyc_PJLIOxByT6L7B8uUmKY6-I8XGZl4t_z828,4
189
191
  atomicshop/wrappers/configparserw.py,sha256=JwDTPjZoSrv44YKwIRcjyUnpN-FjgXVfMqMK_tJuSgU,22800
190
192
  atomicshop/wrappers/cryptographyw.py,sha256=LfzTnwvJE03G6WZryOOf43VKhhnyMakzHpn8DPPCoy4,13252
191
193
  atomicshop/wrappers/ffmpegw.py,sha256=wcq0ZnAe0yajBOuTKZCCaKI7CDBjkq7FAgdW5IsKcVE,6031
192
- atomicshop/wrappers/githubw.py,sha256=Ft-QQ4sewzjEWEiW89A_nv9wcKVaQdq76TWvVS6r9mI,12576
194
+ atomicshop/wrappers/githubw.py,sha256=razdRRkO-f00-E0cZs7LE8qoLd-Mm8hLOWlqKR3Ng0U,20343
193
195
  atomicshop/wrappers/msiw.py,sha256=GQLqud72nfex3kvO1bJSruNriCYTYX1_G1gSf1MPkIA,6118
194
196
  atomicshop/wrappers/numpyw.py,sha256=sBV4gSKyr23kXTalqAb1oqttzE_2XxBooCui66jbAqc,1025
195
197
  atomicshop/wrappers/olefilew.py,sha256=biD5m58rogifCYmYhJBrAFb9O_Bn_spLek_9HofLeYE,2051
@@ -266,7 +268,7 @@ atomicshop/wrappers/mongodbw/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NM
266
268
  atomicshop/wrappers/mongodbw/install_mongodb_ubuntu.py,sha256=2eEOb35T259lhn5koynfTIm1hanxD02zN97ExGSBM2o,4021
267
269
  atomicshop/wrappers/mongodbw/install_mongodb_win.py,sha256=64EUQYx7VuMC3ndO2x3nSErh5NZ_BsqMwGvPcybfC-Q,8499
268
270
  atomicshop/wrappers/mongodbw/mongo_infra.py,sha256=IjEF0jPzQz866MpTm7rnksnyyWQeUT_B2h2DA9ryAio,2034
269
- atomicshop/wrappers/mongodbw/mongodbw.py,sha256=ih3Gd45rg_70y4sGeu0eEJ3sJd9tEN4I5IqHZelRZJw,52854
271
+ atomicshop/wrappers/mongodbw/mongodbw.py,sha256=it1TDnOF64YgDbkkBvUmUb9XGuUg6SwGnHhuqar3aHE,55929
270
272
  atomicshop/wrappers/nodejsw/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
271
273
  atomicshop/wrappers/nodejsw/install_nodejs_ubuntu.py,sha256=wjpJdfAaY92RYl_L9esDIWuBMGeYH35RHJ5BVgMof8Y,6260
272
274
  atomicshop/wrappers/nodejsw/install_nodejs_windows.py,sha256=WvXIcEVnKcQYD-KNwhVP094s__1tt0Ir2Y87MABl8Nc,6283
@@ -299,8 +301,8 @@ atomicshop/wrappers/pywin32w/winshell.py,sha256=i2bKiMldPU7_azsD5xGQDdMwjaM7suKJ
299
301
  atomicshop/wrappers/pywin32w/win_event_log/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
300
302
  atomicshop/wrappers/pywin32w/win_event_log/subscribe.py,sha256=FYo2X0Xm3lb3GIdIt_8usoj7JPSDWj0iwsIJ4OwZLQM,8156
301
303
  atomicshop/wrappers/pywin32w/win_event_log/subscribes/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
302
- atomicshop/wrappers/pywin32w/win_event_log/subscribes/process_create.py,sha256=1PrPiDiuiVfzfzN5BUuxMfUoCgGW7RGgH6HVrjpTnQc,6064
303
- atomicshop/wrappers/pywin32w/win_event_log/subscribes/process_terminate.py,sha256=0k09fiAwKDJO404bjxUWSSSLOiNANl-VTJDD_YLq-I8,3763
304
+ atomicshop/wrappers/pywin32w/win_event_log/subscribes/process_create.py,sha256=IuXABBz60iGCLbvYkeTyn8ZzCiBn9OvyfJdO4q5LLnQ,6060
305
+ atomicshop/wrappers/pywin32w/win_event_log/subscribes/process_terminate.py,sha256=OJFWywGGGkBHq1N0MKGtHSFFQMFQSDVU6FXCRIdssg8,3761
304
306
  atomicshop/wrappers/pywin32w/win_event_log/subscribes/schannel_logging.py,sha256=8nxIcNcbeEuvoBwhujgh7-oIpL9A6J-gg1NM8hOGAVA,3442
305
307
  atomicshop/wrappers/pywin32w/wmis/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
306
308
  atomicshop/wrappers/pywin32w/wmis/helpers.py,sha256=uMXa27UfBpqXInvnmk7CZlqwRI2pg_I_HXelxO9nLLg,5020
@@ -325,8 +327,8 @@ atomicshop/wrappers/socketw/statistics_csv.py,sha256=fgMzDXI0cybwUEqAxprRmY3lqbh
325
327
  atomicshop/wrappers/winregw/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
326
328
  atomicshop/wrappers/winregw/winreg_installed_software.py,sha256=Qzmyktvob1qp6Tjk2DjLfAqr_yXV0sgWzdMW_9kwNjY,2345
327
329
  atomicshop/wrappers/winregw/winreg_network.py,sha256=AENV88H1qDidrcpyM9OwEZxX5svfi-Jb4N6FkS1xtqA,8851
328
- atomicshop-2.19.7.dist-info/LICENSE.txt,sha256=lLU7EYycfYcK2NR_1gfnhnRC8b8ccOTElACYplgZN88,1094
329
- atomicshop-2.19.7.dist-info/METADATA,sha256=eWuLCGUrAwVSQ15odcFHEUvTyhT1m9roQxiCYJk1_t8,10630
330
- atomicshop-2.19.7.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92
331
- atomicshop-2.19.7.dist-info/top_level.txt,sha256=EgKJB-7xcrAPeqTRF2laD_Np2gNGYkJkd4OyXqpJphA,11
332
- atomicshop-2.19.7.dist-info/RECORD,,
330
+ atomicshop-2.19.9.dist-info/LICENSE.txt,sha256=lLU7EYycfYcK2NR_1gfnhnRC8b8ccOTElACYplgZN88,1094
331
+ atomicshop-2.19.9.dist-info/METADATA,sha256=Q4n9PKQsRSE0ite3_TvSdyFuO7agblLmWIMekjlvmx4,10630
332
+ atomicshop-2.19.9.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92
333
+ atomicshop-2.19.9.dist-info/top_level.txt,sha256=EgKJB-7xcrAPeqTRF2laD_Np2gNGYkJkd4OyXqpJphA,11
334
+ atomicshop-2.19.9.dist-info/RECORD,,