atomicshop 2.12.1__py3-none-any.whl → 2.12.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of atomicshop might be problematic. Click here for more details.

atomicshop/__init__.py CHANGED
@@ -1,4 +1,4 @@
1
1
  """Atomic Basic functions and classes to make developer life easier"""
2
2
 
3
3
  __author__ = "Den Kras"
4
- __version__ = '2.12.1'
4
+ __version__ = '2.12.4'
@@ -1,3 +1,6 @@
1
+ import copy
2
+
3
+
1
4
  def remove_duplicates(list_instance: list):
2
5
  # One of the fastest methods.
3
6
  seen = set()
@@ -109,3 +112,14 @@ def get_the_most_frequent_element_from_list(list_instance: list[str]) -> str:
109
112
  """
110
113
 
111
114
  return max(set(list_instance), key=list_instance.count)
115
+
116
+
117
+ def copy_list_of_mutable_objects(list_instance: list) -> list:
118
+ """
119
+ This function will copy the list of mutable objects. Meaning that all the mutable objects inside will be copied
120
+ as well.
121
+ :param list_instance: list.
122
+ :return: list.
123
+ """
124
+
125
+ return copy.deepcopy(list_instance)
atomicshop/datetimes.py CHANGED
@@ -2,6 +2,7 @@ import datetime
2
2
  from datetime import timedelta
3
3
  import time
4
4
  import random
5
+ import re
5
6
 
6
7
 
7
8
  class MonthToNumber:
@@ -46,6 +47,29 @@ class MonthToNumber:
46
47
  'דצמבר': '12'}
47
48
 
48
49
 
50
+ def get_datetime_from_complex_string_by_pattern(complex_string: str, date_pattern: str):
51
+ """
52
+ Function will get datetime object from a complex string by pattern.
53
+
54
+ :param complex_string: string that contains date and time.
55
+ :param date_pattern: pattern that will be used to extract date and time from the string.
56
+ :return: datetime object.
57
+ """
58
+
59
+ # Convert the date pattern to regex pattern
60
+ regex_pattern = re.sub(r'%[a-zA-Z]', r'\\d+', date_pattern)
61
+
62
+ # Find the date part in the file name using the regex pattern
63
+ date_str = re.search(regex_pattern, complex_string)
64
+
65
+ if date_str:
66
+ # Convert the date string to a datetime object based on the given pattern
67
+ date_obj = datetime.datetime.strptime(date_str.group(), date_pattern)
68
+ return date_obj
69
+ else:
70
+ raise ValueError("No valid date found in the string")
71
+
72
+
49
73
  def convert_single_digit_to_zero_padded(string: str):
50
74
  """
51
75
  Function will check if string is a single character digit and will add zero in front of it.
atomicshop/diff_check.py CHANGED
@@ -1,9 +1,11 @@
1
+ import datetime
1
2
  from pathlib import Path
2
- from typing import Union
3
+ from typing import Union, Literal
4
+ import json
3
5
 
4
6
  from .file_io import file_io, jsons
5
7
  from .print_api import print_api
6
- from .basics import list_of_dicts
8
+ from .basics import list_of_dicts, dicts
7
9
 
8
10
 
9
11
  class DiffChecker:
@@ -28,7 +30,8 @@ class DiffChecker:
28
30
  aggregation: bool = False,
29
31
  input_file_path: str = None,
30
32
  input_file_write_only: bool = True,
31
- return_first_cycle: bool = True
33
+ return_first_cycle: bool = True,
34
+ operation_type: Literal['new_objects', 'hit_statistics', 'all_objects', 'single_object'] = None
32
35
  ):
33
36
  """
34
37
  :param check_object: any, object to check if it changed.
@@ -38,7 +41,7 @@ class DiffChecker:
38
41
  The object is 'None' by default, since there are objects that are needed to be provided in the
39
42
  function input for that object. So, not always you know what your object type during class initialization.
40
43
  :param check_object_display_name: string, name of the object to display in the message.
41
- If not specified, the 'check_object' will be displayed.
44
+ If not specified, the provided 'check_object' will be displayed.
42
45
  :param aggregation: boolean, if True, the object will be aggregated with other objects in the list of objects.
43
46
  Meaning, that the object will be checked against the existing objects in the list, and if it is not
44
47
  in the list, it will be added to the list. If it is in the list, it will be ignored.
@@ -62,18 +65,98 @@ class DiffChecker:
62
65
 
63
66
  True: return updated dictionary on first cycle. This is the default.
64
67
  False: don't return updated dictionary on first cycle.
68
+ :param operation_type: string, type of operation to perform. The type must be one of the following:
69
+ 'new_objects': will only store the new objects in the input file.
70
+ 'hit_statistics': will only store the statistics of the entries in the input file.
71
+ 'all_objects': disable the DiffChecker features, meaning any new entries will be emitted as is.
72
+ 'single_object': will store the object as is, without any comparison. Meaning, that the object will be
73
+ compared only to itself, and if it changes, it will be updated.
74
+ None: Nothing will be done, you will get an exception.
75
+
76
+ --------------------------------------------------
77
+
78
+ Working example:
79
+ from atomicshop import diff_check
80
+
81
+
82
+ # Example of checking list of dicts.
83
+ check_list_of_dicts = [
84
+ {'name': 'John', 'age': 25},
85
+ {'name': 'Alice', 'age': 30}
86
+ ]
87
+
88
+ diff_checker = diff_check.DiffChecker(
89
+ check_object=check_list_of_dicts,
90
+ check_object_display_name='List of Dicts',
91
+ operation_type='new_objects',
92
+ input_file_path='D:\\input\\list_of_dicts.json',
93
+ input_file_write_only=True,
94
+ return_first_cycle=True
95
+ )
96
+
97
+ result, message = diff_checker.check_list_of_dicts(
98
+ sort_by_keys=['name']
99
+ )
100
+
101
+ # If result is not None, it means that the object was updated.
102
+ if result:
103
+ print(message)
104
+
105
+ --------------------------------------------------
106
+
107
+ Working example when you need to aggregate a list of dicts, meaning only new entries will be added to the list:
108
+ from atomicshop import diff_check
109
+
110
+
111
+ diff_checker = diff_check.DiffChecker(
112
+ check_object_display_name='List of Dicts',
113
+ input_file_path='D:\\input\\list_of_dicts.json',
114
+ input_file_write_only=True,
115
+ return_first_cycle=True,
116
+ operation_type='new_objects'
117
+ )
118
+
119
+ # Example of checking list of dicts.
120
+ check_list_of_dicts = [
121
+ {'name': 'John', 'age': 25},
122
+ {'name': 'Alice', 'age': 30}
123
+ ]
124
+
125
+ diff_checker.check_object = check_list_of_dicts
126
+ result, message = diff_checker.check_list_of_dicts()
127
+
128
+ # If result is not None, it means that the object was updated.
129
+ if result:
130
+ print(message)
131
+
132
+
133
+ check_list_of_dicts = [
134
+ {'name': 'John', 'age': 25},
135
+ {'name': 'Jessie', 'age': 50}
136
+ ]
137
+
138
+ diff_checker.check_object = check_list_of_dicts
139
+ result, message = diff_checker.check_list_of_dicts()
140
+
141
+ if result:
142
+ print(message)
65
143
  """
66
144
 
67
145
  # 'check_object' can be none, so checking if it not equals empty string.
68
146
  if check_object == "":
69
147
  raise ValueError("[check_object] option can't be empty string.")
70
148
 
149
+ if operation_type and operation_type not in ['new_objects', 'hit_statistics', 'all_objects', 'single_object']:
150
+ raise ValueError(f"[operation_type] must be one of the following: "
151
+ f"'new_objects', 'hit_statistics', 'all_objects', 'single_object'.")
152
+
71
153
  self.check_object = check_object
72
154
  self.check_object_display_name = check_object_display_name
73
155
  self.aggregation: bool = aggregation
74
156
  self.input_file_path: str = input_file_path
75
157
  self.input_file_write_only: bool = input_file_write_only
76
158
  self.return_first_cycle: bool = return_first_cycle
159
+ self.operation_type = operation_type
77
160
 
78
161
  if not self.check_object_display_name:
79
162
  self.check_object_display_name = self.check_object
@@ -119,6 +202,11 @@ class DiffChecker:
119
202
  return self._handle_input_file(sort_by_keys, print_kwargs=print_kwargs)
120
203
 
121
204
  def _handle_input_file(self, sort_by_keys=None, print_kwargs: dict = None):
205
+ # This point is the first one that is shared between the processing functions, so now we can check
206
+ # if the 'operation_type' is set.
207
+ if not self.operation_type:
208
+ raise ValueError("[operation_type] must be specified.")
209
+
122
210
  # If 'input_file_path' was specified, this means that the input file will be created for storing
123
211
  # content of the function to compare.
124
212
  if self.input_file_path:
@@ -130,14 +218,15 @@ class DiffChecker:
130
218
  try:
131
219
  if self.save_as == 'txt':
132
220
  self.previous_content = file_io.read_file(
133
- self.input_file_path, stderr=False, **print_kwargs)
221
+ self.input_file_path, stderr=False, **(print_kwargs or {}))
134
222
  elif self.save_as == 'json':
135
223
  self.previous_content = jsons.read_json_file(
136
- self.input_file_path, stderr=False, **print_kwargs)
224
+ self.input_file_path, stderr=False, **(print_kwargs or {}))
137
225
  except FileNotFoundError as except_object:
138
226
  message = f"Input File [{Path(except_object.filename).name}] doesn't exist - Will create new one."
139
- print_api(message, color='yellow', **print_kwargs)
140
- pass
227
+ print_api(message, color='yellow', **(print_kwargs or {}))
228
+ if not self.input_file_write_only:
229
+ self.previous_content = list()
141
230
 
142
231
  # get the content of current function.
143
232
  if isinstance(self.check_object, list):
@@ -149,10 +238,80 @@ class DiffChecker:
149
238
  result = None
150
239
  message = f'First Cycle on Object: {self.check_object_display_name}'
151
240
 
152
- if self.aggregation:
153
- return self._aggregation_handling(current_content, result, message, sort_by_keys=sort_by_keys, print_kwargs=print_kwargs)
241
+ if self.operation_type == 'all_objects':
242
+ return self._no_diffcheck_handling(
243
+ current_content, result, message, print_kwargs=print_kwargs)
244
+
245
+ if self.operation_type == 'hit_statistics':
246
+ return self._hit_statistics_only_handling(
247
+ current_content, result, message, sort_by_keys, print_kwargs=print_kwargs)
248
+
249
+ if self.operation_type == 'new_objects':
250
+ return self._aggregation_handling(
251
+ current_content, result, message, sort_by_keys=sort_by_keys, print_kwargs=print_kwargs)
252
+
253
+ if self.operation_type == 'single_object':
254
+ return self._singular_object_handling(current_content, result, message, print_kwargs=print_kwargs)
255
+
256
+ def _no_diffcheck_handling(self, current_content, result, message, print_kwargs: dict = None):
257
+ # if not self.previous_content:
258
+ # self.previous_content = []
259
+
260
+ self.previous_content.append(f"{datetime.datetime.now()},{current_content}")
261
+
262
+ result = {
263
+ 'object': self.check_object_display_name,
264
+ 'entry': current_content
265
+ }
266
+
267
+ message = f"Object: {result['object']} | Entry: {result['entry']}"
268
+
269
+ # If 'input_file_path' was specified by the user, it means that we will use the input file to save
270
+ # our known content there for next iterations to compare.
271
+ if self.input_file_path:
272
+ if self.save_as == 'txt':
273
+ # noinspection PyTypeChecker
274
+ file_io.write_file(self.previous_content, self.input_file_path, **(print_kwargs or {}))
275
+ elif self.save_as == 'json':
276
+ jsons.write_json_file(
277
+ self.previous_content, self.input_file_path, use_default_indent=True, **(print_kwargs or {}))
278
+
279
+ return result, message
280
+
281
+ def _hit_statistics_only_handling(self, current_content, result, message, sort_by_keys, print_kwargs: dict = None):
282
+ # Convert the dictionary entry to string, since we will use it as a key in the dictionary.
283
+ current_entry = json.dumps(current_content[0])
284
+
285
+ if not self.previous_content:
286
+ self.previous_content = {}
287
+
288
+ if not self.previous_content.get(current_entry):
289
+ self.previous_content[current_entry] = 1
154
290
  else:
155
- return self._non_aggregation_handling(current_content, result, message, print_kwargs=print_kwargs)
291
+ self.previous_content[current_entry] += 1
292
+
293
+ result = {
294
+ 'object': self.check_object_display_name,
295
+ 'entry': current_entry,
296
+ 'count': self.previous_content[current_entry]
297
+ }
298
+
299
+ message = f"Object: {result['object']} | Entry: {result['entry']} | Count: {result['count']}"
300
+
301
+ # Sort the dictionary by count of entries.
302
+ self.previous_content = dicts.sort_by_values(self.previous_content, reverse=True)
303
+
304
+ # If 'input_file_path' was specified by the user, it means that we will use the input file to save
305
+ # our known content there for next iterations to compare.
306
+ if self.input_file_path:
307
+ if self.save_as == 'txt':
308
+ # noinspection PyTypeChecker
309
+ file_io.write_file(self.previous_content, self.input_file_path, **(print_kwargs or {}))
310
+ elif self.save_as == 'json':
311
+ jsons.write_json_file(
312
+ self.previous_content, self.input_file_path, use_default_indent=True, **(print_kwargs or {}))
313
+
314
+ return result, message
156
315
 
157
316
  def _aggregation_handling(self, current_content, result, message, sort_by_keys, print_kwargs: dict = None):
158
317
  if current_content[0] not in self.previous_content:
@@ -182,16 +341,16 @@ class DiffChecker:
182
341
  if self.input_file_path:
183
342
  if self.save_as == 'txt':
184
343
  # noinspection PyTypeChecker
185
- file_io.write_file(self.previous_content, self.input_file_path, **print_kwargs)
344
+ file_io.write_file(self.previous_content, self.input_file_path, **(print_kwargs or {}))
186
345
  elif self.save_as == 'json':
187
346
  jsons.write_json_file(
188
- self.previous_content, self.input_file_path, use_default_indent=True, **print_kwargs)
347
+ self.previous_content, self.input_file_path, use_default_indent=True, **(print_kwargs or {}))
189
348
  else:
190
349
  message = f"Object didn't change: {self.check_object_display_name}"
191
350
 
192
351
  return result, message
193
352
 
194
- def _non_aggregation_handling(self, current_content, result, message, print_kwargs):
353
+ def _singular_object_handling(self, current_content, result, message, print_kwargs):
195
354
  if self.previous_content != current_content:
196
355
  # If known content is not empty (if it is, it means it is the first iteration, and we don't have the input
197
356
  # file, so we don't need to update the 'result', since there is nothing to compare yet).
@@ -14,6 +14,17 @@ class DnsTrace:
14
14
  every 100 ms. Since the DNS events doesn't contain the process name and command line, only PID.
15
15
  Then DNS events will be enriched with the process name and command line from the process poller.
16
16
  :param attrs: List of attributes to return. If None, all attributes will be returned.
17
+
18
+ Usage Example:
19
+ from atomicshop.etw import dns_trace
20
+
21
+
22
+ dns_trace_w = dns_trace.DnsTrace(enable_process_poller=True, attrs=['pid', 'name', 'cmdline', 'domain', 'query_type'])
23
+ dns_trace_w.start()
24
+ while True:
25
+ dns_dict = dns_trace_w.emit()
26
+ print(dns_dict)
27
+ dns_trace_w.stop()
17
28
  """
18
29
 
19
30
  self.enable_process_poller = enable_process_poller
@@ -84,15 +84,24 @@ def read_csv_to_list_of_lists(
84
84
  csv_reader = csv.reader(file_object)
85
85
 
86
86
  csv_list = list(csv_reader)
87
- header = csv_list[0]
88
87
 
89
- if exclude_header_from_content:
88
+ # Get the header if there is only something in the content.
89
+ if csv_list:
90
+ header = csv_list[0]
91
+ else:
92
+ header = []
93
+
94
+ if exclude_header_from_content and csv_list:
90
95
  csv_list.pop(0)
91
96
 
92
97
  return csv_list, header
93
98
 
94
99
 
95
- def write_list_to_csv(file_path: str, content_list: list, mode: str = 'w') -> None:
100
+ def write_list_to_csv(
101
+ file_path: str,
102
+ content_list: list,
103
+ mode: str = 'w'
104
+ ) -> None:
96
105
  """
97
106
  Function to write list object that each iteration of it contains dict object with same keys and different values.
98
107
 
@@ -103,15 +112,23 @@ def write_list_to_csv(file_path: str, content_list: list, mode: str = 'w') -> No
103
112
  """
104
113
 
105
114
  with open(file_path, mode=mode) as csv_file:
106
- # Create header from keys of the first dictionary in list.
107
- header = content_list[0].keys()
108
- # Create CSV writer.
109
- writer = csv.DictWriter(csv_file, fieldnames=header, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
110
-
111
- # Write header.
112
- writer.writeheader()
113
- # Write list of dits as rows.
114
- writer.writerows(content_list)
115
+ if len(content_list) > 0 and isinstance(content_list[0], dict):
116
+ # Treat the list as list of dictionaries.
117
+ header = content_list[0].keys()
118
+
119
+ # Create CSV writer.
120
+ writer = csv.DictWriter(csv_file, fieldnames=header, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
121
+
122
+ # Write header.
123
+ writer.writeheader()
124
+ # Write list of dicts as rows.
125
+ writer.writerows(content_list)
126
+ # Else, treat the list as list of lists.
127
+ else:
128
+ # Create CSV writer.
129
+ writer = csv.writer(csv_file)
130
+ # Write list of lists as rows.
131
+ writer.writerows(content_list)
115
132
 
116
133
 
117
134
  def get_header(file_path: str, print_kwargs: dict = None) -> list:
@@ -156,7 +156,7 @@ def analyze(main_file_path: str):
156
156
  # Get the content from statistics files.
157
157
  statistics_content: list = reading.get_logs(
158
158
  config['statistic_files_path'],
159
- pattern='statistics*.csv',
159
+ file_name_pattern='statistics*.csv',
160
160
  log_type='csv',
161
161
  )
162
162
 
@@ -1,3 +1,5 @@
1
+ from typing import Literal, Union
2
+
1
3
  from .checks import dns, network, hash, process_running
2
4
  from .. import filesystem, scheduling
3
5
  from ..diff_check import DiffChecker
@@ -9,18 +11,34 @@ class ChangeMonitor:
9
11
  """
10
12
  def __init__(
11
13
  self,
12
- object_type: str,
14
+ object_type: Union[
15
+ Literal[
16
+ 'file',
17
+ 'dns',
18
+ 'network',
19
+ 'process_running',
20
+ 'url_urllib',
21
+ 'url_playwright_html',
22
+ 'url_playwright_pdf',
23
+ 'url_playwright_png',
24
+ 'url_playwright_jpeg'],
25
+ None] = None,
13
26
  check_object_list: list = None,
14
27
  input_file_directory: str = None,
15
28
  input_file_name: str = None,
16
29
  generate_input_file_name: bool = False,
17
30
  input_file_write_only: bool = True,
18
31
  store_original_object: bool = False,
32
+ operation_type: Literal['hit_statistics', 'all_objects'] = None
19
33
  ):
20
34
  """
21
35
  :param object_type: string, type of object to check. The type must be one of the following:
22
- 'dns': 'check_object_list' will be none, since the DNS events will be queried from the system.
23
36
  'file': 'check_object_list' must contain strings of full path to the file.
37
+ 'dns': 'check_object_list' will be none, since the DNS events will be queried from the system.
38
+ 'network': 'check_object_list' will be none, since the network events will be queried from the system.
39
+ 'process_running': 'check_object_list' must contain strings of process names to check if they are running.
40
+ Example: ['chrome.exe', 'firefox.exe']
41
+ No file is written.
24
42
  'url_urllib': 'check_object_list' must contain strings of full URL to a web page. The page will be
25
43
  downloaded using 'urllib' library in HTML.
26
44
  'url_playwright_html': 'check_object_list' must contain strings of full URL to a web page. The page will
@@ -52,6 +70,10 @@ class ChangeMonitor:
52
70
  from the memory.
53
71
  :param store_original_object: boolean, if True, the original object will be stored on the disk inside
54
72
  'Original' folder, inside 'input_file_directory'.
73
+ :param operation_type: string, type of operation to perform. The type must be one of the following:
74
+ 'hit_statistics': will only store the statistics of the entries in the input file.
75
+ 'all_objects': disable the DiffChecker features, meaning any new entries will be emitted as is.
76
+ None: will use the default operation type, based on the object type.
55
77
 
56
78
  If 'input_file_directory' is not specified, the 'input_file_name' is not specified, and
57
79
  'generate_input_file_name' is False, then the input file will not be used and the object will be stored
@@ -72,19 +94,25 @@ class ChangeMonitor:
72
94
  raise ValueError(
73
95
  'ERROR: [input_file_name] and [generate_input_file_name] cannot be both specified and True.')
74
96
 
97
+ if operation_type:
98
+ if operation_type not in ['hit_statistics', 'all_objects']:
99
+ raise ValueError(
100
+ 'ERROR: [operation_type] must be one of the following: "hit_statistics", "all_objects".')
101
+
75
102
  # === EOF Exception section ========================================
76
103
  # === Initialize Main variables ====================================
77
104
 
78
105
  if not check_object_list:
79
106
  check_object_list = list()
80
107
 
81
- self.object_type: str = object_type
108
+ self.object_type = object_type
82
109
  self.check_object_list: list = check_object_list
83
110
  self.input_file_directory: str = input_file_directory
84
111
  self.input_file_name: str = input_file_name
85
112
  self.generate_input_file_name: bool = generate_input_file_name
86
113
  self.input_file_write_only: bool = input_file_write_only
87
114
  self.store_original_object: bool = store_original_object
115
+ self.operation_type = operation_type
88
116
 
89
117
  # === EOF Initialize Main variables ================================
90
118
  # === Initialize Secondary variables ===============================
@@ -98,6 +126,7 @@ class ChangeMonitor:
98
126
  self.diff_check_list.append(
99
127
  DiffChecker(
100
128
  input_file_write_only=self.input_file_write_only,
129
+ operation_type=self.operation_type
101
130
  )
102
131
  )
103
132
  # Else, if 'check_object_list' is None, create a DiffChecker object only once.
@@ -105,6 +134,7 @@ class ChangeMonitor:
105
134
  self.diff_check_list.append(
106
135
  DiffChecker(
107
136
  input_file_write_only=self.input_file_write_only,
137
+ operation_type=self.operation_type
108
138
  )
109
139
  )
110
140
 
@@ -26,7 +26,9 @@ def _execute_cycle(change_monitor_instance, print_kwargs: dict = None):
26
26
 
27
27
  # Change settings for the DiffChecker object.
28
28
  change_monitor_instance.diff_check_list[0].return_first_cycle = True
29
- change_monitor_instance.diff_check_list[0].aggregation = True
29
+ # 'operation_type' is None.
30
+ if not change_monitor_instance.operation_type:
31
+ change_monitor_instance.diff_check_list[0].operation_type = 'new_objects'
30
32
 
31
33
  if change_monitor_instance.generate_input_file_name:
32
34
  original_name = 'known_domains'
@@ -48,6 +50,17 @@ def _execute_cycle(change_monitor_instance, print_kwargs: dict = None):
48
50
  # will return a dict with current DNS trace event.
49
51
  event_dict = change_monitor_instance.fetch_engine.emit()
50
52
 
53
+ # If 'disable_diff_check' is True, we'll return the event_dict as is.
54
+ # if change_monitor_instance.disable_diff_check:
55
+ # return_list.append(event_dict)
56
+ #
57
+ # message = \
58
+ # (f"Current domain: {event_dict['name']} | {event_dict['domain']} | {event_dict['query_type']} | "
59
+ # f"{event_dict['cmdline']}")
60
+ # print_api(message, color='yellow', **print_kwargs)
61
+ #
62
+ # return return_list
63
+ # else:
51
64
  change_monitor_instance.diff_check_list[0].check_object = [event_dict]
52
65
 
53
66
  # if event_dict not in change_monitor_instance.diff_check_list[0].check_object:
@@ -62,14 +75,27 @@ def _execute_cycle(change_monitor_instance, print_kwargs: dict = None):
62
75
  sort_by_keys=['cmdline', 'name'], print_kwargs=print_kwargs)
63
76
 
64
77
  if result:
65
- # Get list of new connections only.
66
- # new_connections_only: list = list_of_dicts.get_difference(result['old'], result['updated'])
67
-
68
- for connection in result['updated']:
69
- message = \
70
- f"New domain: {connection['name']} | " \
71
- f"{connection['domain']} | {connection['query_type']} | " \
72
- f"{connection['cmdline']}"
78
+ # Check if 'updated' key is in the result. This means that this is a regular cycle.
79
+ if 'updated' in result:
80
+ # Get list of new connections only.
81
+ # new_connections_only: list = list_of_dicts.get_difference(result['old'], result['updated'])
82
+
83
+ for connection in result['updated']:
84
+ message = \
85
+ f"New domain: {connection['name']} | " \
86
+ f"{connection['domain']} | {connection['query_type']} | " \
87
+ f"{connection['cmdline']}"
88
+ print_api(message, color='yellow', **print_kwargs)
89
+
90
+ return_list.append(message)
91
+ # Check if 'count' key is in the result. This means that this is a statistics cycle.
92
+ elif 'count' in result:
93
+ message = f"Current domain: {result['entry']} | Times hit: {result['count']}"
94
+ print_api(message, color='yellow', **print_kwargs)
95
+
96
+ return_list.append(message)
97
+ elif 'count' not in result and 'entry' in result:
98
+ message = f"Current domain: {result['entry']}"
73
99
  print_api(message, color='yellow', **print_kwargs)
74
100
 
75
101
  return_list.append(message)
@@ -28,6 +28,8 @@ def _execute_cycle(change_monitor_instance, print_kwargs: dict = None):
28
28
  # Set the input file path.
29
29
  change_monitor_instance._set_input_file_path(check_object_index=check_object_index)
30
30
 
31
+ change_monitor_instance.diff_check_list[check_object_index].operation_type = 'single_object'
32
+
31
33
  # Check if the object was updated.
32
34
  result, message = change_monitor_instance.diff_check_list[check_object_index].check_string(
33
35
  print_kwargs=print_kwargs)
@@ -70,6 +70,8 @@ def _get_list(change_monitor_instance):
70
70
  # Set the 'check_object' to empty list, since we will append the list of DNS events.
71
71
  change_monitor_instance.diff_check_list[0].check_object = list()
72
72
 
73
+ change_monitor_instance.diff_check_list[0].operation_type = 'single_object'
74
+
73
75
  # Get all connections (list of dicts), including process name and cmdline.
74
76
  connections_list_of_dicts: list = \
75
77
  change_monitor_instance.fetch_engine.get_connections_with_process_as_list_of_dicts(
atomicshop/permissions.py CHANGED
@@ -54,6 +54,18 @@ def set_executable_permission(file_path: str):
54
54
  os.chmod(file_path, os.stat(file_path).st_mode | stat.S_IXUSR)
55
55
 
56
56
 
57
+ def change_file_owner_ubuntu(file_path: str, username: str):
58
+ """
59
+ Function changes the owner of the file to the specified user.
60
+ :param file_path: str, path to the file.
61
+ :param username: str, username of the new owner.
62
+ :return:
63
+ """
64
+
65
+ uid = pwd.getpwnam(username).pw_uid
66
+ os.chown(file_path, uid, -1)
67
+
68
+
57
69
  def is_executable_permission(file_path: str) -> bool:
58
70
  """
59
71
  Function checks if the file has the executable permission.
@@ -1,15 +1,19 @@
1
1
  import os
2
- from typing import Literal
2
+ from typing import Literal, Union
3
3
  from pathlib import Path
4
4
 
5
5
  from ... import filesystem, datetimes
6
6
  from ...file_io import csvs
7
7
 
8
8
 
9
+ READING_EXISTING_LINES: list = []
10
+
11
+
9
12
  def get_logs_paths(
10
13
  log_files_directory_path: str = None,
11
14
  log_file_path: str = None,
12
- pattern: str = '*.*',
15
+ file_name_pattern: str = '*.*',
16
+ date_pattern: str = None,
13
17
  log_type: Literal['csv'] = 'csv',
14
18
  latest_only: bool = False,
15
19
  previous_day_only: bool = False
@@ -18,20 +22,23 @@ def get_logs_paths(
18
22
  This function gets the logs file paths from the directory. Supports rotating files to get the logs by time.
19
23
 
20
24
  :param log_files_directory_path: Path to the log files. If specified, the function will get all the files from the
21
- directory by the 'pattern'.
25
+ directory by the 'file_name_pattern'.
22
26
  :param log_file_path: Path to the log file. If specified, the function will get the file and all the rotated logs
23
- associated with this file. The 'pattern' will become the file name using the file name and extension.
27
+ associated with this file. The 'file_name_pattern' will become the file name using the file name and extension.
24
28
 
25
29
  Example:
26
30
  log_file_path = 'C:/logs/test_log.csv'
27
31
 
28
32
  # The function will get all the files that start with 'test_log' and have '.csv' extension:
29
- pattern = 'test_log*.csv'
33
+ file_name_pattern = 'test_log*.csv'
30
34
 
31
35
  # The 'log_files_directory_path' will also be taken from the 'log_file_path':
32
36
  log_files_directory_path = 'C:/logs'
33
- :param pattern: Pattern to match the log files names.
34
- Default pattern will match all the files.
37
+ :param file_name_pattern: Pattern to match the log files names.
38
+ Default file_name_pattern will match all the files.
39
+ :param date_pattern: Pattern to match the date in the log file name.
40
+ If specified, the function will get the log file by the date pattern.
41
+ If not specified, the function will get the file date by file last modified time.
35
42
  :param log_type: Type of log to get.
36
43
  :param latest_only: Boolean, if True, only the latest log file path will be returned.
37
44
  :param previous_day_only: Boolean, if True, only the log file path from the previous day will be returned.
@@ -48,20 +55,51 @@ def get_logs_paths(
48
55
  if latest_only and previous_day_only:
49
56
  raise ValueError('Both "latest_only" and "previous_day_only" cannot be True at the same time.')
50
57
 
51
- # If log file path is specified, get the pattern from the file name.
58
+ # If log file path is specified, get the file_name_pattern from the file name.
52
59
  if log_file_path:
53
- # Build the pattern.
60
+ # Build the file_name_pattern.
54
61
  log_file_name: str = Path(log_file_path).stem
55
62
  log_file_extension: str = Path(log_file_path).suffix
56
- pattern = f'{log_file_name}*{log_file_extension}'
63
+ file_name_pattern = f'{log_file_name}*{log_file_extension}'
57
64
 
58
65
  # Get the directory path from the file path.
59
66
  log_files_directory_path = Path(log_file_path).parent
60
67
 
61
- # Get all the log file paths by the pattern.
68
+ # Get all the log file paths by the file_name_pattern.
62
69
  logs_files: list = filesystem.get_file_paths_from_directory(
63
- log_files_directory_path, file_name_check_pattern=pattern,
64
- add_last_modified_time=True, sort_by_last_modified_time=True)
70
+ log_files_directory_path,
71
+ file_name_check_pattern=file_name_pattern,
72
+ add_last_modified_time=True,
73
+ sort_by_last_modified_time=True)
74
+
75
+ if date_pattern:
76
+ latest_timestamp: float = 0
77
+ for file_index, single_file in enumerate(logs_files):
78
+ # Get file name from current loop file path.
79
+ current_file_name: str = Path(single_file['file_path']).name
80
+ # Get the datetime object from the file name by the date pattern.
81
+ try:
82
+ datetime_object = datetimes.get_datetime_from_complex_string_by_pattern(current_file_name, date_pattern)
83
+ timestamp_float = datetime_object.timestamp()
84
+ # ValueError will be raised if the date pattern does not match the file name.
85
+ except ValueError:
86
+ timestamp_float = 0
87
+ # Update the last modified time to the dictionary.
88
+ logs_files[file_index]['last_modified'] = timestamp_float
89
+
90
+ if timestamp_float > latest_timestamp:
91
+ latest_timestamp = timestamp_float
92
+
93
+ # Now, there should be a file that doesn't have the string date pattern in the file name.
94
+ # We will add one day to the latest date that we found and assign to that file path.
95
+ for file_index, single_file in enumerate(logs_files):
96
+ if single_file['last_modified'] == 0:
97
+ latest_timestamp += 86400
98
+ logs_files[file_index]['last_modified'] = latest_timestamp
99
+ break
100
+
101
+ # Sort the files by the last modified time.
102
+ logs_files = sorted(logs_files, key=lambda x: x['last_modified'], reverse=False)
65
103
 
66
104
  if latest_only:
67
105
  logs_files = [logs_files[-1]]
@@ -79,7 +117,8 @@ def get_logs_paths(
79
117
  def get_logs(
80
118
  log_files_directory_path: str = None,
81
119
  log_file_path: str = None,
82
- pattern: str = '*.*',
120
+ file_name_pattern: str = '*.*',
121
+ date_pattern: str = None,
83
122
  log_type: Literal['csv'] = 'csv',
84
123
  header_type_of_files: Literal['first', 'all'] = 'first',
85
124
  remove_logs: bool = False,
@@ -91,8 +130,11 @@ def get_logs(
91
130
 
92
131
  :param log_files_directory_path: Path to the log files. Check the 'get_logs_paths' function for more details.
93
132
  :param log_file_path: Path to the log file. Check the 'get_logs_paths' function for more details.
94
- :param pattern: Pattern to match the log files names.
95
- Default pattern will match all the files.
133
+ :param file_name_pattern: Pattern to match the log files names.
134
+ Default file_name_pattern will match all the files.
135
+ :param date_pattern: Pattern to match the date in the log file name.
136
+ If specified, the function will get the log file by the date pattern.
137
+ If not specified, the function will get the file date by file last modified time.
96
138
  :param log_type: Type of log to get.
97
139
  :param header_type_of_files: Type of header to get from the files.
98
140
  'first' - Only the first file has a header for CSV. This header will be used for the rest of the files.
@@ -112,10 +154,13 @@ def get_logs(
112
154
  if header_type_of_files not in ['first', 'all']:
113
155
  raise ValueError('Only "first" and "all" header types are supported.')
114
156
 
115
- # Get all the log file paths by the pattern.
157
+ # Get all the log file paths by the file_name_pattern.
116
158
  logs_files: list = get_logs_paths(
117
- log_files_directory_path=log_files_directory_path, log_file_path=log_file_path,
118
- pattern=pattern, log_type=log_type)
159
+ log_files_directory_path=log_files_directory_path,
160
+ log_file_path=log_file_path,
161
+ file_name_pattern=file_name_pattern,
162
+ date_pattern=date_pattern,
163
+ log_type=log_type)
119
164
 
120
165
  # Read all the logs.
121
166
  logs_content: list = list()
@@ -128,7 +173,8 @@ def get_logs(
128
173
  elif header_type_of_files == 'first':
129
174
  # The function gets empty header to read it from the CSV file, the returns the header that it read.
130
175
  # Then each time the header is fed once again to the function.
131
- csv_content, header = csvs.read_csv_to_list_of_dicts_by_header(single_file['file_path'], header=header, **print_kwargs)
176
+ csv_content, header = csvs.read_csv_to_list_of_dicts_by_header(
177
+ single_file['file_path'], header=header, **print_kwargs)
132
178
  # Any way the first file will be read with header.
133
179
  logs_content.extend(csv_content)
134
180
 
@@ -157,3 +203,96 @@ def get_logs(
157
203
  filesystem.move_file(single_file['file_path'], move_to_path_with_file)
158
204
 
159
205
  return logs_content
206
+
207
+
208
+ def get_latest_lines(
209
+ log_file_path: str,
210
+ date_pattern: str = None,
211
+ log_type: Literal['csv'] = 'csv',
212
+ get_previous_file: bool = False
213
+ ) -> tuple:
214
+ """
215
+ This function gets the latest lines from the log file.
216
+
217
+ :param log_file_path: Path to the log file.
218
+ :param date_pattern: Pattern to match the date in the log file name.
219
+ If specified, the function will get the log file by the date pattern.
220
+ If not specified, the function will get the file date by file last modified time.
221
+ :param log_type: Type of log to get.
222
+ :param get_previous_file: Boolean, if True, the function will get the previous log file.
223
+ For example, your log is set to rotate every Midnight.
224
+ Meaning, once the day will change, the function will get the log file from the previous day in the third entry
225
+ of the return tuple. This happens only once each 24 hours. Not from the time the function was called, but from
226
+ the time the day changed.
227
+ return: List of new lines.
228
+
229
+ Usage:
230
+ while True:
231
+ latest_lines, current_lines, existing_lines, last_24_hours_lines = get_latest_log_lines(
232
+ log_file_path='/path/to/log.csv',
233
+ log_type='csv'
234
+ )
235
+
236
+ if latest_lines:
237
+ # Do something with the new lines.
238
+
239
+ if last_24_hours_lines:
240
+ # Do something with the last 24 hours lines. Reminder, this will happen once a day on log rotation.
241
+
242
+ time.sleep(1)
243
+ """
244
+
245
+ if log_type != 'csv':
246
+ raise ValueError('Only "csv" log type is supported.')
247
+
248
+ previous_file_lines: list = []
249
+
250
+ # Get the latest statistics file path.
251
+ latest_statistics_file_path: str = get_logs_paths(
252
+ log_file_path=log_file_path,
253
+ date_pattern=date_pattern,
254
+ log_type='csv',
255
+ latest_only=True
256
+ )[0]['file_path']
257
+
258
+ # Get the previous day statistics file path.
259
+ previous_day_statistics_file_path: Union[str, None] = None
260
+ try:
261
+ previous_day_statistics_file_path = get_logs_paths(
262
+ log_file_path=log_file_path,
263
+ date_pattern=date_pattern,
264
+ log_type='csv',
265
+ previous_day_only=True
266
+ )[0]['file_path']
267
+ except KeyError:
268
+ pass
269
+
270
+ current_lines, _ = csvs.read_csv_to_list_of_dicts_by_header(latest_statistics_file_path, stdout=False)
271
+ if len(current_lines) > len(READING_EXISTING_LINES):
272
+ # return current_lines
273
+ pass
274
+ elif len(current_lines) == len(READING_EXISTING_LINES):
275
+ # return None
276
+ pass
277
+ elif len(current_lines) < len(READING_EXISTING_LINES):
278
+ current_lines, _ = csvs.read_csv_to_list_of_dicts_by_header(
279
+ previous_day_statistics_file_path, stdout=False)
280
+ # Handle case where source CSV is empty (rotation period)
281
+ READING_EXISTING_LINES.clear() # Clear existing lines to start fresh after rotation
282
+
283
+ if get_previous_file:
284
+ previous_file_lines = current_lines
285
+
286
+ # return current_lines
287
+
288
+ new_lines: list = []
289
+ if current_lines:
290
+ for row in current_lines:
291
+ # If the row is not in the existing lines, then add it to the new lines.
292
+ if row not in READING_EXISTING_LINES:
293
+ new_lines.append(row)
294
+
295
+ if new_lines:
296
+ READING_EXISTING_LINES.extend(new_lines)
297
+
298
+ return new_lines, current_lines, READING_EXISTING_LINES, previous_file_lines
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: atomicshop
3
- Version: 2.12.1
3
+ Version: 2.12.4
4
4
  Summary: Atomic functions and classes to make developer life easier
5
5
  Author: Denis Kras
6
6
  License: MIT License
@@ -1,4 +1,4 @@
1
- atomicshop/__init__.py,sha256=EvosCo6MVhZ-HXgV5FtdTdD7o9rDV_EL4_xqhrsoJaw,123
1
+ atomicshop/__init__.py,sha256=A6TgdSzIf8j0mMvGKZ3xk-S6OpZAVrTeT1tDyVDSnUA,123
2
2
  atomicshop/_basics_temp.py,sha256=6cu2dd6r2dLrd1BRNcVDKTHlsHs_26Gpw8QS6v32lQ0,3699
3
3
  atomicshop/_create_pdf_demo.py,sha256=Yi-PGZuMg0RKvQmLqVeLIZYadqEZwUm-4A9JxBl_vYA,3713
4
4
  atomicshop/_patch_import.py,sha256=ENp55sKVJ0e6-4lBvZnpz9PQCt3Otbur7F6aXDlyje4,6334
@@ -8,8 +8,8 @@ atomicshop/command_line_processing.py,sha256=u5yT9Ger_cu7ni5ID0VFlRbVD46ARHeNC9t
8
8
  atomicshop/config_init.py,sha256=z2RXD_mw9nQlAOpuGry1h9QT-2LhNscXgGAktN3dCVQ,2497
9
9
  atomicshop/console_output.py,sha256=AOSJjrRryE97PAGtgDL03IBtWSi02aNol8noDnW3k6M,4667
10
10
  atomicshop/console_user_response.py,sha256=31HIy9QGXa7f-GVR8MzJauQ79E_ZqAeagF3Ks4GGdDU,3234
11
- atomicshop/datetimes.py,sha256=ICr0_gQqWnIw4BuNtabrHzjSlwnZkBfhyCrOILs5xpU,14623
12
- atomicshop/diff_check.py,sha256=RON9cSTgy3jAnwUmAUkOyfF6bgrBKOq9Sbgyl3RYodw,12350
11
+ atomicshop/datetimes.py,sha256=olsL01S5tkXk4WPzucxujqgLOh198BLgJntDnGYukRU,15533
12
+ atomicshop/diff_check.py,sha256=U7eshTajImlsL6aB0O2yiRsGCQCwumiCvGoFcJ2iAfs,19226
13
13
  atomicshop/dns.py,sha256=bNZOo5jVPzq7OT2qCPukXoK3zb1oOsyaelUwQEyK1SA,2500
14
14
  atomicshop/domains.py,sha256=Rxu6JhhMqFZRcoFs69IoEd1PtYca0lMCG6F1AomP7z4,3197
15
15
  atomicshop/emails.py,sha256=I0KyODQpIMEsNRi9YWSOL8EUPBiWyon3HRdIuSj3AEU,1410
@@ -22,7 +22,7 @@ atomicshop/inspect_wrapper.py,sha256=sGRVQhrJovNygHTydqJj0hxES-aB2Eg9KbIk3G31apw
22
22
  atomicshop/ip_addresses.py,sha256=Hvi4TumEFoTEpKWaq5WNF-YzcRzt24IxmNgv-Mgax1s,1190
23
23
  atomicshop/keyboard_press.py,sha256=1W5kRtOB75fulVx-uF2yarBhW0_IzdI1k73AnvXstk0,452
24
24
  atomicshop/pbtkmultifile_argparse.py,sha256=aEk8nhvoQVu-xyfZosK3ma17CwIgOjzO1erXXdjwtS4,4574
25
- atomicshop/permissions.py,sha256=pGynX57FqFdCW2Y6dE1T0oqL7ujagMAABw7nPHxi2IQ,4094
25
+ atomicshop/permissions.py,sha256=P6tiUKV-Gw-c3ePEVsst9bqWaHJbB4ZlJB4xbDYVpEs,4436
26
26
  atomicshop/print_api.py,sha256=DhbCQd0MWZZ5GYEk4oTu1opRFC-b31g1VWZgTGewG2Y,11568
27
27
  atomicshop/process.py,sha256=kOLrpUb5T5QN9ZvpGOjXyo7Kivrc14A9gcw9lvNMidI,15670
28
28
  atomicshop/process_name_cmd.py,sha256=TNAK6kQZm5JKWzEW6QLqVHEG98ZLNDQiSS4YwDk8V8c,3830
@@ -92,7 +92,7 @@ atomicshop/basics/hexs.py,sha256=i8CTG-J0TGGa25yFSbWEvpVyHFnof_qSWUrmXY-ylKM,105
92
92
  atomicshop/basics/if_else.py,sha256=MakivJChofZCpr0mOVjwCthzpiaBxXVB-zv7GwMOqVo,202
93
93
  atomicshop/basics/isinstancing.py,sha256=fQ35xfqbguQz2BUn-3a4KVGskhTcIn8JjRtxV2rFcRQ,876
94
94
  atomicshop/basics/list_of_dicts.py,sha256=EeUh5FwUSmjQ7_Df7yTBgwHsou5jx3tP2a0dzgs8-fk,5773
95
- atomicshop/basics/lists.py,sha256=pLpYPSu0BjJIAe_Ar55XhLsH2YBhftn7b-tTAdkK1sw,3928
95
+ atomicshop/basics/lists.py,sha256=I0C62vrDrNwCTNl0EjUZNa1Jsd8l0rTkp28GEx9QoEI,4258
96
96
  atomicshop/basics/multiprocesses.py,sha256=nSskxJSlEdalPM_Uf8cc9kAYYlVwYM1GonBLAhCL2mM,18831
97
97
  atomicshop/basics/numbers.py,sha256=ESX0z_7o_ok3sOmCKAUBoZinATklgMy2v-4RndqXlVM,1837
98
98
  atomicshop/basics/randoms.py,sha256=DmYLtnIhDK29tAQrGP1Nt-A-v8WC7WIEB8Edi-nk3N4,282
@@ -101,10 +101,10 @@ atomicshop/basics/threads.py,sha256=xvgdDJdmgN0wmmARoZ-H7Kvl1GOcEbvgaeGL4M3Hcx8,
101
101
  atomicshop/basics/timeit_template.py,sha256=fYLrk-X_dhdVtnPU22tarrhhvlggeW6FdKCXM8zkX68,405
102
102
  atomicshop/basics/tracebacks.py,sha256=cNfh_oAwF55kSIdqtv3boHZQIoQI8TajxkTnwJwpweI,535
103
103
  atomicshop/etw/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
104
- atomicshop/etw/dns_trace.py,sha256=RaREpwJETAMZSd1Lhbg0sO3ugBMw3y1fSKdvP5NfTqM,5189
104
+ atomicshop/etw/dns_trace.py,sha256=I4OZsiZUDyj7B4fKTOqsB1tcX1DUMw9uh4CwXlcmHfY,5571
105
105
  atomicshop/etw/etw.py,sha256=xVJNbfCq4KgRfsDnul6CrIdAMl9xRBixZ-hUyqiB2g4,2403
106
106
  atomicshop/file_io/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
107
- atomicshop/file_io/csvs.py,sha256=FwLTHFdngcgEpf-qviDrHpt7qT_QWtNGAR_RKvYZlpI,4816
107
+ atomicshop/file_io/csvs.py,sha256=oc4ijOHYzayx89DfW2_cktrf81kcGVFKUvKQDAljVrA,5300
108
108
  atomicshop/file_io/docxs.py,sha256=6tcYFGp0vRsHR47VwcRqwhdt2DQOwrAUYhrwN996n9U,5117
109
109
  atomicshop/file_io/file_io.py,sha256=FR84ihjGlr7Eqejo-_js4nBICVst31axD0bwX19S2eM,6385
110
110
  atomicshop/file_io/jsons.py,sha256=q9ZU8slBKnHLrtn3TnbK1qxrRpj5ZvCm6AlsFzoANjo,5303
@@ -118,7 +118,7 @@ atomicshop/mitm/initialize_engines.py,sha256=UGdT5DKYNri3MNOxESP7oeSxYiUDrVilJ4j
118
118
  atomicshop/mitm/initialize_mitm_server.py,sha256=aXNZlRu1_RGjC7lagvs2Q8rjQiygxYucy-U4C_SBnsk,13871
119
119
  atomicshop/mitm/message.py,sha256=u2U2f2SOHdBNU-6r1Ik2W14ai2EOwxUV4wVfGZA098k,1732
120
120
  atomicshop/mitm/shared_functions.py,sha256=PaK_sbnEA5zo9k2ktEOKLmvo-6wRUunxzSNRr41uXIQ,1924
121
- atomicshop/mitm/statistic_analyzer.py,sha256=K6HN7iKMthpEZYmVS1aa0jpW2g5Owq4Jl-mZIQzxWYo,23542
121
+ atomicshop/mitm/statistic_analyzer.py,sha256=WvTal-Aox-enM-5jYtFqiTplNquS4VMnmQYNEIXvZZA,23552
122
122
  atomicshop/mitm/engines/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
123
123
  atomicshop/mitm/engines/create_module_template.py,sha256=tRjVSm1sD6FzML71Qbuwvita0qsusdFGm8NZLsZ-XMs,4853
124
124
  atomicshop/mitm/engines/create_module_template_example.py,sha256=X5xhvbV6-g9jU_bQVhf_crZmaH50LRWz3bS-faQ18ds,489
@@ -131,11 +131,11 @@ atomicshop/mitm/engines/__reference_general/parser___reference_general.py,sha256
131
131
  atomicshop/mitm/engines/__reference_general/recorder___reference_general.py,sha256=KENDVf9OwXD9gwSh4B1XxACCe7iHYjrvnW1t6F64wdE,695
132
132
  atomicshop/mitm/engines/__reference_general/responder___reference_general.py,sha256=1AM49UaFTKA0AHw-k3SV3uH3QbG-o6ux0c-GoWkKNU0,6993
133
133
  atomicshop/monitor/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
134
- atomicshop/monitor/change_monitor.py,sha256=_H8OndEztTr1UNvO8ZXdv7775haPUMUcuKR_jyuOvEs,9215
134
+ atomicshop/monitor/change_monitor.py,sha256=5LNBcVodxeZMXsDvhzdhb67ipUau__Kh6v6Znj9QjyY,10858
135
135
  atomicshop/monitor/checks/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
136
- atomicshop/monitor/checks/dns.py,sha256=gyYc03faWPb3p-t-APtCvJNjhQCD2CIU2I2zHgjGPTI,3210
137
- atomicshop/monitor/checks/hash.py,sha256=Q3uNYHh-gNugJhnuLOUpNCjNfKKPODjOqhGr7OSiYNA,1847
138
- atomicshop/monitor/checks/network.py,sha256=pTM4IHHYCiKgIVSAyjvmG_odUXUBGI4dNVBIaLT-958,3733
136
+ atomicshop/monitor/checks/dns.py,sha256=orp-TgqL6EPzXVm0MtjEceFE8LRfTP3iPR6hGc8Y3TQ,4499
137
+ atomicshop/monitor/checks/hash.py,sha256=A6bJ7F5Qv_brdEh3sGhOyfviab2dsnvbXUufyBk5C1U,1951
138
+ atomicshop/monitor/checks/network.py,sha256=I9f3KyNnlx97E8igGZXpVJl4MlUp9iU6aSbILCKqbA0,3820
139
139
  atomicshop/monitor/checks/process_running.py,sha256=hJmqP0-KMsi6x46k4-4hGK0Mj_Ij9wj3qMb8SlRTHrg,1863
140
140
  atomicshop/monitor/checks/hash_checks/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
141
141
  atomicshop/monitor/checks/hash_checks/file.py,sha256=UDHrUphYSKeH4KJR5pC3ilPAGxX0oXTu3UD8ndnR5WU,2733
@@ -214,7 +214,7 @@ atomicshop/wrappers/loggingw/formatters.py,sha256=mUtcJJfmhLNrwUVYShXTmdu40dBaJu
214
214
  atomicshop/wrappers/loggingw/handlers.py,sha256=qm5Fbu8eDmlstMduUe5nKUlJU5IazFkSnQizz8Qt2os,5479
215
215
  atomicshop/wrappers/loggingw/loggers.py,sha256=DHOOTAtqkwn1xgvLHSkOiBm6yFGNuQy1kvbhG-TDog8,2374
216
216
  atomicshop/wrappers/loggingw/loggingw.py,sha256=v9WAseZXB50LluT9rIUcRvvevg2nLVKPgz3dbGejfV0,12151
217
- atomicshop/wrappers/loggingw/reading.py,sha256=XKQVggjleXqS-sjY8q7o_xzMBhWDdJO0A1d4DDE2rDA,7183
217
+ atomicshop/wrappers/loggingw/reading.py,sha256=iGvX2jMeeeF_CaPim2Gf8kbOxhK6ISfE-qgAfQs_j6g,13141
218
218
  atomicshop/wrappers/nodejsw/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
219
219
  atomicshop/wrappers/nodejsw/install_nodejs.py,sha256=QZg-R2iTQt7kFb8wNtnTmwraSGwvUs34JIasdbNa7ZU,5154
220
220
  atomicshop/wrappers/playwrightw/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -249,8 +249,8 @@ atomicshop/wrappers/socketw/socket_server_tester.py,sha256=AhpurHJmP2kgzHaUbq5ey
249
249
  atomicshop/wrappers/socketw/socket_wrapper.py,sha256=aXBwlEIJhFT0-c4i8iNlFx2It9VpCEpsv--5Oqcpxao,11624
250
250
  atomicshop/wrappers/socketw/ssl_base.py,sha256=k4V3gwkbq10MvOH4btU4onLX2GNOsSfUAdcHmL1rpVE,2274
251
251
  atomicshop/wrappers/socketw/statistics_csv.py,sha256=t3dtDEfN47CfYVi0CW6Kc2QHTEeZVyYhc57IYYh5nmA,826
252
- atomicshop-2.12.1.dist-info/LICENSE.txt,sha256=lLU7EYycfYcK2NR_1gfnhnRC8b8ccOTElACYplgZN88,1094
253
- atomicshop-2.12.1.dist-info/METADATA,sha256=TaPxIyYLWTGyaS4RhB9JdRuElraHbe0umXI6faSh9lE,10447
254
- atomicshop-2.12.1.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92
255
- atomicshop-2.12.1.dist-info/top_level.txt,sha256=EgKJB-7xcrAPeqTRF2laD_Np2gNGYkJkd4OyXqpJphA,11
256
- atomicshop-2.12.1.dist-info/RECORD,,
252
+ atomicshop-2.12.4.dist-info/LICENSE.txt,sha256=lLU7EYycfYcK2NR_1gfnhnRC8b8ccOTElACYplgZN88,1094
253
+ atomicshop-2.12.4.dist-info/METADATA,sha256=FFW2CBNFQycJ9fgKb3ZJFVmR0s2lZR_1YNhGyaDOdQ8,10447
254
+ atomicshop-2.12.4.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92
255
+ atomicshop-2.12.4.dist-info/top_level.txt,sha256=EgKJB-7xcrAPeqTRF2laD_Np2gNGYkJkd4OyXqpJphA,11
256
+ atomicshop-2.12.4.dist-info/RECORD,,