atomicshop 2.12.3__py3-none-any.whl → 2.12.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of atomicshop might be problematic. Click here for more details.
- atomicshop/__init__.py +1 -1
- atomicshop/basics/lists.py +14 -0
- atomicshop/datetimes.py +24 -0
- atomicshop/diff_check.py +102 -11
- atomicshop/etw/dns_trace.py +11 -0
- atomicshop/mains/installs/pycharm.py +9 -0
- atomicshop/mitm/statistic_analyzer.py +1 -1
- atomicshop/monitor/change_monitor.py +33 -3
- atomicshop/monitor/checks/dns.py +35 -9
- atomicshop/monitor/checks/hash.py +2 -0
- atomicshop/monitor/checks/network.py +2 -0
- atomicshop/permissions.py +12 -0
- atomicshop/wrappers/loggingw/reading.py +168 -28
- atomicshop/wrappers/pycharmw.py +56 -0
- {atomicshop-2.12.3.dist-info → atomicshop-2.12.5.dist-info}/METADATA +1 -1
- {atomicshop-2.12.3.dist-info → atomicshop-2.12.5.dist-info}/RECORD +19 -17
- {atomicshop-2.12.3.dist-info → atomicshop-2.12.5.dist-info}/LICENSE.txt +0 -0
- {atomicshop-2.12.3.dist-info → atomicshop-2.12.5.dist-info}/WHEEL +0 -0
- {atomicshop-2.12.3.dist-info → atomicshop-2.12.5.dist-info}/top_level.txt +0 -0
atomicshop/__init__.py
CHANGED
atomicshop/basics/lists.py
CHANGED
|
@@ -1,3 +1,6 @@
|
|
|
1
|
+
import copy
|
|
2
|
+
|
|
3
|
+
|
|
1
4
|
def remove_duplicates(list_instance: list):
|
|
2
5
|
# One of the fastest methods.
|
|
3
6
|
seen = set()
|
|
@@ -109,3 +112,14 @@ def get_the_most_frequent_element_from_list(list_instance: list[str]) -> str:
|
|
|
109
112
|
"""
|
|
110
113
|
|
|
111
114
|
return max(set(list_instance), key=list_instance.count)
|
|
115
|
+
|
|
116
|
+
|
|
117
|
+
def copy_list_of_mutable_objects(list_instance: list) -> list:
|
|
118
|
+
"""
|
|
119
|
+
This function will copy the list of mutable objects. Meaning that all the mutable objects inside will be copied
|
|
120
|
+
as well.
|
|
121
|
+
:param list_instance: list.
|
|
122
|
+
:return: list.
|
|
123
|
+
"""
|
|
124
|
+
|
|
125
|
+
return copy.deepcopy(list_instance)
|
atomicshop/datetimes.py
CHANGED
|
@@ -2,6 +2,7 @@ import datetime
|
|
|
2
2
|
from datetime import timedelta
|
|
3
3
|
import time
|
|
4
4
|
import random
|
|
5
|
+
import re
|
|
5
6
|
|
|
6
7
|
|
|
7
8
|
class MonthToNumber:
|
|
@@ -46,6 +47,29 @@ class MonthToNumber:
|
|
|
46
47
|
'דצמבר': '12'}
|
|
47
48
|
|
|
48
49
|
|
|
50
|
+
def get_datetime_from_complex_string_by_pattern(complex_string: str, date_pattern: str):
|
|
51
|
+
"""
|
|
52
|
+
Function will get datetime object from a complex string by pattern.
|
|
53
|
+
|
|
54
|
+
:param complex_string: string that contains date and time.
|
|
55
|
+
:param date_pattern: pattern that will be used to extract date and time from the string.
|
|
56
|
+
:return: datetime object.
|
|
57
|
+
"""
|
|
58
|
+
|
|
59
|
+
# Convert the date pattern to regex pattern
|
|
60
|
+
regex_pattern = re.sub(r'%[a-zA-Z]', r'\\d+', date_pattern)
|
|
61
|
+
|
|
62
|
+
# Find the date part in the file name using the regex pattern
|
|
63
|
+
date_str = re.search(regex_pattern, complex_string)
|
|
64
|
+
|
|
65
|
+
if date_str:
|
|
66
|
+
# Convert the date string to a datetime object based on the given pattern
|
|
67
|
+
date_obj = datetime.datetime.strptime(date_str.group(), date_pattern)
|
|
68
|
+
return date_obj
|
|
69
|
+
else:
|
|
70
|
+
raise ValueError("No valid date found in the string")
|
|
71
|
+
|
|
72
|
+
|
|
49
73
|
def convert_single_digit_to_zero_padded(string: str):
|
|
50
74
|
"""
|
|
51
75
|
Function will check if string is a single character digit and will add zero in front of it.
|
atomicshop/diff_check.py
CHANGED
|
@@ -1,9 +1,11 @@
|
|
|
1
|
+
import datetime
|
|
1
2
|
from pathlib import Path
|
|
2
|
-
from typing import Union
|
|
3
|
+
from typing import Union, Literal
|
|
4
|
+
import json
|
|
3
5
|
|
|
4
6
|
from .file_io import file_io, jsons
|
|
5
7
|
from .print_api import print_api
|
|
6
|
-
from .basics import list_of_dicts
|
|
8
|
+
from .basics import list_of_dicts, dicts
|
|
7
9
|
|
|
8
10
|
|
|
9
11
|
class DiffChecker:
|
|
@@ -28,7 +30,8 @@ class DiffChecker:
|
|
|
28
30
|
aggregation: bool = False,
|
|
29
31
|
input_file_path: str = None,
|
|
30
32
|
input_file_write_only: bool = True,
|
|
31
|
-
return_first_cycle: bool = True
|
|
33
|
+
return_first_cycle: bool = True,
|
|
34
|
+
operation_type: Literal['new_objects', 'hit_statistics', 'all_objects', 'single_object'] = None
|
|
32
35
|
):
|
|
33
36
|
"""
|
|
34
37
|
:param check_object: any, object to check if it changed.
|
|
@@ -62,6 +65,13 @@ class DiffChecker:
|
|
|
62
65
|
|
|
63
66
|
True: return updated dictionary on first cycle. This is the default.
|
|
64
67
|
False: don't return updated dictionary on first cycle.
|
|
68
|
+
:param operation_type: string, type of operation to perform. The type must be one of the following:
|
|
69
|
+
'new_objects': will only store the new objects in the input file.
|
|
70
|
+
'hit_statistics': will only store the statistics of the entries in the input file.
|
|
71
|
+
'all_objects': disable the DiffChecker features, meaning any new entries will be emitted as is.
|
|
72
|
+
'single_object': will store the object as is, without any comparison. Meaning, that the object will be
|
|
73
|
+
compared only to itself, and if it changes, it will be updated.
|
|
74
|
+
None: Nothing will be done, you will get an exception.
|
|
65
75
|
|
|
66
76
|
--------------------------------------------------
|
|
67
77
|
|
|
@@ -78,7 +88,7 @@ class DiffChecker:
|
|
|
78
88
|
diff_checker = diff_check.DiffChecker(
|
|
79
89
|
check_object=check_list_of_dicts,
|
|
80
90
|
check_object_display_name='List of Dicts',
|
|
81
|
-
|
|
91
|
+
operation_type='new_objects'
|
|
82
92
|
input_file_path='D:\\input\\list_of_dicts.json',
|
|
83
93
|
input_file_write_only=True,
|
|
84
94
|
return_first_cycle=True
|
|
@@ -100,10 +110,10 @@ class DiffChecker:
|
|
|
100
110
|
|
|
101
111
|
diff_checker = diff_check.DiffChecker(
|
|
102
112
|
check_object_display_name='List of Dicts',
|
|
103
|
-
aggregation=True,
|
|
104
113
|
input_file_path='D:\\input\\list_of_dicts.json',
|
|
105
114
|
input_file_write_only=True,
|
|
106
|
-
return_first_cycle=True
|
|
115
|
+
return_first_cycle=True,
|
|
116
|
+
operation_type='new_objects'
|
|
107
117
|
)
|
|
108
118
|
|
|
109
119
|
# Example of checking list of dicts.
|
|
@@ -136,12 +146,17 @@ class DiffChecker:
|
|
|
136
146
|
if check_object == "":
|
|
137
147
|
raise ValueError("[check_object] option can't be empty string.")
|
|
138
148
|
|
|
149
|
+
if operation_type and operation_type not in ['new_objects', 'hit_statistics', 'all_objects', 'single_object']:
|
|
150
|
+
raise ValueError(f"[operation_type] must be one of the following: "
|
|
151
|
+
f"'new_objects', 'hit_statistics', 'all_objects', 'single_object'.")
|
|
152
|
+
|
|
139
153
|
self.check_object = check_object
|
|
140
154
|
self.check_object_display_name = check_object_display_name
|
|
141
155
|
self.aggregation: bool = aggregation
|
|
142
156
|
self.input_file_path: str = input_file_path
|
|
143
157
|
self.input_file_write_only: bool = input_file_write_only
|
|
144
158
|
self.return_first_cycle: bool = return_first_cycle
|
|
159
|
+
self.operation_type = operation_type
|
|
145
160
|
|
|
146
161
|
if not self.check_object_display_name:
|
|
147
162
|
self.check_object_display_name = self.check_object
|
|
@@ -187,6 +202,11 @@ class DiffChecker:
|
|
|
187
202
|
return self._handle_input_file(sort_by_keys, print_kwargs=print_kwargs)
|
|
188
203
|
|
|
189
204
|
def _handle_input_file(self, sort_by_keys=None, print_kwargs: dict = None):
|
|
205
|
+
# This point is the first one that is shared between the processing functions, so now we can check
|
|
206
|
+
# if the 'operation_type' is set.
|
|
207
|
+
if not self.operation_type:
|
|
208
|
+
raise ValueError("[operation_type] must be specified.")
|
|
209
|
+
|
|
190
210
|
# If 'input_file_path' was specified, this means that the input file will be created for storing
|
|
191
211
|
# content of the function to compare.
|
|
192
212
|
if self.input_file_path:
|
|
@@ -205,7 +225,8 @@ class DiffChecker:
|
|
|
205
225
|
except FileNotFoundError as except_object:
|
|
206
226
|
message = f"Input File [{Path(except_object.filename).name}] doesn't exist - Will create new one."
|
|
207
227
|
print_api(message, color='yellow', **(print_kwargs or {}))
|
|
208
|
-
|
|
228
|
+
if not self.input_file_write_only:
|
|
229
|
+
self.previous_content = list()
|
|
209
230
|
|
|
210
231
|
# get the content of current function.
|
|
211
232
|
if isinstance(self.check_object, list):
|
|
@@ -217,10 +238,80 @@ class DiffChecker:
|
|
|
217
238
|
result = None
|
|
218
239
|
message = f'First Cycle on Object: {self.check_object_display_name}'
|
|
219
240
|
|
|
220
|
-
if self.
|
|
221
|
-
return self.
|
|
241
|
+
if self.operation_type == 'all_objects':
|
|
242
|
+
return self._no_diffcheck_handling(
|
|
243
|
+
current_content, result, message, print_kwargs=print_kwargs)
|
|
244
|
+
|
|
245
|
+
if self.operation_type == 'hit_statistics':
|
|
246
|
+
return self._hit_statistics_only_handling(
|
|
247
|
+
current_content, result, message, sort_by_keys, print_kwargs=print_kwargs)
|
|
248
|
+
|
|
249
|
+
if self.operation_type == 'new_objects':
|
|
250
|
+
return self._aggregation_handling(
|
|
251
|
+
current_content, result, message, sort_by_keys=sort_by_keys, print_kwargs=print_kwargs)
|
|
252
|
+
|
|
253
|
+
if self.operation_type == 'single_object':
|
|
254
|
+
return self._singular_object_handling(current_content, result, message, print_kwargs=print_kwargs)
|
|
255
|
+
|
|
256
|
+
def _no_diffcheck_handling(self, current_content, result, message, print_kwargs: dict = None):
|
|
257
|
+
# if not self.previous_content:
|
|
258
|
+
# self.previous_content = []
|
|
259
|
+
|
|
260
|
+
self.previous_content.append(f"{datetime.datetime.now()},{current_content}")
|
|
261
|
+
|
|
262
|
+
result = {
|
|
263
|
+
'object': self.check_object_display_name,
|
|
264
|
+
'entry': current_content
|
|
265
|
+
}
|
|
266
|
+
|
|
267
|
+
message = f"Object: {result['object']} | Entry: {result['entry']}"
|
|
268
|
+
|
|
269
|
+
# If 'input_file_path' was specified by the user, it means that we will use the input file to save
|
|
270
|
+
# our known content there for next iterations to compare.
|
|
271
|
+
if self.input_file_path:
|
|
272
|
+
if self.save_as == 'txt':
|
|
273
|
+
# noinspection PyTypeChecker
|
|
274
|
+
file_io.write_file(self.previous_content, self.input_file_path, **(print_kwargs or {}))
|
|
275
|
+
elif self.save_as == 'json':
|
|
276
|
+
jsons.write_json_file(
|
|
277
|
+
self.previous_content, self.input_file_path, use_default_indent=True, **(print_kwargs or {}))
|
|
278
|
+
|
|
279
|
+
return result, message
|
|
280
|
+
|
|
281
|
+
def _hit_statistics_only_handling(self, current_content, result, message, sort_by_keys, print_kwargs: dict = None):
|
|
282
|
+
# Convert the dictionary entry to string, since we will use it as a key in the dictionary.
|
|
283
|
+
current_entry = json.dumps(current_content[0])
|
|
284
|
+
|
|
285
|
+
if not self.previous_content:
|
|
286
|
+
self.previous_content = {}
|
|
287
|
+
|
|
288
|
+
if not self.previous_content.get(current_entry):
|
|
289
|
+
self.previous_content[current_entry] = 1
|
|
222
290
|
else:
|
|
223
|
-
|
|
291
|
+
self.previous_content[current_entry] += 1
|
|
292
|
+
|
|
293
|
+
result = {
|
|
294
|
+
'object': self.check_object_display_name,
|
|
295
|
+
'entry': current_entry,
|
|
296
|
+
'count': self.previous_content[current_entry]
|
|
297
|
+
}
|
|
298
|
+
|
|
299
|
+
message = f"Object: {result['object']} | Entry: {result['entry']} | Count: {result['count']}"
|
|
300
|
+
|
|
301
|
+
# Sort the dictionary by count of entries.
|
|
302
|
+
self.previous_content = dicts.sort_by_values(self.previous_content, reverse=True)
|
|
303
|
+
|
|
304
|
+
# If 'input_file_path' was specified by the user, it means that we will use the input file to save
|
|
305
|
+
# our known content there for next iterations to compare.
|
|
306
|
+
if self.input_file_path:
|
|
307
|
+
if self.save_as == 'txt':
|
|
308
|
+
# noinspection PyTypeChecker
|
|
309
|
+
file_io.write_file(self.previous_content, self.input_file_path, **(print_kwargs or {}))
|
|
310
|
+
elif self.save_as == 'json':
|
|
311
|
+
jsons.write_json_file(
|
|
312
|
+
self.previous_content, self.input_file_path, use_default_indent=True, **(print_kwargs or {}))
|
|
313
|
+
|
|
314
|
+
return result, message
|
|
224
315
|
|
|
225
316
|
def _aggregation_handling(self, current_content, result, message, sort_by_keys, print_kwargs: dict = None):
|
|
226
317
|
if current_content[0] not in self.previous_content:
|
|
@@ -259,7 +350,7 @@ class DiffChecker:
|
|
|
259
350
|
|
|
260
351
|
return result, message
|
|
261
352
|
|
|
262
|
-
def
|
|
353
|
+
def _singular_object_handling(self, current_content, result, message, print_kwargs):
|
|
263
354
|
if self.previous_content != current_content:
|
|
264
355
|
# If known content is not empty (if it is, it means it is the first iteration, and we don't have the input
|
|
265
356
|
# file, so we don't need to update the 'result', since there is nothing to compare yet).
|
atomicshop/etw/dns_trace.py
CHANGED
|
@@ -14,6 +14,17 @@ class DnsTrace:
|
|
|
14
14
|
every 100 ms. Since the DNS events doesn't contain the process name and command line, only PID.
|
|
15
15
|
Then DNS events will be enriched with the process name and command line from the process poller.
|
|
16
16
|
:param attrs: List of attributes to return. If None, all attributes will be returned.
|
|
17
|
+
|
|
18
|
+
Usage Example:
|
|
19
|
+
from atomicshop.etw import dns_trace
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
dns_trace_w = dns_trace.DnsTrace(enable_process_poller=True, attrs=['pid', 'name', 'cmdline', 'domain', 'query_type'])
|
|
23
|
+
dns_trace_w.start()
|
|
24
|
+
while True:
|
|
25
|
+
dns_dict = dns_trace_w.emit()
|
|
26
|
+
print(dns_dict)
|
|
27
|
+
dns_trace_w.stop()
|
|
17
28
|
"""
|
|
18
29
|
|
|
19
30
|
self.enable_process_poller = enable_process_poller
|
|
@@ -156,7 +156,7 @@ def analyze(main_file_path: str):
|
|
|
156
156
|
# Get the content from statistics files.
|
|
157
157
|
statistics_content: list = reading.get_logs(
|
|
158
158
|
config['statistic_files_path'],
|
|
159
|
-
|
|
159
|
+
file_name_pattern='statistics*.csv',
|
|
160
160
|
log_type='csv',
|
|
161
161
|
)
|
|
162
162
|
|
|
@@ -1,3 +1,5 @@
|
|
|
1
|
+
from typing import Literal, Union
|
|
2
|
+
|
|
1
3
|
from .checks import dns, network, hash, process_running
|
|
2
4
|
from .. import filesystem, scheduling
|
|
3
5
|
from ..diff_check import DiffChecker
|
|
@@ -9,18 +11,34 @@ class ChangeMonitor:
|
|
|
9
11
|
"""
|
|
10
12
|
def __init__(
|
|
11
13
|
self,
|
|
12
|
-
object_type:
|
|
14
|
+
object_type: Union[
|
|
15
|
+
Literal[
|
|
16
|
+
'file',
|
|
17
|
+
'dns',
|
|
18
|
+
'network',
|
|
19
|
+
'process_running',
|
|
20
|
+
'url_urllib',
|
|
21
|
+
'url_playwright_html',
|
|
22
|
+
'url_playwright_pdf',
|
|
23
|
+
'url_playwright_png',
|
|
24
|
+
'url_playwright_jpeg'],
|
|
25
|
+
None] = None,
|
|
13
26
|
check_object_list: list = None,
|
|
14
27
|
input_file_directory: str = None,
|
|
15
28
|
input_file_name: str = None,
|
|
16
29
|
generate_input_file_name: bool = False,
|
|
17
30
|
input_file_write_only: bool = True,
|
|
18
31
|
store_original_object: bool = False,
|
|
32
|
+
operation_type: Literal['hit_statistics', 'all_objects'] = None
|
|
19
33
|
):
|
|
20
34
|
"""
|
|
21
35
|
:param object_type: string, type of object to check. The type must be one of the following:
|
|
22
|
-
'dns': 'check_object_list' will be none, since the DNS events will be queried from the system.
|
|
23
36
|
'file': 'check_object_list' must contain strings of full path to the file.
|
|
37
|
+
'dns': 'check_object_list' will be none, since the DNS events will be queried from the system.
|
|
38
|
+
'network': 'check_object_list' will be none, since the network events will be queried from the system.
|
|
39
|
+
'process_running': 'check_object_list' must contain strings of process names to check if they are running.
|
|
40
|
+
Example: ['chrome.exe', 'firefox.exe']
|
|
41
|
+
No file is written.
|
|
24
42
|
'url_urllib': 'check_object_list' must contain strings of full URL to a web page. The page will be
|
|
25
43
|
downloaded using 'urllib' library in HTML.
|
|
26
44
|
'url_playwright_html': 'check_object_list' must contain strings of full URL to a web page. The page will
|
|
@@ -52,6 +70,10 @@ class ChangeMonitor:
|
|
|
52
70
|
from the memory.
|
|
53
71
|
:param store_original_object: boolean, if True, the original object will be stored on the disk inside
|
|
54
72
|
'Original' folder, inside 'input_file_directory'.
|
|
73
|
+
:param operation_type: string, type of operation to perform. The type must be one of the following:
|
|
74
|
+
'hit_statistics': will only store the statistics of the entries in the input file.
|
|
75
|
+
'all_objects': disable the DiffChecker features, meaning any new entries will be emitted as is.
|
|
76
|
+
None: will use the default operation type, based on the object type.
|
|
55
77
|
|
|
56
78
|
If 'input_file_directory' is not specified, the 'input_file_name' is not specified, and
|
|
57
79
|
'generate_input_file_name' is False, then the input file will not be used and the object will be stored
|
|
@@ -72,19 +94,25 @@ class ChangeMonitor:
|
|
|
72
94
|
raise ValueError(
|
|
73
95
|
'ERROR: [input_file_name] and [generate_input_file_name] cannot be both specified and True.')
|
|
74
96
|
|
|
97
|
+
if operation_type:
|
|
98
|
+
if operation_type not in ['hit_statistics', 'all_objects']:
|
|
99
|
+
raise ValueError(
|
|
100
|
+
'ERROR: [operation_type] must be one of the following: "hit_statistics", "all_objects".')
|
|
101
|
+
|
|
75
102
|
# === EOF Exception section ========================================
|
|
76
103
|
# === Initialize Main variables ====================================
|
|
77
104
|
|
|
78
105
|
if not check_object_list:
|
|
79
106
|
check_object_list = list()
|
|
80
107
|
|
|
81
|
-
self.object_type
|
|
108
|
+
self.object_type = object_type
|
|
82
109
|
self.check_object_list: list = check_object_list
|
|
83
110
|
self.input_file_directory: str = input_file_directory
|
|
84
111
|
self.input_file_name: str = input_file_name
|
|
85
112
|
self.generate_input_file_name: bool = generate_input_file_name
|
|
86
113
|
self.input_file_write_only: bool = input_file_write_only
|
|
87
114
|
self.store_original_object: bool = store_original_object
|
|
115
|
+
self.operation_type = operation_type
|
|
88
116
|
|
|
89
117
|
# === EOF Initialize Main variables ================================
|
|
90
118
|
# === Initialize Secondary variables ===============================
|
|
@@ -98,6 +126,7 @@ class ChangeMonitor:
|
|
|
98
126
|
self.diff_check_list.append(
|
|
99
127
|
DiffChecker(
|
|
100
128
|
input_file_write_only=self.input_file_write_only,
|
|
129
|
+
operation_type=self.operation_type
|
|
101
130
|
)
|
|
102
131
|
)
|
|
103
132
|
# Else, if 'check_object_list' is None, create a DiffChecker object only once.
|
|
@@ -105,6 +134,7 @@ class ChangeMonitor:
|
|
|
105
134
|
self.diff_check_list.append(
|
|
106
135
|
DiffChecker(
|
|
107
136
|
input_file_write_only=self.input_file_write_only,
|
|
137
|
+
operation_type=self.operation_type
|
|
108
138
|
)
|
|
109
139
|
)
|
|
110
140
|
|
atomicshop/monitor/checks/dns.py
CHANGED
|
@@ -26,7 +26,9 @@ def _execute_cycle(change_monitor_instance, print_kwargs: dict = None):
|
|
|
26
26
|
|
|
27
27
|
# Change settings for the DiffChecker object.
|
|
28
28
|
change_monitor_instance.diff_check_list[0].return_first_cycle = True
|
|
29
|
-
|
|
29
|
+
# 'operation_type' is None.
|
|
30
|
+
if not change_monitor_instance.operation_type:
|
|
31
|
+
change_monitor_instance.diff_check_list[0].operation_type = 'new_objects'
|
|
30
32
|
|
|
31
33
|
if change_monitor_instance.generate_input_file_name:
|
|
32
34
|
original_name = 'known_domains'
|
|
@@ -48,6 +50,17 @@ def _execute_cycle(change_monitor_instance, print_kwargs: dict = None):
|
|
|
48
50
|
# will return a dict with current DNS trace event.
|
|
49
51
|
event_dict = change_monitor_instance.fetch_engine.emit()
|
|
50
52
|
|
|
53
|
+
# If 'disable_diff_check' is True, we'll return the event_dict as is.
|
|
54
|
+
# if change_monitor_instance.disable_diff_check:
|
|
55
|
+
# return_list.append(event_dict)
|
|
56
|
+
#
|
|
57
|
+
# message = \
|
|
58
|
+
# (f"Current domain: {event_dict['name']} | {event_dict['domain']} | {event_dict['query_type']} | "
|
|
59
|
+
# f"{event_dict['cmdline']}")
|
|
60
|
+
# print_api(message, color='yellow', **print_kwargs)
|
|
61
|
+
#
|
|
62
|
+
# return return_list
|
|
63
|
+
# else:
|
|
51
64
|
change_monitor_instance.diff_check_list[0].check_object = [event_dict]
|
|
52
65
|
|
|
53
66
|
# if event_dict not in change_monitor_instance.diff_check_list[0].check_object:
|
|
@@ -62,14 +75,27 @@ def _execute_cycle(change_monitor_instance, print_kwargs: dict = None):
|
|
|
62
75
|
sort_by_keys=['cmdline', 'name'], print_kwargs=print_kwargs)
|
|
63
76
|
|
|
64
77
|
if result:
|
|
65
|
-
#
|
|
66
|
-
|
|
67
|
-
|
|
68
|
-
|
|
69
|
-
|
|
70
|
-
|
|
71
|
-
|
|
72
|
-
|
|
78
|
+
# Check if 'updated' key is in the result. THis means that this is a regular cycle.
|
|
79
|
+
if 'updated' in result:
|
|
80
|
+
# Get list of new connections only.
|
|
81
|
+
# new_connections_only: list = list_of_dicts.get_difference(result['old'], result['updated'])
|
|
82
|
+
|
|
83
|
+
for connection in result['updated']:
|
|
84
|
+
message = \
|
|
85
|
+
f"New domain: {connection['name']} | " \
|
|
86
|
+
f"{connection['domain']} | {connection['query_type']} | " \
|
|
87
|
+
f"{connection['cmdline']}"
|
|
88
|
+
print_api(message, color='yellow', **print_kwargs)
|
|
89
|
+
|
|
90
|
+
return_list.append(message)
|
|
91
|
+
# Check if 'count' key is in the result. This means that this a statistics cycle.
|
|
92
|
+
elif 'count' in result:
|
|
93
|
+
message = f"Current domain: {result['entry']} | Times hit: {result['count']}"
|
|
94
|
+
print_api(message, color='yellow', **print_kwargs)
|
|
95
|
+
|
|
96
|
+
return_list.append(message)
|
|
97
|
+
elif 'count' not in result and 'entry' in result:
|
|
98
|
+
message = f"Current domain: {result['entry']}"
|
|
73
99
|
print_api(message, color='yellow', **print_kwargs)
|
|
74
100
|
|
|
75
101
|
return_list.append(message)
|
|
@@ -28,6 +28,8 @@ def _execute_cycle(change_monitor_instance, print_kwargs: dict = None):
|
|
|
28
28
|
# Set the input file path.
|
|
29
29
|
change_monitor_instance._set_input_file_path(check_object_index=check_object_index)
|
|
30
30
|
|
|
31
|
+
change_monitor_instance.diff_check_list[check_object_index].operation_type = 'single_object'
|
|
32
|
+
|
|
31
33
|
# Check if the object was updated.
|
|
32
34
|
result, message = change_monitor_instance.diff_check_list[check_object_index].check_string(
|
|
33
35
|
print_kwargs=print_kwargs)
|
|
@@ -70,6 +70,8 @@ def _get_list(change_monitor_instance):
|
|
|
70
70
|
# Set the 'check_object' to empty list, since we will append the list of DNS events.
|
|
71
71
|
change_monitor_instance.diff_check_list[0].check_object = list()
|
|
72
72
|
|
|
73
|
+
change_monitor_instance.diff_check_list[0].operation_type = 'single_object'
|
|
74
|
+
|
|
73
75
|
# Get all connections (list of dicts), including process name and cmdline.
|
|
74
76
|
connections_list_of_dicts: list = \
|
|
75
77
|
change_monitor_instance.fetch_engine.get_connections_with_process_as_list_of_dicts(
|
atomicshop/permissions.py
CHANGED
|
@@ -54,6 +54,18 @@ def set_executable_permission(file_path: str):
|
|
|
54
54
|
os.chmod(file_path, os.stat(file_path).st_mode | stat.S_IXUSR)
|
|
55
55
|
|
|
56
56
|
|
|
57
|
+
def change_file_owner_ubuntu(file_path: str, username: str):
|
|
58
|
+
"""
|
|
59
|
+
Function changes the owner of the file to the specified user.
|
|
60
|
+
:param file_path: str, path to the file.
|
|
61
|
+
:param username: str, username of the new owner.
|
|
62
|
+
:return:
|
|
63
|
+
"""
|
|
64
|
+
|
|
65
|
+
uid = pwd.getpwnam(username).pw_uid
|
|
66
|
+
os.chown(file_path, uid, -1)
|
|
67
|
+
|
|
68
|
+
|
|
57
69
|
def is_executable_permission(file_path: str) -> bool:
|
|
58
70
|
"""
|
|
59
71
|
Function checks if the file has the executable permission.
|
|
@@ -1,15 +1,19 @@
|
|
|
1
1
|
import os
|
|
2
|
-
from typing import Literal
|
|
2
|
+
from typing import Literal, Union
|
|
3
3
|
from pathlib import Path
|
|
4
4
|
|
|
5
5
|
from ... import filesystem, datetimes
|
|
6
6
|
from ...file_io import csvs
|
|
7
7
|
|
|
8
8
|
|
|
9
|
+
READING_EXISTING_LINES: list = []
|
|
10
|
+
|
|
11
|
+
|
|
9
12
|
def get_logs_paths(
|
|
10
13
|
log_files_directory_path: str = None,
|
|
11
14
|
log_file_path: str = None,
|
|
12
|
-
|
|
15
|
+
file_name_pattern: str = '*.*',
|
|
16
|
+
date_pattern: str = None,
|
|
13
17
|
log_type: Literal['csv'] = 'csv',
|
|
14
18
|
latest_only: bool = False,
|
|
15
19
|
previous_day_only: bool = False
|
|
@@ -18,20 +22,23 @@ def get_logs_paths(
|
|
|
18
22
|
This function gets the logs file paths from the directory. Supports rotating files to get the logs by time.
|
|
19
23
|
|
|
20
24
|
:param log_files_directory_path: Path to the log files. If specified, the function will get all the files from the
|
|
21
|
-
directory by the '
|
|
25
|
+
directory by the 'file_name_pattern'.
|
|
22
26
|
:param log_file_path: Path to the log file. If specified, the function will get the file and all the rotated logs
|
|
23
|
-
associated with this file. The '
|
|
27
|
+
associated with this file. The 'file_name_pattern' will become the file name using the file name and extension.
|
|
24
28
|
|
|
25
29
|
Example:
|
|
26
30
|
log_file_path = 'C:/logs/test_log.csv'
|
|
27
31
|
|
|
28
32
|
# The function will get all the files that start with 'test_log' and have '.csv' extension:
|
|
29
|
-
|
|
33
|
+
file_name_pattern = 'test_log*.csv'
|
|
30
34
|
|
|
31
35
|
# The 'log_files_directory_path' will also be taken from the 'log_file_path':
|
|
32
36
|
log_files_directory_path = 'C:/logs'
|
|
33
|
-
:param
|
|
34
|
-
Default
|
|
37
|
+
:param file_name_pattern: Pattern to match the log files names.
|
|
38
|
+
Default file_name_pattern will match all the files.
|
|
39
|
+
:param date_pattern: Pattern to match the date in the log file name.
|
|
40
|
+
If specified, the function will get the log file by the date pattern.
|
|
41
|
+
If not specified, the function will get the file date by file last modified time.
|
|
35
42
|
:param log_type: Type of log to get.
|
|
36
43
|
:param latest_only: Boolean, if True, only the latest log file path will be returned.
|
|
37
44
|
:param previous_day_only: Boolean, if True, only the log file path from the previous day will be returned.
|
|
@@ -48,30 +55,62 @@ def get_logs_paths(
|
|
|
48
55
|
if latest_only and previous_day_only:
|
|
49
56
|
raise ValueError('Both "latest_only" and "previous_day_only" cannot be True at the same time.')
|
|
50
57
|
|
|
51
|
-
# If log file path is specified, get the
|
|
58
|
+
# If log file path is specified, get the file_name_pattern from the file name.
|
|
52
59
|
if log_file_path:
|
|
53
|
-
# Build the
|
|
60
|
+
# Build the file_name_pattern.
|
|
54
61
|
log_file_name: str = Path(log_file_path).stem
|
|
55
62
|
log_file_extension: str = Path(log_file_path).suffix
|
|
56
|
-
|
|
63
|
+
file_name_pattern = f'{log_file_name}*{log_file_extension}'
|
|
57
64
|
|
|
58
65
|
# Get the directory path from the file path.
|
|
59
66
|
log_files_directory_path = Path(log_file_path).parent
|
|
60
67
|
|
|
61
|
-
# Get all the log file paths by the
|
|
68
|
+
# Get all the log file paths by the file_name_pattern.
|
|
62
69
|
logs_files: list = filesystem.get_file_paths_from_directory(
|
|
63
|
-
log_files_directory_path,
|
|
64
|
-
|
|
70
|
+
log_files_directory_path,
|
|
71
|
+
file_name_check_pattern=file_name_pattern,
|
|
72
|
+
add_last_modified_time=True,
|
|
73
|
+
sort_by_last_modified_time=True)
|
|
74
|
+
|
|
75
|
+
if len(logs_files) > 1:
|
|
76
|
+
if date_pattern:
|
|
77
|
+
latest_timestamp: float = 0
|
|
78
|
+
for file_index, single_file in enumerate(logs_files):
|
|
79
|
+
# Get file name from current loop file path.
|
|
80
|
+
current_file_name: str = Path(single_file['file_path']).name
|
|
81
|
+
# Get the datetime object from the file name by the date pattern.
|
|
82
|
+
try:
|
|
83
|
+
datetime_object = datetimes.get_datetime_from_complex_string_by_pattern(current_file_name, date_pattern)
|
|
84
|
+
timestamp_float = datetime_object.timestamp()
|
|
85
|
+
# ValueError will be raised if the date pattern does not match the file name.
|
|
86
|
+
except ValueError:
|
|
87
|
+
timestamp_float = 0
|
|
88
|
+
# Update the last modified time to the dictionary.
|
|
89
|
+
logs_files[file_index]['last_modified'] = timestamp_float
|
|
90
|
+
|
|
91
|
+
if timestamp_float > latest_timestamp:
|
|
92
|
+
latest_timestamp = timestamp_float
|
|
65
93
|
|
|
66
|
-
|
|
67
|
-
|
|
94
|
+
# Now, there should be a file that doesn't have the string date pattern in the file name.
|
|
95
|
+
# We will add one day to the latest date that we found and assign to that file path.
|
|
96
|
+
for file_index, single_file in enumerate(logs_files):
|
|
97
|
+
if single_file['last_modified'] == 0:
|
|
98
|
+
latest_timestamp += 86400
|
|
99
|
+
logs_files[file_index]['last_modified'] = latest_timestamp
|
|
100
|
+
break
|
|
68
101
|
|
|
69
|
-
|
|
70
|
-
|
|
71
|
-
|
|
72
|
-
|
|
73
|
-
|
|
74
|
-
|
|
102
|
+
# Sort the files by the last modified time.
|
|
103
|
+
logs_files = sorted(logs_files, key=lambda x: x['last_modified'], reverse=False)
|
|
104
|
+
|
|
105
|
+
if latest_only:
|
|
106
|
+
logs_files = [logs_files[-1]]
|
|
107
|
+
|
|
108
|
+
if previous_day_only:
|
|
109
|
+
# Check if there is a previous day log file.
|
|
110
|
+
if len(logs_files) == 1:
|
|
111
|
+
logs_files = []
|
|
112
|
+
else:
|
|
113
|
+
logs_files = [logs_files[-2]]
|
|
75
114
|
|
|
76
115
|
return logs_files
|
|
77
116
|
|
|
@@ -79,7 +118,8 @@ def get_logs_paths(
|
|
|
79
118
|
def get_logs(
|
|
80
119
|
log_files_directory_path: str = None,
|
|
81
120
|
log_file_path: str = None,
|
|
82
|
-
|
|
121
|
+
file_name_pattern: str = '*.*',
|
|
122
|
+
date_pattern: str = None,
|
|
83
123
|
log_type: Literal['csv'] = 'csv',
|
|
84
124
|
header_type_of_files: Literal['first', 'all'] = 'first',
|
|
85
125
|
remove_logs: bool = False,
|
|
@@ -91,8 +131,11 @@ def get_logs(
|
|
|
91
131
|
|
|
92
132
|
:param log_files_directory_path: Path to the log files. Check the 'get_logs_paths' function for more details.
|
|
93
133
|
:param log_file_path: Path to the log file. Check the 'get_logs_paths' function for more details.
|
|
94
|
-
:param
|
|
95
|
-
Default
|
|
134
|
+
:param file_name_pattern: Pattern to match the log files names.
|
|
135
|
+
Default file_name_pattern will match all the files.
|
|
136
|
+
:param date_pattern: Pattern to match the date in the log file name.
|
|
137
|
+
If specified, the function will get the log file by the date pattern.
|
|
138
|
+
If not specified, the function will get the file date by file last modified time.
|
|
96
139
|
:param log_type: Type of log to get.
|
|
97
140
|
:param header_type_of_files: Type of header to get from the files.
|
|
98
141
|
'first' - Only the first file has a header for CSV. This header will be used for the rest of the files.
|
|
@@ -112,10 +155,13 @@ def get_logs(
|
|
|
112
155
|
if header_type_of_files not in ['first', 'all']:
|
|
113
156
|
raise ValueError('Only "first" and "all" header types are supported.')
|
|
114
157
|
|
|
115
|
-
# Get all the log file paths by the
|
|
158
|
+
# Get all the log file paths by the file_name_pattern.
|
|
116
159
|
logs_files: list = get_logs_paths(
|
|
117
|
-
log_files_directory_path=log_files_directory_path,
|
|
118
|
-
|
|
160
|
+
log_files_directory_path=log_files_directory_path,
|
|
161
|
+
log_file_path=log_file_path,
|
|
162
|
+
file_name_pattern=file_name_pattern,
|
|
163
|
+
date_pattern=date_pattern,
|
|
164
|
+
log_type=log_type)
|
|
119
165
|
|
|
120
166
|
# Read all the logs.
|
|
121
167
|
logs_content: list = list()
|
|
@@ -128,7 +174,8 @@ def get_logs(
|
|
|
128
174
|
elif header_type_of_files == 'first':
|
|
129
175
|
# The function gets empty header to read it from the CSV file, the returns the header that it read.
|
|
130
176
|
# Then each time the header is fed once again to the function.
|
|
131
|
-
csv_content, header = csvs.read_csv_to_list_of_dicts_by_header(
|
|
177
|
+
csv_content, header = csvs.read_csv_to_list_of_dicts_by_header(
|
|
178
|
+
single_file['file_path'], header=header, **print_kwargs)
|
|
132
179
|
# Any way the first file will be read with header.
|
|
133
180
|
logs_content.extend(csv_content)
|
|
134
181
|
|
|
@@ -157,3 +204,96 @@ def get_logs(
|
|
|
157
204
|
filesystem.move_file(single_file['file_path'], move_to_path_with_file)
|
|
158
205
|
|
|
159
206
|
return logs_content
|
|
207
|
+
|
|
208
|
+
|
|
209
|
+
def get_latest_lines(
        log_file_path: str,
        date_pattern: str = None,
        log_type: Literal['csv'] = 'csv',
        get_previous_file: bool = False
) -> tuple:
    """
    This function gets the latest lines from the log file.

    :param log_file_path: Path to the log file.
    :param date_pattern: Pattern to match the date in the log file name.
        If specified, the function will get the log file by the date pattern.
        If not specified, the function will get the file date by file last modified time.
    :param log_type: Type of log to get. Currently only 'csv' is supported.
    :param get_previous_file: Boolean, if True, the function will get the previous log file.
        For example, your log is set to rotate every Midnight.
        Meaning, once the day will change, the function will get the log file from the previous day in the fourth
        entry of the return tuple. This happens only once each 24 hours. Not from the time the function was called,
        but from the time the day changed.
    :return: Tuple of (new_lines, current_lines, existing_lines, previous_file_lines).

    Usage:
        while True:
            latest_lines, current_lines, existing_lines, last_24_hours_lines = get_latest_lines(
                log_file_path='/path/to/log.csv',
                log_type='csv'
            )

            if latest_lines:
                # Do something with the new lines.
                pass

            if last_24_hours_lines:
                # Do something with the last 24 hours lines. Reminder, this will happen once a day on log rotation.
                pass

            time.sleep(1)
    """

    if log_type != 'csv':
        raise ValueError('Only "csv" log type is supported.')

    previous_file_lines: list = []

    # Get the latest statistics file path.
    latest_statistics_file_path: str = get_logs_paths(
        log_file_path=log_file_path,
        date_pattern=date_pattern,
        log_type='csv',
        latest_only=True
    )[0]['file_path']

    # Get the previous day statistics file path.
    # BUGFIX: 'get_logs_paths' with 'previous_day_only=True' returns an empty list when there is no
    # previous-day file, so the '[0]' subscript raises IndexError, not KeyError. Catch both so the
    # "no previous file yet" case is actually handled instead of crashing.
    previous_day_statistics_file_path: Union[str, None] = None
    try:
        previous_day_statistics_file_path = get_logs_paths(
            log_file_path=log_file_path,
            date_pattern=date_pattern,
            log_type='csv',
            previous_day_only=True
        )[0]['file_path']
    except (IndexError, KeyError):
        pass

    current_lines, _ = csvs.read_csv_to_list_of_dicts_by_header(latest_statistics_file_path, stdout=False)
    if len(current_lines) < len(READING_EXISTING_LINES):
        # The current file has fewer lines than we have already seen: the log rotated.
        # BUGFIX: only read the previous-day file if it actually exists; previously a missing
        # previous-day file left the path as None and the CSV reader crashed on it.
        if previous_day_statistics_file_path is not None:
            current_lines, _ = csvs.read_csv_to_list_of_dicts_by_header(
                previous_day_statistics_file_path, stdout=False)

        # Handle case where source CSV is empty (rotation period).
        READING_EXISTING_LINES.clear()  # Clear existing lines to start fresh after rotation.

        if get_previous_file:
            previous_file_lines = current_lines

    new_lines: list = []
    if current_lines:
        for row in current_lines:
            # If the row is not in the existing lines, then add it to the new lines.
            if row not in READING_EXISTING_LINES:
                new_lines.append(row)

    if new_lines:
        READING_EXISTING_LINES.extend(new_lines)

    return new_lines, current_lines, READING_EXISTING_LINES, previous_file_lines
|
|
@@ -0,0 +1,56 @@
|
|
|
1
|
+
import requests
|
|
2
|
+
import os
|
|
3
|
+
import subprocess
|
|
4
|
+
import tempfile
|
|
5
|
+
|
|
6
|
+
|
|
7
|
+
# URL to the PyCharm Community Edition download page.
# Requesting it with redirects enabled resolves to the actual Windows installer URL.
PYCHARM_DOWNLOAD_URL = 'https://www.jetbrains.com/pycharm/download/download-thanks.html?platform=windows&code=PCC'
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
def download_install_main():
    """
    Main function to download and install the latest PyCharm Community Edition.

    Usage:
        python -m atomicshop.mains.installs.pycharm

    Or run the main function directly.
        from atomicshop.wrappers import pycharmw


        def main():
            pycharmw.download_install_main()


        if __name__ == "__main__":
            main()

    :raises requests.HTTPError: if either HTTP request returns an error status.
    :raises subprocess.CalledProcessError: if the silent installer exits with a non-zero code.
    """

    # Get the redirect URL for the download.
    # FIX: 'timeout' added - requests has no default timeout, so a stalled connection would hang
    # forever. 'raise_for_status' added so an error page is not silently treated as a download URL.
    response = requests.get(PYCHARM_DOWNLOAD_URL, allow_redirects=True, timeout=60)
    response.raise_for_status()

    # Extract the final download URL (the URL after all redirects were followed).
    download_url = response.url

    # Get the file name from the download URL.
    file_name = download_url.split('/')[-1]

    # Create a temporary directory to download the installer into.
    temp_dir = tempfile.mkdtemp()
    installer_path = os.path.join(temp_dir, file_name)

    # Download the installer in chunks so large files are not held fully in memory.
    print(f"Downloading {file_name}...")
    with requests.get(download_url, stream=True, timeout=60) as r:
        r.raise_for_status()
        with open(installer_path, 'wb') as f:
            for chunk in r.iter_content(chunk_size=8192):
                f.write(chunk)
    print("Download complete.")

    # Install PyCharm.
    # Run the installer.
    print("Running the installer...")
    subprocess.run([installer_path, '/S'], check=True)  # /S for silent installation
    print("Installation complete.")
|
|
@@ -1,4 +1,4 @@
|
|
|
1
|
-
atomicshop/__init__.py,sha256=
|
|
1
|
+
atomicshop/__init__.py,sha256=uqlTGePEf3VopmrqXyANaBI4Z722YLLgrlwbU_6-8Eo,123
|
|
2
2
|
atomicshop/_basics_temp.py,sha256=6cu2dd6r2dLrd1BRNcVDKTHlsHs_26Gpw8QS6v32lQ0,3699
|
|
3
3
|
atomicshop/_create_pdf_demo.py,sha256=Yi-PGZuMg0RKvQmLqVeLIZYadqEZwUm-4A9JxBl_vYA,3713
|
|
4
4
|
atomicshop/_patch_import.py,sha256=ENp55sKVJ0e6-4lBvZnpz9PQCt3Otbur7F6aXDlyje4,6334
|
|
@@ -8,8 +8,8 @@ atomicshop/command_line_processing.py,sha256=u5yT9Ger_cu7ni5ID0VFlRbVD46ARHeNC9t
|
|
|
8
8
|
atomicshop/config_init.py,sha256=z2RXD_mw9nQlAOpuGry1h9QT-2LhNscXgGAktN3dCVQ,2497
|
|
9
9
|
atomicshop/console_output.py,sha256=AOSJjrRryE97PAGtgDL03IBtWSi02aNol8noDnW3k6M,4667
|
|
10
10
|
atomicshop/console_user_response.py,sha256=31HIy9QGXa7f-GVR8MzJauQ79E_ZqAeagF3Ks4GGdDU,3234
|
|
11
|
-
atomicshop/datetimes.py,sha256=
|
|
12
|
-
atomicshop/diff_check.py,sha256=
|
|
11
|
+
atomicshop/datetimes.py,sha256=olsL01S5tkXk4WPzucxujqgLOh198BLgJntDnGYukRU,15533
|
|
12
|
+
atomicshop/diff_check.py,sha256=U7eshTajImlsL6aB0O2yiRsGCQCwumiCvGoFcJ2iAfs,19226
|
|
13
13
|
atomicshop/dns.py,sha256=bNZOo5jVPzq7OT2qCPukXoK3zb1oOsyaelUwQEyK1SA,2500
|
|
14
14
|
atomicshop/domains.py,sha256=Rxu6JhhMqFZRcoFs69IoEd1PtYca0lMCG6F1AomP7z4,3197
|
|
15
15
|
atomicshop/emails.py,sha256=I0KyODQpIMEsNRi9YWSOL8EUPBiWyon3HRdIuSj3AEU,1410
|
|
@@ -22,7 +22,7 @@ atomicshop/inspect_wrapper.py,sha256=sGRVQhrJovNygHTydqJj0hxES-aB2Eg9KbIk3G31apw
|
|
|
22
22
|
atomicshop/ip_addresses.py,sha256=Hvi4TumEFoTEpKWaq5WNF-YzcRzt24IxmNgv-Mgax1s,1190
|
|
23
23
|
atomicshop/keyboard_press.py,sha256=1W5kRtOB75fulVx-uF2yarBhW0_IzdI1k73AnvXstk0,452
|
|
24
24
|
atomicshop/pbtkmultifile_argparse.py,sha256=aEk8nhvoQVu-xyfZosK3ma17CwIgOjzO1erXXdjwtS4,4574
|
|
25
|
-
atomicshop/permissions.py,sha256=
|
|
25
|
+
atomicshop/permissions.py,sha256=P6tiUKV-Gw-c3ePEVsst9bqWaHJbB4ZlJB4xbDYVpEs,4436
|
|
26
26
|
atomicshop/print_api.py,sha256=DhbCQd0MWZZ5GYEk4oTu1opRFC-b31g1VWZgTGewG2Y,11568
|
|
27
27
|
atomicshop/process.py,sha256=kOLrpUb5T5QN9ZvpGOjXyo7Kivrc14A9gcw9lvNMidI,15670
|
|
28
28
|
atomicshop/process_name_cmd.py,sha256=TNAK6kQZm5JKWzEW6QLqVHEG98ZLNDQiSS4YwDk8V8c,3830
|
|
@@ -92,7 +92,7 @@ atomicshop/basics/hexs.py,sha256=i8CTG-J0TGGa25yFSbWEvpVyHFnof_qSWUrmXY-ylKM,105
|
|
|
92
92
|
atomicshop/basics/if_else.py,sha256=MakivJChofZCpr0mOVjwCthzpiaBxXVB-zv7GwMOqVo,202
|
|
93
93
|
atomicshop/basics/isinstancing.py,sha256=fQ35xfqbguQz2BUn-3a4KVGskhTcIn8JjRtxV2rFcRQ,876
|
|
94
94
|
atomicshop/basics/list_of_dicts.py,sha256=EeUh5FwUSmjQ7_Df7yTBgwHsou5jx3tP2a0dzgs8-fk,5773
|
|
95
|
-
atomicshop/basics/lists.py,sha256=
|
|
95
|
+
atomicshop/basics/lists.py,sha256=I0C62vrDrNwCTNl0EjUZNa1Jsd8l0rTkp28GEx9QoEI,4258
|
|
96
96
|
atomicshop/basics/multiprocesses.py,sha256=nSskxJSlEdalPM_Uf8cc9kAYYlVwYM1GonBLAhCL2mM,18831
|
|
97
97
|
atomicshop/basics/numbers.py,sha256=ESX0z_7o_ok3sOmCKAUBoZinATklgMy2v-4RndqXlVM,1837
|
|
98
98
|
atomicshop/basics/randoms.py,sha256=DmYLtnIhDK29tAQrGP1Nt-A-v8WC7WIEB8Edi-nk3N4,282
|
|
@@ -101,7 +101,7 @@ atomicshop/basics/threads.py,sha256=xvgdDJdmgN0wmmARoZ-H7Kvl1GOcEbvgaeGL4M3Hcx8,
|
|
|
101
101
|
atomicshop/basics/timeit_template.py,sha256=fYLrk-X_dhdVtnPU22tarrhhvlggeW6FdKCXM8zkX68,405
|
|
102
102
|
atomicshop/basics/tracebacks.py,sha256=cNfh_oAwF55kSIdqtv3boHZQIoQI8TajxkTnwJwpweI,535
|
|
103
103
|
atomicshop/etw/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
104
|
-
atomicshop/etw/dns_trace.py,sha256=
|
|
104
|
+
atomicshop/etw/dns_trace.py,sha256=I4OZsiZUDyj7B4fKTOqsB1tcX1DUMw9uh4CwXlcmHfY,5571
|
|
105
105
|
atomicshop/etw/etw.py,sha256=xVJNbfCq4KgRfsDnul6CrIdAMl9xRBixZ-hUyqiB2g4,2403
|
|
106
106
|
atomicshop/file_io/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
107
107
|
atomicshop/file_io/csvs.py,sha256=oc4ijOHYzayx89DfW2_cktrf81kcGVFKUvKQDAljVrA,5300
|
|
@@ -111,6 +111,7 @@ atomicshop/file_io/jsons.py,sha256=q9ZU8slBKnHLrtn3TnbK1qxrRpj5ZvCm6AlsFzoANjo,5
|
|
|
111
111
|
atomicshop/file_io/tomls.py,sha256=oa0Wm8yMkPRXKN9jgBuTnKbioSOee4mABW5IMUFCYyU,3041
|
|
112
112
|
atomicshop/file_io/xlsxs.py,sha256=v_dyg9GD4LqgWi6wA1QuWRZ8zG4ZwB6Dz52ytdcmmmI,2184
|
|
113
113
|
atomicshop/file_io/xmls.py,sha256=zh3SuK-dNaFq2NDNhx6ivcf4GYCfGM8M10PcEwDSpxk,2104
|
|
114
|
+
atomicshop/mains/installs/pycharm.py,sha256=uYTfME7hOeNkAsOZxDDPj2hDqmkxrFqVV6Nv6xnYNVk,141
|
|
114
115
|
atomicshop/mitm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
115
116
|
atomicshop/mitm/connection_thread_worker.py,sha256=PQ8bwOgrPudYP5oPnSi_DWaKXOi038M8TMImlLkxuPI,20486
|
|
116
117
|
atomicshop/mitm/import_config.py,sha256=_V-IVJ7a1L6E-VOR4CDfZj-S1odbsIlBe13ij0NlpqY,7974
|
|
@@ -118,7 +119,7 @@ atomicshop/mitm/initialize_engines.py,sha256=UGdT5DKYNri3MNOxESP7oeSxYiUDrVilJ4j
|
|
|
118
119
|
atomicshop/mitm/initialize_mitm_server.py,sha256=aXNZlRu1_RGjC7lagvs2Q8rjQiygxYucy-U4C_SBnsk,13871
|
|
119
120
|
atomicshop/mitm/message.py,sha256=u2U2f2SOHdBNU-6r1Ik2W14ai2EOwxUV4wVfGZA098k,1732
|
|
120
121
|
atomicshop/mitm/shared_functions.py,sha256=PaK_sbnEA5zo9k2ktEOKLmvo-6wRUunxzSNRr41uXIQ,1924
|
|
121
|
-
atomicshop/mitm/statistic_analyzer.py,sha256=
|
|
122
|
+
atomicshop/mitm/statistic_analyzer.py,sha256=WvTal-Aox-enM-5jYtFqiTplNquS4VMnmQYNEIXvZZA,23552
|
|
122
123
|
atomicshop/mitm/engines/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
123
124
|
atomicshop/mitm/engines/create_module_template.py,sha256=tRjVSm1sD6FzML71Qbuwvita0qsusdFGm8NZLsZ-XMs,4853
|
|
124
125
|
atomicshop/mitm/engines/create_module_template_example.py,sha256=X5xhvbV6-g9jU_bQVhf_crZmaH50LRWz3bS-faQ18ds,489
|
|
@@ -131,11 +132,11 @@ atomicshop/mitm/engines/__reference_general/parser___reference_general.py,sha256
|
|
|
131
132
|
atomicshop/mitm/engines/__reference_general/recorder___reference_general.py,sha256=KENDVf9OwXD9gwSh4B1XxACCe7iHYjrvnW1t6F64wdE,695
|
|
132
133
|
atomicshop/mitm/engines/__reference_general/responder___reference_general.py,sha256=1AM49UaFTKA0AHw-k3SV3uH3QbG-o6ux0c-GoWkKNU0,6993
|
|
133
134
|
atomicshop/monitor/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
134
|
-
atomicshop/monitor/change_monitor.py,sha256=
|
|
135
|
+
atomicshop/monitor/change_monitor.py,sha256=5LNBcVodxeZMXsDvhzdhb67ipUau__Kh6v6Znj9QjyY,10858
|
|
135
136
|
atomicshop/monitor/checks/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
136
|
-
atomicshop/monitor/checks/dns.py,sha256=
|
|
137
|
-
atomicshop/monitor/checks/hash.py,sha256=
|
|
138
|
-
atomicshop/monitor/checks/network.py,sha256=
|
|
137
|
+
atomicshop/monitor/checks/dns.py,sha256=orp-TgqL6EPzXVm0MtjEceFE8LRfTP3iPR6hGc8Y3TQ,4499
|
|
138
|
+
atomicshop/monitor/checks/hash.py,sha256=A6bJ7F5Qv_brdEh3sGhOyfviab2dsnvbXUufyBk5C1U,1951
|
|
139
|
+
atomicshop/monitor/checks/network.py,sha256=I9f3KyNnlx97E8igGZXpVJl4MlUp9iU6aSbILCKqbA0,3820
|
|
139
140
|
atomicshop/monitor/checks/process_running.py,sha256=hJmqP0-KMsi6x46k4-4hGK0Mj_Ij9wj3qMb8SlRTHrg,1863
|
|
140
141
|
atomicshop/monitor/checks/hash_checks/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
141
142
|
atomicshop/monitor/checks/hash_checks/file.py,sha256=UDHrUphYSKeH4KJR5pC3ilPAGxX0oXTu3UD8ndnR5WU,2733
|
|
@@ -155,6 +156,7 @@ atomicshop/wrappers/numpyw.py,sha256=sBV4gSKyr23kXTalqAb1oqttzE_2XxBooCui66jbAqc
|
|
|
155
156
|
atomicshop/wrappers/olefilew.py,sha256=biD5m58rogifCYmYhJBrAFb9O_Bn_spLek_9HofLeYE,2051
|
|
156
157
|
atomicshop/wrappers/pipw.py,sha256=mu4jnHkSaYNfpBiLZKMZxEX_E2LqW5BVthMZkblPB_c,1317
|
|
157
158
|
atomicshop/wrappers/process_wrapper_pbtk.py,sha256=ycPmBRnv627RWks6N8OhxJQe8Gu3h3Vwj-4HswPOw0k,599
|
|
159
|
+
atomicshop/wrappers/pycharmw.py,sha256=Ar7SDG15HD-GUiLH-Mclo7xpoPj9nzSXg4ks63lL8fw,1642
|
|
158
160
|
atomicshop/wrappers/pyopensslw.py,sha256=OBWxA6EJ2vU_Qlf4M8m6ilcG3hyYB4yB0EsXUf7NhEU,6804
|
|
159
161
|
atomicshop/wrappers/ubuntu_terminal.py,sha256=BBZD3EH6KSDORd5IZBZM-ti4U6Qh1sZwftx42s7hqB4,10917
|
|
160
162
|
atomicshop/wrappers/wslw.py,sha256=AKphiHLSddL7ErevUowr3f9Y1AgGz_R3KZ3NssW07h8,6959
|
|
@@ -214,7 +216,7 @@ atomicshop/wrappers/loggingw/formatters.py,sha256=mUtcJJfmhLNrwUVYShXTmdu40dBaJu
|
|
|
214
216
|
atomicshop/wrappers/loggingw/handlers.py,sha256=qm5Fbu8eDmlstMduUe5nKUlJU5IazFkSnQizz8Qt2os,5479
|
|
215
217
|
atomicshop/wrappers/loggingw/loggers.py,sha256=DHOOTAtqkwn1xgvLHSkOiBm6yFGNuQy1kvbhG-TDog8,2374
|
|
216
218
|
atomicshop/wrappers/loggingw/loggingw.py,sha256=v9WAseZXB50LluT9rIUcRvvevg2nLVKPgz3dbGejfV0,12151
|
|
217
|
-
atomicshop/wrappers/loggingw/reading.py,sha256=
|
|
219
|
+
atomicshop/wrappers/loggingw/reading.py,sha256=OIQaLZLtk6s3LLV1IfWFkBJRWRwWJA9v_NgoFd6iXMg,13302
|
|
218
220
|
atomicshop/wrappers/nodejsw/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
219
221
|
atomicshop/wrappers/nodejsw/install_nodejs.py,sha256=QZg-R2iTQt7kFb8wNtnTmwraSGwvUs34JIasdbNa7ZU,5154
|
|
220
222
|
atomicshop/wrappers/playwrightw/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
@@ -249,8 +251,8 @@ atomicshop/wrappers/socketw/socket_server_tester.py,sha256=AhpurHJmP2kgzHaUbq5ey
|
|
|
249
251
|
atomicshop/wrappers/socketw/socket_wrapper.py,sha256=aXBwlEIJhFT0-c4i8iNlFx2It9VpCEpsv--5Oqcpxao,11624
|
|
250
252
|
atomicshop/wrappers/socketw/ssl_base.py,sha256=k4V3gwkbq10MvOH4btU4onLX2GNOsSfUAdcHmL1rpVE,2274
|
|
251
253
|
atomicshop/wrappers/socketw/statistics_csv.py,sha256=t3dtDEfN47CfYVi0CW6Kc2QHTEeZVyYhc57IYYh5nmA,826
|
|
252
|
-
atomicshop-2.12.
|
|
253
|
-
atomicshop-2.12.
|
|
254
|
-
atomicshop-2.12.
|
|
255
|
-
atomicshop-2.12.
|
|
256
|
-
atomicshop-2.12.
|
|
254
|
+
atomicshop-2.12.5.dist-info/LICENSE.txt,sha256=lLU7EYycfYcK2NR_1gfnhnRC8b8ccOTElACYplgZN88,1094
|
|
255
|
+
atomicshop-2.12.5.dist-info/METADATA,sha256=pZCLXNyHoMpmuyqyWM8BWph8Cy-rHWAnbo4ay-gaQNA,10447
|
|
256
|
+
atomicshop-2.12.5.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92
|
|
257
|
+
atomicshop-2.12.5.dist-info/top_level.txt,sha256=EgKJB-7xcrAPeqTRF2laD_Np2gNGYkJkd4OyXqpJphA,11
|
|
258
|
+
atomicshop-2.12.5.dist-info/RECORD,,
|
|
File without changes
|
|
File without changes
|
|
File without changes
|