atomicshop 2.14.10-py3-none-any.whl → 2.14.11-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release
This version of atomicshop might be problematic.
- atomicshop/__init__.py +1 -1
- atomicshop/basics/list_of_dicts.py +58 -3
- atomicshop/datetimes.py +10 -3
- atomicshop/file_io/docxs.py +1 -1
- atomicshop/filesystem.py +14 -4
- atomicshop/wrappers/loggingw/consts.py +49 -0
- atomicshop/wrappers/loggingw/formatters.py +3 -54
- atomicshop/wrappers/loggingw/handlers.py +6 -6
- atomicshop/wrappers/loggingw/reading.py +14 -9
- {atomicshop-2.14.10.dist-info → atomicshop-2.14.11.dist-info}/METADATA +1 -1
- {atomicshop-2.14.10.dist-info → atomicshop-2.14.11.dist-info}/RECORD +14 -13
- {atomicshop-2.14.10.dist-info → atomicshop-2.14.11.dist-info}/LICENSE.txt +0 -0
- {atomicshop-2.14.10.dist-info → atomicshop-2.14.11.dist-info}/WHEEL +0 -0
- {atomicshop-2.14.10.dist-info → atomicshop-2.14.11.dist-info}/top_level.txt +0 -0
atomicshop/__init__.py
CHANGED
atomicshop/basics/list_of_dicts.py
CHANGED
@@ -1,5 +1,5 @@
 from operator import itemgetter
-
+import json

 from . import dicts, strings

@@ -150,7 +150,7 @@ def convert_to_set(list_of_dicts, sort_keys: bool = False) -> set:
     :return: set.
     """

-    return set(dumps(x, sort_keys=sort_keys) for x in list_of_dicts)
+    return set(json.dumps(x, sort_keys=sort_keys) for x in list_of_dicts)


 def convert_from_set(set_object: set) -> list:
@@ -161,4 +161,59 @@ def convert_from_set(set_object: set) -> list:
     :return: list of dicts.
     """

-    return [loads(x) for x in set_object]
+    return [json.loads(x) for x in set_object]
+
+
+def summarize_entries(list_instance: list, list_of_keys_to_remove: list = None) -> list:
+    """
+    The function will summarize entries in a list of dicts.
+
+    :param list_instance: list of dicts, the entries to summarize.
+    :param list_of_keys_to_remove: list, the keys to remove from each entry before summarizing.
+    :return: list, of the summarized entries, each entry without the keys in 'list_of_keys_to_remove',
+        including the count of the entry.
+
+    --------------------------------------
+
+    Example:
+    list_instance = [
+        {'time': '2021-08-01 00:00:00', 'name': 'name1', 'cmdline': 'cmdline1', 'domain': 'domain1'},
+        {'time': '2021-08-01 00:00:00', 'name': 'name2', 'cmdline': 'cmdline2', 'domain': 'domain2'},
+        {'time': '2021-08-01 00:00:00', 'name': 'name1', 'cmdline': 'cmdline1', 'domain': 'domain1'}
+    ]
+
+    list_of_keys_to_remove = ['time', 'cmdline']
+
+    summarize_entries(list_instance, list_of_keys_to_remove)
+
+    Output:
+    [
+        {'name': 'name1', 'domain': 'domain1', 'count': 2},
+        {'name': 'name2', 'domain': 'domain2', 'count': 1}
+    ]
+    """
+
+    summed_entries: dict = dict()
+    for entry in list_instance:
+        # Copy the entry to new dict, since we're going to remove a key.
+        line_copied = entry.copy()
+
+        # Remove the keys in the 'list_of_keys_to_remove'.
+        if list_of_keys_to_remove:
+            for key in list_of_keys_to_remove:
+                _ = line_copied.pop(key, None)
+
+        line_json_string = json.dumps(line_copied)
+        if line_json_string not in summed_entries:
+            summed_entries[line_json_string] = 1
+        else:
+            summed_entries[line_json_string] += 1
+
+    result_list: list = []
+    for json_string_record, count in summed_entries.items():
+        record = json.loads(json_string_record)
+        result_list.append(
+            {**record, 'count': count}
+        )
+
+    return result_list
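Note: a minimal usage sketch of the new summarize_entries helper, based on its docstring example above (the sample entries are hypothetical):

from atomicshop.basics import list_of_dicts

# Three hypothetical events; 'time' and 'cmdline' differ per event, so they are dropped before counting.
events = [
    {'time': '2021-08-01 00:00:00', 'name': 'name1', 'cmdline': 'cmdline1', 'domain': 'domain1'},
    {'time': '2021-08-01 00:00:01', 'name': 'name1', 'cmdline': 'cmdline1', 'domain': 'domain1'},
    {'time': '2021-08-01 00:00:02', 'name': 'name2', 'cmdline': 'cmdline2', 'domain': 'domain2'},
]

summary = list_of_dicts.summarize_entries(events, list_of_keys_to_remove=['time', 'cmdline'])
# [{'name': 'name1', 'domain': 'domain1', 'count': 2}, {'name': 'name2', 'domain': 'domain2', 'count': 1}]
print(summary)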
atomicshop/datetimes.py
CHANGED
@@ -70,13 +70,20 @@ DATE_TIME_STRING_FORMAT_SPECIFIERS_TO_REGEX: dict = {
 }


-def get_datetime_from_complex_string_by_pattern(
+def get_datetime_from_complex_string_by_pattern(
+        complex_string: str,
+        date_pattern: str
+) -> tuple[
+    Union[datetime, None],
+    Union[str, None],
+    Union[float, None]
+]:
     """
     Function will get datetime object from a complex string by pattern.

     :param complex_string: string that contains date and time.
     :param date_pattern: pattern that will be used to extract date and time from the string.
-    :return: datetime object
+    :return: tuple(datetime object, date string, timestamp float)
     """

     # Convert the date pattern to regex pattern
@@ -91,7 +98,7 @@ def get_datetime_from_complex_string_by_pattern(complex_string: str, date_patter
         date_timestamp = date_obj.timestamp()
         return date_obj, date_str.group(), date_timestamp
     else:
-
+        return None, None, None


 def datetime_format_to_regex(format_str: str) -> str:
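Note: get_datetime_from_complex_string_by_pattern now returns a (datetime, matched date string, timestamp) tuple, with (None, None, None) when the pattern is not found in the string; a sketch of the new calling convention (file name and pattern below are made up):

from atomicshop import datetimes

# Hypothetical rotated log file name and strftime-style date pattern.
dt_obj, date_str, timestamp = datetimes.get_datetime_from_complex_string_by_pattern(
    'service.log_2024-01-02', '%Y-%m-%d')

if dt_obj is None:
    print('no date found in the string')
else:
    print(dt_obj, date_str, timestamp)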
atomicshop/file_io/docxs.py
CHANGED
@@ -62,7 +62,7 @@ def search_for_hyperlink_in_files(directory_path: str, hyperlink: str, relative_
     input('press Enter')
     """

-    if not filesystem.
+    if not filesystem.is_directory_exists(directory_path):
         raise NotADirectoryError(f"Directory doesn't exist: {directory_path}")

     # Get all the docx files in the specified directory.
atomicshop/filesystem.py
CHANGED
@@ -246,6 +246,16 @@ def check_absolute_path___add_full(filesystem_path: str, full_path_to_add: str)


 def check_file_existence(file_path: str) -> bool:
+    """This will be removed in future versions. Use 'is_file_exists' instead."""
+    return is_file_exists(file_path)
+
+
+def check_directory_existence(directory_path: str) -> bool:
+    """This will be removed in future versions. Use 'is_directory_exists' instead."""
+    return is_directory_exists(directory_path)
+
+
+def is_file_exists(file_path: str) -> bool:
     """
     Function to check if the path is a file.

@@ -260,7 +270,7 @@ def check_file_existence(file_path: str) -> bool:
         return False


-def
+def is_directory_exists(directory_path: str) -> bool:
     """
     Function to check if a path is a directory.

@@ -459,7 +469,7 @@ def move_folder(source_directory: str, target_directory: str, overwrite: bool =

     # Check if 'overwrite' is set to 'True' and if the directory exists.
     if not overwrite:
-        if
+        if is_directory_exists(target_directory):
             raise FileExistsError(f'Directory already exists: {target_directory}')

     # Move directory.
@@ -545,7 +555,7 @@ def copy_directory(source_directory: str, target_directory: str, overwrite: bool

     # Check if 'overwrite' is set to 'True' and if the directory exists.
     if overwrite:
-        if
+        if is_directory_exists(target_directory):
             remove_directory(target_directory)

     # Copy directory.
@@ -1383,7 +1393,7 @@ def backup_folder(directory_path: str, backup_directory: str) -> None:
     Final path will look like: 'C:\\Users\\user1\\Downloads\\backup\\20231003-120000-000000_folder1'
     """

-    if
+    if is_directory_exists(directory_path):
         timestamp: str = datetimes.TimeFormats().get_current_formatted_time_filename_stamp(True)
         directory_name = Path(directory_path).name
         backup_directory_path: str = str(Path(backup_directory) / f"{timestamp}_{directory_name}")
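Note: the existence checks were renamed and the old names kept as thin deprecation shims, so both spellings currently work; a short sketch (the path is hypothetical):

from atomicshop import filesystem

# Old spelling: still available, but documented for removal in a future version.
print(filesystem.check_directory_existence('/tmp/example_dir'))

# New spelling introduced by this release.
if filesystem.is_directory_exists('/tmp/example_dir'):
    print('directory exists')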
atomicshop/wrappers/loggingw/consts.py
ADDED
@@ -0,0 +1,49 @@
+DEFAULT_ROTATING_SUFFIXES_FROM_WHEN: dict = {
+    'midnight': '%Y-%m-%d',
+    'S': '%Y-%m-%d_%H-%M-%S',
+    'M': '%Y-%m-%d_%H-%M',
+    'H': '%Y-%m-%d_%H',
+    'D': '%Y-%m-%d',
+    'W0': '%Y-%m-%d',
+    'W1': '%Y-%m-%d',
+    'W2': '%Y-%m-%d',
+    'W3': '%Y-%m-%d',
+    'W4': '%Y-%m-%d',
+    'W5': '%Y-%m-%d',
+    'W6': '%Y-%m-%d'
+}
+
+
+DEFAULT_STREAM_FORMATTER: str = "%(levelname)s | %(threadName)s | %(name)s | %(message)s"
+DEFAULT_MESSAGE_FORMATTER: str = "%(message)s"
+
+FORMAT_ELEMENT_TO_HEADER: dict = {
+    'asctime': 'Event Time [Y-M-D H:M:S]',
+    'created': 'Created',
+    'filename': "ModuleFileName ",
+    'funcName': 'Function',
+    'levelname': 'Log Level',
+    'levelno': 'Level Number',
+    'lineno': 'Line ',
+    'module': 'Module',
+    'msecs': '[MS.mS]',
+    'message': 'Message',
+    'name': 'Logger Name ',
+    'pathname': 'Path',
+    'process': 'Process',
+    'processName': 'Process Name',
+    'relativeCreated': 'Relative Created',
+    'thread': 'Thread',
+    'threadName': 'Thread Name'
+}
+
+DEFAULT_FORMATTER_TXT_FILE: str = \
+    "{asctime} | " \
+    "{levelname:<" + f"{len(FORMAT_ELEMENT_TO_HEADER['levelname'])}" + "s} | " \
+    "{name:<" + f"{len(FORMAT_ELEMENT_TO_HEADER['name'])}" + "s} | " \
+    "{filename:<" + f"{len(FORMAT_ELEMENT_TO_HEADER['filename'])}" + "s} : " \
+    "{lineno:<" + f"{len(FORMAT_ELEMENT_TO_HEADER['lineno'])}" + "d} | " \
+    "{threadName} | {message}"
+
+DEFAULT_FORMATTER_CSV_FILE: str = \
+    '\"{asctime}\",{levelname},{name},{filename},{lineno},{threadName},\"{message}\"'
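Note: the file formatter constants in consts.py use str.format-style ('{') placeholders, while DEFAULT_STREAM_FORMATTER uses '%' placeholders. A sketch of feeding the txt layout straight to the standard library, assuming style='{' is what the loggingw handlers use internally (that wiring is not shown in this diff):

import logging

from atomicshop.wrappers.loggingw import consts

handler = logging.StreamHandler()
# DEFAULT_FORMATTER_TXT_FILE is built from '{'-style placeholders, hence style='{'.
handler.setFormatter(logging.Formatter(consts.DEFAULT_FORMATTER_TXT_FILE, style='{'))

logger = logging.getLogger('consts_demo')
logger.addHandler(handler)
logger.setLevel(logging.INFO)
logger.info('rendered with the txt file layout')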
atomicshop/wrappers/loggingw/formatters.py
CHANGED
@@ -1,6 +1,8 @@
 import logging
 import time

+from . import consts
+

 # Log formatter, means how the log will look inside the file
 # Format for specific object: %(levelname)s
@@ -10,41 +12,6 @@ import time
 # ".40" truncating the string to only 40 characters. Example: %(message).250s


-DEFAULT_STREAM_FORMATTER: str = "%(levelname)s | %(threadName)s | %(name)s | %(message)s"
-DEFAULT_MESSAGE_FORMATTER: str = "%(message)s"
-
-FORMAT_ELEMENT_TO_HEADER: dict = {
-    'asctime': 'Event Time [Y-M-D H:M:S]',
-    'created': 'Created',
-    'filename': "ModuleFileName ",
-    'funcName': 'Function',
-    'levelname': 'Log Level',
-    'levelno': 'Level Number',
-    'lineno': 'Line ',
-    'module': 'Module',
-    'msecs': '[MS.mS]',
-    'message': 'Message',
-    'name': 'Logger Name ',
-    'pathname': 'Path',
-    'process': 'Process',
-    'processName': 'Process Name',
-    'relativeCreated': 'Relative Created',
-    'thread': 'Thread',
-    'threadName': 'Thread Name'
-}
-
-DEFAULT_FORMATTER_TXT_FILE: str = \
-    "{asctime} | " \
-    "{levelname:<" + f"{len(FORMAT_ELEMENT_TO_HEADER['levelname'])}" + "s} | " \
-    "{name:<" + f"{len(FORMAT_ELEMENT_TO_HEADER['name'])}" + "s} | " \
-    "{filename:<" + f"{len(FORMAT_ELEMENT_TO_HEADER['filename'])}" + "s} : " \
-    "{lineno:<" + f"{len(FORMAT_ELEMENT_TO_HEADER['lineno'])}" + "d} | " \
-    "{threadName} | {message}"
-
-DEFAULT_FORMATTER_CSV_FILE: str = \
-    '\"{asctime}\",{levelname},{name},{filename},{lineno},{threadName},\"{message}\"'
-
-
 class NanosecondsFormatter(logging.Formatter):
     def __init__(self, fmt=None, datefmt=None, style='%', use_nanoseconds=False):
         super().__init__(fmt, datefmt, style)
@@ -73,24 +40,6 @@ class NanosecondsFormatter(logging.Formatter):
         return s


-
-
-
-        # if datefmt is None:
-        #     # Use the default behavior if no datefmt is provided
-        #     return super().formatTime(record, datefmt)
-        # elif '%f' in datefmt:
-        #     # Format the time up to seconds
-        #     base_time = time.strftime(datefmt.replace('%f', ''), ct)
-        #     # Calculate nanoseconds from the fractional part of the timestamp
-        #     nanoseconds = f'{record.created:.9f}'.split('.')[1]
-        #     # Return the formatted string with nanoseconds appended
-        #     return base_time + nanoseconds
-        # else:
-        #     # Use the provided datefmt if it doesn't include %f
-        #     return time.strftime(datefmt, ct)
-
-
 class FormatterProcessor:
     """
     Class to process the formatter.
@@ -168,7 +117,7 @@ class FormatterProcessor:
         # Iterate through all the elements and get the header list.
         header_dict: dict = dict()
         for element in self.list_of_elements:
-            header_dict.update({element: FORMAT_ELEMENT_TO_HEADER[element]})
+            header_dict.update({element: consts.FORMAT_ELEMENT_TO_HEADER[element]})

         return header_dict

atomicshop/wrappers/loggingw/handlers.py
CHANGED
@@ -9,7 +9,7 @@ from typing import Literal, Union
 import threading
 from datetime import datetime

-from . import loggers, formatters, filters
+from . import loggers, formatters, filters, consts
 from ... import datetimes, filesystem


@@ -34,15 +34,15 @@ def _process_formatter_attribute(
     """

     if formatter == 'DEFAULT' and file_type is None:
-        return
+        return consts.DEFAULT_STREAM_FORMATTER
     elif formatter == 'DEFAULT' and file_type == 'txt':
-        return
+        return consts.DEFAULT_FORMATTER_TXT_FILE
     elif formatter == 'DEFAULT' and file_type == 'csv':
-        return
+        return consts.DEFAULT_FORMATTER_CSV_FILE
     elif formatter == 'DEFAULT' and file_type == 'json':
-        return
+        return consts.DEFAULT_MESSAGE_FORMATTER
     elif formatter == 'MESSAGE':
-        return
+        return consts.DEFAULT_MESSAGE_FORMATTER
     else:
         return formatter

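Note: the 'DEFAULT'/'MESSAGE' formatter keywords now resolve to the shared constants in consts instead of module-level copies; an illustrative standalone version of the dispatch shown above (the helper name resolve_formatter is made up, the real function is the internal _process_formatter_attribute):

from atomicshop.wrappers.loggingw import consts

def resolve_formatter(formatter: str, file_type: str = None) -> str:
    # Mirrors the branches in the hunk above: keywords map to consts values,
    # anything else is treated as an explicit format string and passed through.
    if formatter == 'DEFAULT' and file_type is None:
        return consts.DEFAULT_STREAM_FORMATTER
    if formatter == 'DEFAULT' and file_type == 'txt':
        return consts.DEFAULT_FORMATTER_TXT_FILE
    if formatter == 'DEFAULT' and file_type == 'csv':
        return consts.DEFAULT_FORMATTER_CSV_FILE
    if formatter == 'DEFAULT' and file_type == 'json':
        return consts.DEFAULT_MESSAGE_FORMATTER
    if formatter == 'MESSAGE':
        return consts.DEFAULT_MESSAGE_FORMATTER
    return formatter

print(resolve_formatter('DEFAULT', 'csv'))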
atomicshop/wrappers/loggingw/reading.py
CHANGED
@@ -65,7 +65,17 @@ def get_logs_paths(
         add_last_modified_time=True,
         sort_by_last_modified_time=True)

-
+    # Get the datetime object from the first file name by the date pattern.
+    first_date_string = None
+    if logs_files:
+        first_file_name: str = Path(logs_files[0]['file_path']).name
+        first_datetime_object, first_date_string, first_timestamp_float = (
+            datetimes.get_datetime_from_complex_string_by_pattern(first_file_name, date_pattern))
+
+    # The problem here is the file name that doesn't contain the date string in the name.
+    # If it is regular log rotation, then there will be one file that doesn't have the date string in the name.
+    # If the function used to get the previous day log, then there will be no file that doesn't have the date string.
+    if len(logs_files) > 1 or (len(logs_files) == 1 and first_date_string):
         if date_pattern:
             latest_timestamp: float = 0
             for file_index, single_file in enumerate(logs_files):
@@ -74,14 +84,8 @@ def get_logs_paths(
                 logs_files[file_index]['file_name'] = current_file_name

                 # Get the datetime object from the file name by the date pattern.
-
-
-                        datetimes.get_datetime_from_complex_string_by_pattern(current_file_name, date_pattern))
-                # ValueError will be raised if the date pattern does not match the file name.
-                except ValueError:
-                    timestamp_float = 0
-                    datetime_object = None
-                    date_string = None
+                datetime_object, date_string, timestamp_float = (
+                    datetimes.get_datetime_from_complex_string_by_pattern(current_file_name, date_pattern))

                 # Update the last modified time to the dictionary.
                 logs_files[file_index]['last_modified'] = timestamp_float
@@ -118,6 +122,7 @@ def get_logs_paths(
     elif len(logs_files) == 1 and previous_day_only:
         logs_files = []

+
     return logs_files

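Note: the reworked guard in get_logs_paths parses the first file name once and only runs the per-file date extraction when there is more than one file, or the single file actually carries a date suffix; a standalone sketch of that decision (file names and pattern are hypothetical):

from pathlib import Path

from atomicshop import datetimes

# Hypothetical rotation set: two dated files plus the active file without a date suffix.
log_files = ['service.log_2024-01-02', 'service.log_2024-01-03', 'service.log']
date_pattern = '%Y-%m-%d'

first_name = Path(log_files[0]).name
_, first_date_string, _ = datetimes.get_datetime_from_complex_string_by_pattern(first_name, date_pattern)

if len(log_files) > 1 or (len(log_files) == 1 and first_date_string):
    print('date-based processing applies')
else:
    print('single file without a date stamp, nothing to extract')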
{atomicshop-2.14.10.dist-info → atomicshop-2.14.11.dist-info}/RECORD
RENAMED
@@ -1,4 +1,4 @@
-atomicshop/__init__.py,sha256=
+atomicshop/__init__.py,sha256=j4I9LL6R2TZPz5HJCn6_tLwz-NnCBoG0zgr315IRFG8,124
 atomicshop/_basics_temp.py,sha256=6cu2dd6r2dLrd1BRNcVDKTHlsHs_26Gpw8QS6v32lQ0,3699
 atomicshop/_create_pdf_demo.py,sha256=Yi-PGZuMg0RKvQmLqVeLIZYadqEZwUm-4A9JxBl_vYA,3713
 atomicshop/_patch_import.py,sha256=ENp55sKVJ0e6-4lBvZnpz9PQCt3Otbur7F6aXDlyje4,6334
@@ -8,13 +8,13 @@ atomicshop/command_line_processing.py,sha256=u5yT9Ger_cu7ni5ID0VFlRbVD46ARHeNC9t
 atomicshop/config_init.py,sha256=z2RXD_mw9nQlAOpuGry1h9QT-2LhNscXgGAktN3dCVQ,2497
 atomicshop/console_output.py,sha256=AOSJjrRryE97PAGtgDL03IBtWSi02aNol8noDnW3k6M,4667
 atomicshop/console_user_response.py,sha256=31HIy9QGXa7f-GVR8MzJauQ79E_ZqAeagF3Ks4GGdDU,3234
-atomicshop/datetimes.py,sha256=
+atomicshop/datetimes.py,sha256=XF-6PbMlXgxHAOCVBGWUnAwDlFuZS1YFUGk6STFWsq0,18362
 atomicshop/diff_check.py,sha256=RJvzJhyYAZyRPKVDk1dS7UwZCx0kq__WDZ6N0rNfZUY,27110
 atomicshop/dns.py,sha256=h4uZKoz4wbBlLOOduL1GtRcTm-YpiPnGOEGxUm7hhOI,2140
 atomicshop/domains.py,sha256=Rxu6JhhMqFZRcoFs69IoEd1PtYca0lMCG6F1AomP7z4,3197
 atomicshop/emails.py,sha256=I0KyODQpIMEsNRi9YWSOL8EUPBiWyon3HRdIuSj3AEU,1410
 atomicshop/file_types.py,sha256=-0jzQMRlmU1AP9DARjk-HJm1tVE22E6ngP2mRblyEjY,763
-atomicshop/filesystem.py,sha256=
+atomicshop/filesystem.py,sha256=emiwRQuM56yXATPTMFrhYmLp9LlQ9l_RA3TU6q5NoIg,54232
 atomicshop/functions.py,sha256=pK8hoCE9z61PtWCxQJsda7YAphrLH1wxU5x-1QJP-sY,499
 atomicshop/get_process_list.py,sha256=hi1NOG-i8S6EcyQ6LTfP4pdxqRfjEijz9SZ5nEbcM9Q,6076
 atomicshop/get_process_name_cmd_dll.py,sha256=CtaSp3mgxxJKCCVW8BLx6BJNx4giCklU_T7USiCEwfc,5162
@@ -92,7 +92,7 @@ atomicshop/basics/guids.py,sha256=iRx5n18ATZWhpo748BwEjuLWLsu9y3OwF5-Adp-Dtik,40
 atomicshop/basics/hexs.py,sha256=i8CTG-J0TGGa25yFSbWEvpVyHFnof_qSWUrmXY-ylKM,1054
 atomicshop/basics/if_else.py,sha256=MakivJChofZCpr0mOVjwCthzpiaBxXVB-zv7GwMOqVo,202
 atomicshop/basics/isinstancing.py,sha256=fQ35xfqbguQz2BUn-3a4KVGskhTcIn8JjRtxV2rFcRQ,876
-atomicshop/basics/list_of_dicts.py,sha256=
+atomicshop/basics/list_of_dicts.py,sha256=tj0LNPf1ljNI_qpoO-PiOT4Ulmk1M-UpTGyn9twVcw8,8039
 atomicshop/basics/lists.py,sha256=I0C62vrDrNwCTNl0EjUZNa1Jsd8l0rTkp28GEx9QoEI,4258
 atomicshop/basics/multiprocesses.py,sha256=nSskxJSlEdalPM_Uf8cc9kAYYlVwYM1GonBLAhCL2mM,18831
 atomicshop/basics/numbers.py,sha256=ESX0z_7o_ok3sOmCKAUBoZinATklgMy2v-4RndqXlVM,1837
@@ -112,7 +112,7 @@ atomicshop/etws/traces/trace_dns.py,sha256=rWQ8bv8eMHBRRkA8oxO9caYqj0h4Emw4aZXmo
 atomicshop/etws/traces/trace_sysmon_process_creation.py,sha256=OM-bkK38uYMwWLZKNOTDa0Xdk3sO6sqsxoMUIiPvm5g,4656
 atomicshop/file_io/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 atomicshop/file_io/csvs.py,sha256=eS2SSGwcC1MlRPPgoqyFyE-qqH2esUWQvv3wWLMiOuA,5876
-atomicshop/file_io/docxs.py,sha256=
+atomicshop/file_io/docxs.py,sha256=yNlNXKLIvPkHQNF544VvCrbxcXsHX6G-6_V-8Ixp2zI,5111
 atomicshop/file_io/file_io.py,sha256=FOZ6_PjOASxSDHESe4fwDv5miXYR10OHTxkxtEHoZYQ,6555
 atomicshop/file_io/jsons.py,sha256=q9ZU8slBKnHLrtn3TnbK1qxrRpj5ZvCm6AlsFzoANjo,5303
 atomicshop/file_io/tomls.py,sha256=oa0Wm8yMkPRXKN9jgBuTnKbioSOee4mABW5IMUFCYyU,3041
@@ -234,12 +234,13 @@ atomicshop/wrappers/factw/rest/statistics.py,sha256=vznwzKP1gEF7uXz3HsuV66BU9wrp
 atomicshop/wrappers/factw/rest/status.py,sha256=4O3xS1poafwyUiLDkhyx4oMMe4PBwABuRPpOMnMKgIU,641
 atomicshop/wrappers/fibratusw/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 atomicshop/wrappers/fibratusw/install.py,sha256=PLVymDe0HuOvU0r2lje8BkQAgtiOWEeRO7n-1zKuL7A,3287
+atomicshop/wrappers/loggingw/consts.py,sha256=JWiUJEydjhwatBxtIJsGTmDUSTLbmIRidtR6qRLMaIY,1608
 atomicshop/wrappers/loggingw/filters.py,sha256=CMs5PAMb68zxJgBcQobaOFDG5kLJBOVYnoBHjDgksO8,2859
-atomicshop/wrappers/loggingw/formatters.py,sha256=
-atomicshop/wrappers/loggingw/handlers.py,sha256=
+atomicshop/wrappers/loggingw/formatters.py,sha256=7XUJvlB0CK4DCkEp8NTL0S0dkyrZD0UTADgEwkStKOY,5483
+atomicshop/wrappers/loggingw/handlers.py,sha256=yFYBeTkxnpmtlauoH3ZEFEHUYQYu9YL-ycd9sYTvOl4,16928
 atomicshop/wrappers/loggingw/loggers.py,sha256=DHOOTAtqkwn1xgvLHSkOiBm6yFGNuQy1kvbhG-TDog8,2374
 atomicshop/wrappers/loggingw/loggingw.py,sha256=lo4OZPXCbYZi3GqpaaJSs9SOGFfqD2EgHzzTK7f5IR4,11275
-atomicshop/wrappers/loggingw/reading.py,sha256=
+atomicshop/wrappers/loggingw/reading.py,sha256=TUTzffs3hSgfKjoJlf0l8o-1MOA4ag7O7cMglaxJFDQ,17500
 atomicshop/wrappers/nodejsw/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 atomicshop/wrappers/nodejsw/install_nodejs.py,sha256=QZg-R2iTQt7kFb8wNtnTmwraSGwvUs34JIasdbNa7ZU,5154
 atomicshop/wrappers/playwrightw/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -284,8 +285,8 @@ atomicshop/wrappers/socketw/socket_server_tester.py,sha256=AhpurHJmP2kgzHaUbq5ey
 atomicshop/wrappers/socketw/socket_wrapper.py,sha256=aXBwlEIJhFT0-c4i8iNlFx2It9VpCEpsv--5Oqcpxao,11624
 atomicshop/wrappers/socketw/ssl_base.py,sha256=k4V3gwkbq10MvOH4btU4onLX2GNOsSfUAdcHmL1rpVE,2274
 atomicshop/wrappers/socketw/statistics_csv.py,sha256=t3dtDEfN47CfYVi0CW6Kc2QHTEeZVyYhc57IYYh5nmA,826
-atomicshop-2.14.
-atomicshop-2.14.
-atomicshop-2.14.
-atomicshop-2.14.
-atomicshop-2.14.
+atomicshop-2.14.11.dist-info/LICENSE.txt,sha256=lLU7EYycfYcK2NR_1gfnhnRC8b8ccOTElACYplgZN88,1094
+atomicshop-2.14.11.dist-info/METADATA,sha256=GQTzZr5oR0sbtuoNqgw0kIIB80QY36y2sYn1I9qG1i4,10479
+atomicshop-2.14.11.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92
+atomicshop-2.14.11.dist-info/top_level.txt,sha256=EgKJB-7xcrAPeqTRF2laD_Np2gNGYkJkd4OyXqpJphA,11
+atomicshop-2.14.11.dist-info/RECORD,,

{atomicshop-2.14.10.dist-info → atomicshop-2.14.11.dist-info}/LICENSE.txt
RENAMED
File without changes

{atomicshop-2.14.10.dist-info → atomicshop-2.14.11.dist-info}/WHEEL
RENAMED
File without changes

{atomicshop-2.14.10.dist-info → atomicshop-2.14.11.dist-info}/top_level.txt
RENAMED
File without changes