atomicshop 2.6.8__py3-none-any.whl → 2.6.10__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.



@@ -0,0 +1,283 @@
+ import os
+ import time
+ import zipfile
+
+ from .. import filesystem
+ from ..print_api import print_api
+
+
+ def is_zip_zipfile(file_path: str) -> bool:
+     """
+     Function checks if the file is a zip file.
+     :param file_path: string, full path to the file.
+     :return: boolean.
+     """
+
+     try:
+         with zipfile.ZipFile(file_path) as zip_object:
+             zip_object.testzip()
+             return True
+     except zipfile.BadZipFile:
+         return False
+
+
+ def is_zip_magic_number(file_path: str) -> bool:
+     """
+     Function checks if the file is a zip file using magic number.
+     :param file_path: string, full path to the file.
+     :return: boolean.
+
+     50 4B 03 04: This is the most common signature, found at the beginning of a ZIP file.
+     It signifies the start of a file within the ZIP archive and is present in almost all ZIP files.
+     Each file within the ZIP archive starts with this signature.
+     50 4B 05 06: This is the end of central directory record signature.
+     It's found at the end of a ZIP file and is essential for identifying the structure of the ZIP archive,
+     especially in cases where the file is split or is a multi-part archive.
+     50 4B 07 08: This signature is used for spanned ZIP archives (also known as split or multi-volume ZIP archives).
+     It's found in the end of central directory locator for ZIP files that are split across multiple volumes.
+     """
+
+     with open(file_path, 'rb') as file:
+         # Read the first 4 bytes of the file
+         signature = file.read(4)
+
+     # Check if the signature matches any of the ZIP signatures
+     return signature in [b'PK\x03\x04', b'PK\x05\x06', b'PK\x07\x08']
+
+
+ def extract_archive_with_zipfile(
+         archive_path: str,
+         extract_directory: str = None,
+         files_without_directories: bool = False,
+         remove_first_directory: bool = False,
+         print_kwargs: dict = None
+ ) -> str:
+     """
+     Function will extract the archive using standard library 'zipfile'.
+     This method preserves original date and time of the files inside the archive.
+
+     :param archive_path: string, full path to archived file.
+     :param extract_directory: string, full path to directory that the files will be extracted to.
+     If not specified, the files will be extracted to the same directory as the archived file, using the file name
+     without extension as the directory name.
+     :param files_without_directories: boolean, default 'False'.
+     'True': All the files in the archive will be extracted without subdirectories hierarchy.
+     Meaning, that if there are duplicate file names, the latest file with the same file name will overwrite
+     all the rest of the files with the same name.
+     'False': Subdirectory hierarchy will be preserved as it is currently in the archived file.
+     :param remove_first_directory: boolean, default is 'False'.
+     'True': all the files will be extracted without first directory in the hierarchy.
+     Example: package_some_name_1.1.1_build/subdir1/file.exe
+     Will be extracted as: subdir1/file.exe
+     :param print_kwargs: dict, kwargs for print_api.
+
+     :return: string, full path to directory that the files were extracted to.
+     """
+
+     if print_kwargs is None:
+         print_kwargs = dict()
+
+     # If 'extract_directory' is not specified, extract to the same directory as the archived file.
+     if extract_directory is None:
+         extract_directory = (
+             filesystem.get_file_directory(archive_path) + os.sep +
+             filesystem.get_file_name_without_extension(archive_path))
+
+     print_api(f'Extracting to directory: {extract_directory}', **print_kwargs)
+
+     # initiating the archived file path as 'zipfile.ZipFile' object.
+     with zipfile.ZipFile(archive_path) as zip_object:
+         # '.infolist()' method of the object contains all the directories and files that are in the archive including
+         # information about each one, like date and time of archiving.
+         for zip_info in zip_object.infolist():
+             # '.filename' attribute of the 'infolist()' method is relative path to each directory and file.
+             # If 'filename' ends with '/' it is a directory (it doesn't matter if it is windows or *nix)
+             # If so, skip current iteration.
+             if zip_info.filename[-1] == '/':
+                 continue
+
+             if files_without_directories:
+                 # Put into 'filename' the string that contains only the filename without subdirectories.
+                 zip_info.filename = os.path.basename(zip_info.filename)
+             elif remove_first_directory:
+                 # Cut the first directory from the filename.
+                 zip_info.filename = zip_info.filename.split('/', maxsplit=1)[1]
+
+             print_api(f'Extracting: {zip_info.filename}', **print_kwargs)
+
+             # Extract current file from the archive using 'zip_info' of the current file with 'filename' that we
+             # updated under specified parameters to specified directory.
+             zip_object.extract(zip_info, extract_directory)
+
+             # === Change the date and time of extracted file from current time to the time specified in 'zip_info'.
+             # Get full path to extracted file.
+             extracted_file_path: str = extract_directory + os.sep + zip_info.filename
+             # Create needed datetime object with original archived datetime from 'zip_info.date_time'.
+             date_time = time.mktime(zip_info.date_time + (0, 0, -1))
+             # Using 'os' library, changed the datetime of the file to the object created in previous step.
+             os.utime(extracted_file_path, (date_time, date_time))
+     print_api('Extraction done.', color="green", **print_kwargs)
+
+     return extract_directory
+
+
+ def get_file_list_from_zip(file_path: str) -> list:
+     """
+     Function returns the list of file names and their relative directories inside the zip file.
+     :param file_path: string, full path to the zip file.
+     :return: list of strings.
+     """
+
+     with zipfile.ZipFile(file_path, 'r') as zip_object:
+         return zip_object.namelist()
+
+
+ # def search_file_in_zip(
+ #         file_path: str = None,
+ #         file_bytes: bytes = None,
+ #         file_names_to_search: list[str] = None,
+ #         case_sensitive: bool = True,
+ #         return_first_only: bool = False,
+ #         return_empty_list_per_file_name: bool = False,
+ #         recursive: bool = False,
+ #         callback_functions: list = None,
+ #         extract_file_to_path: str = None
+ # ) -> dict[str, list[bytes]]:
+ #     """
+ #     Function searches for the file names inside the zip file and returns a dictionary where the keys are the
+ #     names of the callback functions and the values are lists of found file bytes.
+ #     :param file_path: string, full path to the zip file.
+ #     :param file_bytes: bytes, the bytes of the zip file.
+ #     :param file_names_to_search: list of strings, the names of the files to search.
+ #     :param case_sensitive: boolean, default is 'True'. Determines if file name search should be case sensitive.
+ #     :param return_first_only: boolean, default is 'False'. Return only the first found file for each file name.
+ #     :param return_empty_list_per_file_name: boolean, default is 'False'.
+ #     True: Return empty list for each file name that wasn't found.
+ #     False: Don't return empty list for each file name that wasn't found.
+ #     :param recursive: boolean, default is 'False'. If True, search for file names recursively in nested zip files.
+ #     :param callback_functions: list of callables, default is None. Each function takes a file name and should return a
+ #     boolean that will tell the main function if this file is 'found' or not.
+ #     :param extract_file_to_path: string, full path to the directory where the found files should be extracted.
+ #     :return: dictionary of lists of bytes.
+ #     """
+ #
+ #     def get_unique_filename(directory, filename):
+ #         """
+ #         Generates a unique filename by appending a number if the file already exists.
+ #         """
+ #         name, ext = os.path.splitext(filename)
+ #         counter = 1
+ #         unique_filename = filename
+ #         while os.path.exists(os.path.join(directory, unique_filename)):
+ #             unique_filename = f"{name}_{counter}{ext}"
+ #             counter += 1
+ #         return unique_filename
+ #
+ #     def is_zip_file(file, zip_obj):
+ #         try:
+ #             with zip_obj.open(file) as file_data:
+ #                 with zipfile.ZipFile(BytesIO(file_data.read())) as zip_file:
+ #                     if zip_file.testzip() is None:  # No errors found
+ #                         return True
+ #         except zipfile.BadZipFile:
+ #             return False
+ #         return False
+ #
+ #     def match_file_name(target, current):
+ #         if case_sensitive:
+ #             return current.endswith(target)
+ #         else:
+ #             return current.lower().endswith(target.lower())
+ #
+ #     def search_in_zip(zip_obj, file_names, results, found_set):
+ #         for item in zip_obj.infolist():
+ #             if item.filename.endswith('/'):  # Skip directories
+ #                 continue
+ #             is_nested_zip = recursive and is_zip_file(item.filename, zip_obj)
+ #
+ #             with zip_obj.open(item) as file_data:
+ #                 archived_file_bytes = file_data.read()
+ #
+ #             # This is needed to know if the file should be extracted to directory or not.
+ #             should_extract = False
+ #
+ #             name_matched = False
+ #             if file_names is not None:
+ #                 name_matched = any(match_file_name(file_name, item.filename) for file_name in file_names)
+ #                 if name_matched:
+ #                     should_extract = True
+ #
+ #             callback_matched = False
+ #             if callback_functions:
+ #                 for callback in callback_functions:
+ #                     callback_result = callback(archived_file_bytes)
+ #                     if callback_result:
+ #                         callback_matched = True
+ #                         # Initialize key for callback function name if not present
+ #                         if callback.__name__ not in results:
+ #                             results[callback.__name__] = []
+ #                         file_info = {
+ #                             'bytes': archived_file_bytes,
+ #                             'name': item.filename,
+ #                             'size': item.file_size,
+ #                             'modified_time': item.date_time
+ #                         }
+ #                         results[callback.__name__].append(file_info)
+ #                         if return_first_only:
+ #                             found_set.add(item.filename)
+ #
+ #                         should_extract = True
+ #                         break  # Stop checking other callbacks if one has found it
+ #
+ #             if should_extract and extract_file_to_path:
+ #                 unique_filename = get_unique_filename(extract_file_to_path, os.path.basename(item.filename))
+ #                 with open(os.path.join(extract_file_to_path, unique_filename), 'wb') as f:
+ #                     f.write(archived_file_bytes)
+ #
+ #             if not callback_matched:
+ #                 if is_nested_zip:
+ #                     # If the file is a nested ZIP and hasn't matched a callback, search recursively
+ #                     nested_zip_bytes = BytesIO(archived_file_bytes)
+ #                     with zipfile.ZipFile(nested_zip_bytes) as nested_zip:
+ #                         search_in_zip(nested_zip, file_names, results, found_set)
+ #                 elif name_matched:
+ #                     # Handle name match when no callbacks are provided or no callback matched
+ #                     if item.filename not in results:
+ #                         results[item.filename] = []
+ #                     file_info = {
+ #                         'bytes': archived_file_bytes,
+ #                         'name': item.filename,
+ #                         'size': item.file_size,
+ #                         'modified_time': item.date_time
+ #                     }
+ #                     results[item.filename].append(file_info)
+ #                     if return_first_only:
+ #                         found_set.add(item.filename)  # Mark as found
+ #
+ #             if file_names is not None and len(found_set) == len(file_names):
+ #                 return  # All files found, stop searching
+ #
+ #     if file_names_to_search is None and callback_functions is None:
+ #         raise ValueError("Either file_names_to_search or callback_functions must be provided.")
+ #
+ #     # Initialize results dictionary.
+ #     if callback_functions:
+ #         results = {callback.__name__: [] for callback in callback_functions}
+ #     else:
+ #         results = {}
+ #
+ #     found_set = set()
+ #     if file_bytes is not None:
+ #         with zipfile.ZipFile(BytesIO(file_bytes), 'r') as zip_ref:
+ #             search_in_zip(zip_ref, file_names_to_search, results, found_set)
+ #     elif file_path is not None:
+ #         with zipfile.ZipFile(file_path, 'r') as zip_ref:
+ #             search_in_zip(zip_ref, file_names_to_search, results, found_set)
+ #     else:
+ #         raise ValueError("Either file_path or file_bytes must be provided.")
+ #
+ #     if not return_empty_list_per_file_name:
+ #         # Filter out keys with empty lists
+ #         results = {key: value for key, value in results.items() if value}
+ #
+ #     return results
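For orientation, here is a minimal usage sketch of the zip helpers added above. It assumes the new module is importable as atomicshop.archiver.zip (the import path is inferred from the web.py change later in this diff); the paths are hypothetical:

    from atomicshop.archiver import zip as zip_archiver

    archive_path = '/tmp/package.zip'  # hypothetical path

    # Cheap signature check before touching the archive structure.
    if zip_archiver.is_zip_magic_number(archive_path):
        # List the members, then extract while dropping the first directory level.
        print(zip_archiver.get_file_list_from_zip(archive_path))
        extracted_to = zip_archiver.extract_archive_with_zipfile(
            archive_path=archive_path,
            extract_directory='/tmp/package',  # hypothetical target
            remove_first_directory=True)
        print(f'Files extracted to: {extracted_to}')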
@@ -53,18 +53,26 @@ def convert_sequence_of_bytes_to_sequence_of_strings(byte_sequence: bytes) -> li
      return result


- def find_position(file_path: str, target: bytes, chunk_size: int = None, starting_position: int = 0) -> int:
+ def find_position(target: bytes, file_path: str = None, file_bytes: bytes = None, chunk_size: int = None, starting_position: int = 0) -> int:
      """
      Find position of the target bytes string in the file.

-     :param file_path: string, path to file.
      :param target: bytes, target bytes string.
+     :param file_path: string, path to file.
+     :param file_bytes: bytes, bytes string of the file.
      :param chunk_size: integer, chunk size in bytes.
      :param starting_position: integer, starting position in bytes. You can specify the starting seeking point
      in the file.
      :return:
      """

+     def read_chunk(position):
+         if file_path:
+             return file.read(chunk_size)
+         else:
+             end_position = min(position + chunk_size, len(file_bytes))
+             return file_bytes[position:end_position]
+
      if not chunk_size:
          chunk_size = len(target)

@@ -73,12 +81,23 @@ def find_position(file_path: str, target: bytes, chunk_size: int = None, startin
      # Overlap between chunks to ensure target isn't split between chunks
      overlap_size = len(target) - 1

-     with open(file_path, 'rb') as file:
+     # Update the position variable to match the starting position.
+     position = starting_position
+
+     # Check if file_bytes is provided, otherwise read from the file path
+     if file_bytes is not None:
+         file = file_bytes
+         length = len(file_bytes)
+     else:
+         if not file_path:
+             raise ValueError("Either file_path or file_bytes must be provided.")
+         file = open(file_path, 'rb')
          # Move the file cursor to the starting position.
          file.seek(starting_position)
-         # Update the position variable to match the starting position.
-         position = starting_position
-         chunk = file.read(chunk_size)
+
+     # try-finally block to ensure the file is closed properly if opened.
+     try:
+         chunk = read_chunk(position)

          while chunk:
              index = chunk.find(target)
@@ -86,25 +105,50 @@ def find_position(file_path: str, target: bytes, chunk_size: int = None, startin
                  # Return the absolute position of the target in the file
                  return position + index

-             # Move the file cursor back by the overlap size to ensure target isn't split between chunks
-             file.seek(position + chunk_size - overlap_size)
+             # Move the file cursor back by the overlap size to ensure target isn't split between chunks.
              # Get the current position of the cursor in the file.
-             position = file.tell()
-             chunk = file.read(chunk_size)
+
+             # Update position differently depending on the input type
+             if file_path:
+                 position = file.tell() - overlap_size
+                 file.seek(position)
+             else:
+                 position += chunk_size - overlap_size
+
+             chunk = read_chunk(position)
+
+     finally:
+         if file_path:
+             file.close()

      # Return -1 if the target is not found
      return -1


- def read_bytes_from_position(file_path: str, starting_position: int, num_bytes: int) -> bytes:
+ def read_bytes_from_position(
+         starting_position: int,
+         num_bytes: int,
+         file_path: str = None,
+         file_bytes: bytes = None
+ ) -> bytes:
      """
      Read bytes from specified position in the file.
-     :param file_path: string, path to file.
      :param starting_position: integer, starting position in bytes.
      :param num_bytes: integer, number of bytes to read.
+     :param file_path: string, path to file.
+     :param file_bytes: bytes, bytes string of the file.
      :return: bytes.
      """

+     if not file_path and not file_bytes:
+         raise ValueError("Either file_path or file_bytes must be provided.")
+
+     if file_bytes is not None:
+         # Ensure starting position and number of bytes are within the length of file_bytes
+         if starting_position < 0 or starting_position + num_bytes > len(file_bytes):
+             raise ValueError("Starting position and number of bytes to read are out of bounds.")
+         return file_bytes[starting_position:starting_position + num_bytes]
+
      with open(file_path, 'rb') as file:
          # Move the file cursor to the specified position.
          file.seek(starting_position)
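As a quick illustration of the reworked signatures, both functions can now be driven from an in-memory buffer instead of a path (imports are omitted because the hunk does not show which module these helpers live in; the sample bytes are illustrative):

    data = b'header...MAGICpayload'

    # Search the buffer directly; no file handle is opened on this code path.
    position = find_position(target=b'MAGIC', file_bytes=data)
    if position != -1:
        # Read the 7 bytes that follow the marker.
        payload = read_bytes_from_position(
            starting_position=position + len(b'MAGIC'),
            num_bytes=7,
            file_bytes=data)  # payload == b'payload'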
@@ -96,3 +96,14 @@ def convert_dict_to_json_string(
          indent = 2

      return json.dumps(dict_or_list, indent=indent)
+
+
+ def convert_json_string_to_dict(json_string: str) -> dict:
+     """
+     Convert json formatted string to dictionary.
+
+     :param json_string: json formatted string.
+     :return: dictionary.
+     """
+
+     return json.loads(json_string)
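The new helper is simply the inverse of convert_dict_to_json_string shown above; a trivial sketch with an illustrative string:

    settings = convert_json_string_to_dict('{"retries": 3, "verbose": true}')
    # settings == {'retries': 3, 'verbose': True}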
@@ -1,14 +1,15 @@
  import xml.etree.ElementTree as ET


- def read_xml_file(
-         file_path: str,
-         **kwargs):
+ def read_xml(
+         file_path: str = None,
+         xml_bytes: bytes = None,
+ ):
      """
-     Read the xml file and return its content as dictionary.
-     :param file_path: string, full path to xml file.
-     :param kwargs: dict, keyword arguments for print_api function.
-     :return:
+     Read XML from a file or bytes and return its content as a dictionary.
+     :param file_path: Optional string, full path to xml file.
+     :param xml_bytes: Optional bytes, XML data as bytes.
+     :return: Tuple containing the XML dictionary, the ElementTree, and the root element.
      """

      def xml_to_dict(element):
@@ -40,8 +41,18 @@ def read_xml_file(

          return element_dict

-     tree = ET.parse(file_path)
-     root = tree.getroot()
+     # Determine source of XML data
+     if xml_bytes is not None:
+         # Parse XML from bytes object
+         root = ET.fromstring(xml_bytes)
+         tree = ET.ElementTree(root)
+     elif file_path is not None:
+         # Parse XML from file
+         tree = ET.parse(file_path)
+         root = tree.getroot()
+     else:
+         raise ValueError("Either file_path or xml_bytes must be provided")
+
      result_xml_dict: dict = xml_to_dict(root)

      return result_xml_dict, tree, root
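A short sketch of the new bytes branch (the XML snippet is illustrative, and the import of read_xml is omitted because its module path is not shown in this hunk):

    xml_bytes = b'<config><host>localhost</host><port>8080</port></config>'

    # Bytes take priority: ET.fromstring() parses them and an ElementTree is built around the root.
    xml_dict, tree, root = read_xml(xml_bytes=xml_bytes)
    print(root.tag)   # 'config'
    print(xml_dict)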
atomicshop/web.py CHANGED
@@ -2,7 +2,7 @@ import os
  import urllib.request

  from .print_api import print_api
- from .archiver import extract_archive_with_zipfile
+ from .archiver import zip
  from .urls import url_parser
  from .file_io import file_io
  from .wrappers.playwrightw import scenarios
@@ -239,7 +239,7 @@ def download_and_extract_file(
          file_url=file_url, target_directory=target_directory, file_name=file_name, **kwargs)

      # Extract the archive and remove the first directory.
-     extract_archive_with_zipfile(
+     zip.extract_archive_with_zipfile(
          archive_path=f'{file_path}', extract_directory=target_directory,
          remove_first_directory=archive_remove_first_directory, **kwargs)
 
@@ -1,6 +1,7 @@
  import os
  import configparser
  from typing import Any
+ import io

  from ..print_api import print_api
  from ..basics import lists
@@ -12,7 +13,13 @@ class ConfigParserWrapper:
      ImportConfig class is responsible for importing 'config.ini' file and its variables.
      """

-     def __init__(self, file_name: str = 'config.ini', directory_path: str = None, file_path: str = None):
+     def __init__(
+             self,
+             file_name: str = 'config.ini',
+             directory_path: str = None,
+             file_path: str = None,
+             config_content: str = None
+     ):
          """
          The function will initialize the 'ImportConfig' object.
          You can specify either full 'file_path' or 'directory_path' and 'file_name'.
@@ -21,16 +28,18 @@ class ConfigParserWrapper:
          :param file_name: The name of the file to be imported. Default is 'config.ini'.
          :param directory_path: The directory that 'config.ini' file is in.
          :param file_path: string, the full path to the file.
+         :param config_content: string, the content of the config file. If specified, the 'file_path' will be ignored.
          """

-         if not directory_path and not file_path:
-             raise ValueError("You must specify either 'directory_path' or 'file_path'.")
-         elif directory_path and file_path:
-             raise ValueError("You can't specify both 'directory_path' and 'file_path'.")
+         if not directory_path and not file_path and not config_content:
+             raise ValueError("You must specify either 'directory_path' or 'file_path' or 'config_content'.")
+         elif (directory_path and file_path) or (directory_path and config_content) or (file_path and config_content):
+             raise ValueError("You can't specify both 'directory_path' and 'file_path' or 'config_content'.")

          self.file_name: str = file_name
          self.directory_path: str = directory_path
          self.file_path: str = file_path
+         self.config_content: str = config_content

          self.config_parser = None
          # Final configuration dictionary.
@@ -38,7 +47,7 @@ class ConfigParserWrapper:

          if file_path:
              self.file_path: str = file_path
-         else:
+         elif directory_path:
              self.file_path: str = self.directory_path + os.sep + self.file_name
          # After that you can use 'convert_string_values' function to convert certain key values to other types.

@@ -208,11 +217,15 @@ class ConfigParserWrapper:
          if not self.config_parser:
              self.initialize_config_parser()

-         if not unicode_encoding:
-             self.config_parser.read(self.file_path)
-         else:
-             # Reading with 'utf-8' (non ansi languages):
-             self.config_parser.read(self.file_path, encoding='utf-8')
+         if self.file_path:
+             if not unicode_encoding:
+                 self.config_parser.read(self.file_path)
+             else:
+                 # Reading with 'utf-8' (non ansi languages):
+                 self.config_parser.read(self.file_path, encoding='utf-8')
+         elif self.config_content:
+             config_io = io.StringIO(self.config_content)
+             self.config_parser.read_file(config_io)

          # If there are no sections in the configparser object, that means there really no sections, or the file
          # doesn't exist. ConfigParser doesn't check the file for existence.
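A sketch of constructing the wrapper from an in-memory string rather than a file on disk, as the new config_content parameter allows (the import of ConfigParserWrapper is omitted because its module path is not shown in this diff; the INI text is illustrative). When the reading code shown in the hunk above runs, the string is wrapped in io.StringIO and fed to config_parser.read_file():

    config_text = (
        '[server]\n'
        'host = localhost\n'
        'port = 9090\n'
    )

    # No 'directory_path' or 'file_path': the constructor accepts content alone.
    config_wrapper = ConfigParserWrapper(config_content=config_text)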
@@ -10,3 +10,5 @@ PRE_INSTALL_FILE_PATH: str = SRC_DIRECTORY_PATH + os.sep + 'install' + os.sep +
  # After restart.
  INSTALL_LOG_FILE_NAME: str = 'install.log'
  INSTALL_FILE_PATH: str = SRC_DIRECTORY_PATH + os.sep + 'install.py'
+
+ FACT_EXTRACTOR_FILE_NAME: str = 'extract.py'
@@ -67,8 +67,8 @@ class GitHubWrapper:

          :param target_directory:
          :param archive_remove_first_directory: boolean, sets if archive extract function will extract the archive
-             without first directory in the archive. Check reference in the 'archiver.extract_archive_with_zipfile'
-             function.
+             without first directory in the archive. Check reference in the
+             'archiver.zip.extract_archive_with_zipfile' function.
          :return:
          """

@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: atomicshop
- Version: 2.6.8
+ Version: 2.6.10
  Summary: Atomic functions and classes to make developer life easier
  Author: Denis Kras
  License: MIT License
@@ -45,6 +45,7 @@ Requires-Dist: playwright
  Requires-Dist: playwright-stealth
  Requires-Dist: protobuf
  Requires-Dist: psutil
+ Requires-Dist: py7zr
  Requires-Dist: pyautogui
  Requires-Dist: pyopenssl
  Requires-Dist: python-bidi
@@ -114,7 +115,7 @@ Currently, the library is being developed on Windows 10 and python 3.11. No othe
  This means that the official support of the library is mainly for python 3.11 under Windows 10.
  Most of the features will work, but some require at least version 3.10, like SocketWrapper. Since, some features in it are 3.10 specific.

- The library and its features will evolve based on my curiosity and needs. But as of now, the updates to features list are almost on daily basis.
+ The library and its features will evolve based on my curiosity and needs. But as of now, the updates to features list are relatively frequent.


  <!-- GETTING STARTED -->