abstract-utilities 0.2.2.513__py3-none-any.whl → 0.2.2.583__py3-none-any.whl
This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registry.
- abstract_utilities/class_utils/caller_utils.py +18 -0
- abstract_utilities/class_utils/global_utils.py +3 -2
- abstract_utilities/class_utils/imports/imports.py +1 -1
- abstract_utilities/directory_utils/__init__.py +2 -4
- abstract_utilities/directory_utils/imports/__init__.py +2 -0
- abstract_utilities/directory_utils/imports/imports.py +1 -0
- abstract_utilities/directory_utils/imports/module_imports.py +2 -0
- abstract_utilities/directory_utils/src/__init__.py +4 -0
- abstract_utilities/directory_utils/src/directory_utils.py +108 -0
- abstract_utilities/directory_utils/src/name_utils.py +43 -0
- abstract_utilities/directory_utils/src/size_utils.py +57 -0
- abstract_utilities/directory_utils/src/utils.py +116 -0
- abstract_utilities/file_utils/imports/constants.py +81 -7
- abstract_utilities/file_utils/imports/imports.py +0 -4
- abstract_utilities/file_utils/imports/module_imports.py +1 -1
- abstract_utilities/file_utils/src/__init__.py +2 -4
- abstract_utilities/file_utils/src/file_filters/__init__.py +4 -0
- abstract_utilities/file_utils/src/file_filters/ensure_utils.py +116 -0
- abstract_utilities/file_utils/src/file_filters/filter_params.py +86 -0
- abstract_utilities/file_utils/src/file_filters/filter_utils.py +78 -0
- abstract_utilities/file_utils/src/file_filters/predicate_utils.py +114 -0
- abstract_utilities/file_utils/src/file_filters.py +114 -47
- abstract_utilities/file_utils/src/file_reader.py +0 -64
- abstract_utilities/file_utils/src/file_utils.py +7 -130
- abstract_utilities/file_utils/src/filter_params.py +128 -86
- abstract_utilities/file_utils/src/find_collect.py +85 -165
- abstract_utilities/file_utils/src/find_content.py +210 -0
- abstract_utilities/file_utils/src/initFunctionsGen.py +3 -9
- abstract_utilities/file_utils/src/reader_utils/__init__.py +4 -0
- abstract_utilities/file_utils/src/reader_utils/directory_reader.py +53 -0
- abstract_utilities/file_utils/src/reader_utils/file_reader.py +543 -0
- abstract_utilities/file_utils/src/reader_utils/file_readers.py +376 -0
- abstract_utilities/file_utils/src/reader_utils/imports.py +18 -0
- abstract_utilities/file_utils/src/reader_utils/pdf_utils.py +300 -0
- abstract_utilities/file_utils (2)/__init__.py +2 -0
- abstract_utilities/file_utils (2)/imports/__init__.py +2 -0
- abstract_utilities/file_utils (2)/imports/constants.py +118 -0
- abstract_utilities/file_utils (2)/imports/imports/__init__.py +3 -0
- abstract_utilities/file_utils (2)/imports/imports/constants.py +119 -0
- abstract_utilities/file_utils (2)/imports/imports/imports.py +46 -0
- abstract_utilities/file_utils (2)/imports/imports/module_imports.py +8 -0
- abstract_utilities/file_utils (2)/imports/utils/__init__.py +3 -0
- abstract_utilities/file_utils (2)/imports/utils/classes.py +379 -0
- abstract_utilities/file_utils (2)/imports/utils/clean_imps.py +155 -0
- abstract_utilities/file_utils (2)/imports/utils/filter_utils.py +341 -0
- abstract_utilities/file_utils (2)/src/__init__.py +8 -0
- abstract_utilities/file_utils (2)/src/file_filters.py +155 -0
- abstract_utilities/file_utils (2)/src/file_reader.py +604 -0
- abstract_utilities/file_utils (2)/src/find_collect.py +258 -0
- abstract_utilities/file_utils (2)/src/initFunctionsGen.py +286 -0
- abstract_utilities/file_utils (2)/src/map_utils.py +28 -0
- abstract_utilities/file_utils (2)/src/pdf_utils.py +300 -0
- abstract_utilities/import_utils/circular_import_finder.py +222 -0
- abstract_utilities/import_utils/circular_import_finder2.py +118 -0
- abstract_utilities/import_utils/imports/module_imports.py +3 -1
- abstract_utilities/import_utils/src/clean_imports.py +156 -25
- abstract_utilities/import_utils/src/dot_utils.py +11 -0
- abstract_utilities/import_utils/src/extract_utils.py +4 -0
- abstract_utilities/import_utils/src/import_functions.py +47 -2
- abstract_utilities/import_utils/src/pkg_utils.py +58 -4
- abstract_utilities/import_utils/src/sysroot_utils.py +56 -1
- abstract_utilities/log_utils/log_file.py +3 -2
- abstract_utilities/path_utils/path_utils.py +25 -23
- abstract_utilities/safe_utils/safe_utils.py +30 -0
- {abstract_utilities-0.2.2.513.dist-info → abstract_utilities-0.2.2.583.dist-info}/METADATA +1 -1
- {abstract_utilities-0.2.2.513.dist-info → abstract_utilities-0.2.2.583.dist-info}/RECORD +68 -28
- {abstract_utilities-0.2.2.513.dist-info → abstract_utilities-0.2.2.583.dist-info}/WHEEL +0 -0
- {abstract_utilities-0.2.2.513.dist-info → abstract_utilities-0.2.2.583.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,300 @@
+from ..imports import *
+def if_none_return(obj: object, obj_2: object) -> object:
+    """
+    Return obj if obj_2 is None, otherwise return obj_2.
+
+    Args:
+        obj (Any): Primary object to return.
+        obj_2 (Any): Secondary object to check.
+
+    Returns:
+        Any: obj if obj_2 is None, else obj_2.
+    """
+    return obj if obj_2 is None else obj_2
+
+def write_pdf() -> PyPDF2.PdfWriter:
+    """
+    Return a new PDF writer object.
+
+    Returns:
+        PyPDF2.PdfWriter: New PDF writer object.
+    """
+    return PyPDF2.PdfWriter()
+def read_pdf(file: str):
+    """
+    Read and return a PDF reader object from the provided file path.
+
+    Args:
+        file (str): Path to the PDF file.
+
+    Returns:
+        PyPDF2.PdfReader: PDF reader object.
+    """
+    return PyPDF2.PdfReader(file)
+def is_pdf_path(file: str):
+    """
+    Checks if a given file path corresponds to a PDF file.
+
+    Args:
+        file (str): A string representing the file path.
+
+    Returns:
+        bool: True if the file has a '.pdf' extension, False otherwise.
+    """
+    if is_file(file):
+        if get_ext(file) == '.pdf':
+            return True
+    return False
+
+def read_pdf(file: str):
+    """Read and return a PDF reader object from the provided file path."""
+    return PyPDF2.PdfReader(file)
+def get_pdf_obj(pdf_obj: Union[str, object]) -> object:
+    """
+    Processes and returns a PDF object. If provided with a file path to a PDF,
+    it reads and returns the PDF content as an object.
+
+    Args:
+        pdf_obj: Either a PDF file path or an existing PDF object.
+
+    Returns:
+        object: The PDF content as an object.
+    """
+    if is_str(pdf_obj):
+        if is_pdf_path(pdf_obj):
+            pdf_obj = read_pdf(pdf_obj) # Assuming there's a function read_pdf() to read PDF content
+    return pdf_obj
+def get_separate_pages(pdf_reader, start_page:int=1, end_page:int=None):
+    """
+    Get specific pages from a PDF and return them as a new PDF object.
+
+    Args:
+        pdf_reader (object): The PDF reader object.
+        start_page (int, optional): The starting page number. Defaults to 1.
+        end_page (int, optional): The ending page number. Defaults to the last page.
+
+    Returns:
+        object: A new PDF writer object with the specified pages.
+    """
+    num_pages = get_pdf_pages(pdf_reader)
+
+    # Handling default or out-of-bounds page values
+    if end_page is None or num_pages < end_page:
+        end_page = num_pages
+    elif num_pages < start_page:
+        return False
+
+    pdf_writer = write_pdf()
+
+    for page_num in range(num_pages):
+        if start_page <= page_num <= end_page:
+            pdf_writer.add_page(pdf_reader.pages[page_num])
+    return pdf_writer
+def is_pdf_path(file):
+    """
+    Check if the provided file path corresponds to a valid PDF file.
+
+    Args:
+        file (str): File path.
+
+    Returns:
+        bool: True if it's a valid PDF path, False otherwise.
+    """
+    if is_file(file) and get_ext(file).lower() == '.pdf':
+        return True
+    return False
+
+def get_pdf_pages(pdf_file):
+    """
+    Get the total number of pages in the PDF.
+
+    Args:
+        pdf_file (object/str): PDF reader object or path to a PDF file.
+
+    Returns:
+        int: Number of pages in the PDF.
+    """
+    pdf_file = get_pdf_obj(pdf_file)
+    try:
+        return len(pdf_file.pages)
+    except:
+        return False
+def save_pdf(output_file_path, pdf_writer):
+    """
+    Save a PDF writer object to a file.
+
+    Args:
+        output_file_path (str): Path to save the PDF.
+        pdf_writer (object): PDF writer object to save.
+    """
+    with open(output_file_path, 'wb') as output_file:
+        pdf_writer.write(output_file)
+def split_pdf(input_path: str, output_folder: Optional[str] = None, file_name: Optional[str] = None) -> List[str]:
+    """
+    Split a PDF file into separate files for each page.
+
+    Args:
+        input_path (str): Path to the input PDF file.
+        output_folder (str, optional): Directory to save the split PDF files. Defaults to the directory of input_path.
+        file_name (str, optional): Base name for the output files. Defaults to the base name of input_path.
+
+    Returns:
+        list: List of paths to the created split PDF files.
+    """
+    pdf_pages = []
+    file_name = get_file_name(input_path) if file_name is None else file_name
+    output_folder = if_none_return(get_directory(input_path), output_folder)
+
+    print(f"Splitting PDF: {input_path}")
+    print(f"Output Folder: {output_folder}")
+    print(f"Using Filename: {file_name}")
+
+    with open(input_path, 'rb') as pdf_file:
+        pdf_reader = PyPDF2.PdfReader(pdf_file)
+        num_pages = len(pdf_reader.pages) # Replace getNumPages() with len(pdf_reader.pages)
+
+        print(f"Number of pages in PDF: {num_pages}")
+
+        for page_num in range(num_pages):
+            pdf_writer = PyPDF2.PdfWriter()
+            pdf_writer.add_page(pdf_reader.pages[page_num]) # Use the pdf_writer instance you created
+
+            output_file_path = os.path.join(output_folder, f'{file_name}_page_{page_num + 1}.pdf')
+            output_img_path = os.path.join(output_folder, f'{file_name}_page_{page_num + 1}.png')
+            print(f"Writing to: {output_file_path}")
+            pdf_pages.append(output_file_path)
+            save_pdf(output_file_path,pdf_writer)
+
+    return pdf_pages
+def pdf_to_img_list(pdf_list: List[str], output_folder: Optional[str] = None, file_name: Optional[str] = None,
+                    paginate: bool = False, extension: str = "png") -> List[str]:
+    """
+    Convert a list of PDF files to images.
+
+    Args:
+        pdf_list (List[str]): List of paths to PDF files.
+        output_folder (str, optional): Directory to save the images. Defaults to PDF's directory.
+        file_name (str, optional): Base name for the images. Defaults to PDF's name.
+        paginate (bool): Whether to paginate the image names. Defaults to False.
+        extension (str): Extension for the image files. Defaults to "png".
+
+    Returns:
+        List[str]: List of paths to the created image files.
+    """
+    image_list=[]
+    file_name_start = file_name
+    for i, each in enumerate(pdf_list):
+        try:
+            images = convert_from_path(each)
+        except Exception as e:
+            print("An error occurred while converting the PDF:", e)
+
+        if output_folder is None:
+            output_folder = get_directory(each)
+        if file_name_start is None:
+            file_name = get_file_name(each)
+        if paginate:
+            file_name=f"{file_name}_Page_{i}"
+
+        for i, image in enumerate(images):
+            image_output_path = os.path.join(output_folder, f"{file_name}.{extension}")
+            image_list.append(image_output_path)
+            save_image(image=image, image_path=image_output_path, format=extension.upper())
+    return image_list
+def img_to_txt_list(img_list: List[str], output_folder: Optional[str] = None, file_name: Optional[str] = None,
+                    paginate: bool = False, extension: str = "txt") -> List[str]:
+    """
+    Convert a list of image files to text.
+
+    Args:
+        img_list (List[str]): List of paths to image files.
+        output_folder (str, optional): Directory to save the text files. Defaults to image's directory.
+        file_name (str, optional): Base name for the text files. Defaults to image's name.
+        paginate (bool): Whether to paginate the text filenames. Defaults to False.
+        extension (str): Extension for the text files. Defaults to "txt".
+
+    Returns:
+        List[str]: List of paths to the created text files.
+    """
+    text_list = []
+    file_name_start = file_name
+    for i, each in enumerate(img_list):
+        if output_folder is None:
+            output_folder = get_directory(each)
+        if file_name_start is None:
+            file_name = get_file_name(each)
+        if paginate:
+            file_name=f"{file_name}_Page_{i}"
+
+        text_output = image_to_text(each)
+        text_output_path = os.path.join(output_folder, f"{get_file_name(each)}.{extension}")
+        text_list.append(text_output_path)
+        write_to_file(filepath=text_output_path, contents=text_output)
+    return text_list
+def open_pdf_file(pdf_file_path: str) -> None:
+    """
+    Open a PDF file using the default associated program.
+
+    Args:
+        pdf_file_path (str): Path to the PDF file to open.
+    """
+    try:
+        # Open the PDF file using the default associated program
+        cmd_input("open "+pdf_file_path)
+    except FileNotFoundError:
+        print("Error: The specified file does not exist.")
+    except Exception as e:
+        print("Error:", e)
+# use it before writing to a file
+
+
+def get_pdfs_in_directory(directory: str) -> List[str]:
+    """
+    Get a list of PDF filenames in a given directory.
+
+    Args:
+        directory (str): Path to the directory.
+
+    Returns:
+        list: List of PDF filenames in the directory.
+    """
+    pdfs = []
+    for filename in os.listdir(directory):
+        if is_pdf_path(filename):
+            pdfs.append(filename)
+    return pdfs
+
+def get_all_pdf_in_directory(file_directory: Optional[str] = None) -> List[str]:
+    """
+    Get a list of complete paths to PDF files in a given directory.
+
+    Args:
+        file_directory (str, optional): Path to the directory.
+
+    Returns:
+        list: List of paths to PDF files in the directory.
+    """
+    pdfs=[]
+    for filename in sorted(os.listdir(file_directory)):
+        if is_pdf_path(filename):
+            pdf_path = os.path.join(file_directory, filename)
+            if is_file(pdf_path):
+                pdfs.append(pdf_path)
+    return pdfs
+
+def collate_pdfs(pdf_list: List[str], output_pdf_path: str) -> None:
+    """
+    Merge multiple PDF files into a single PDF.
+
+    Args:
+        pdf_list (list): List of paths to PDF files to be merged.
+        output_pdf_path (str): Path to save the merged PDF.
+    """
+    pdf_writer = PyPDF2.PdfWriter()
+    for file_path in pdf_list:
+        with open(file_path, 'rb') as pdf_file:
+            pdf_reader = PyPDF2.PdfReader(pdf_file)
+            for page_num in range(len(pdf_reader.pages)):
+                pdf_writer.add_page(pdf_reader.pages[page_num])
+    save_pdf(output_file_path, pdf_writer)
+
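The hunk above matches the new PDF helper module in the file table (reader_utils/pdf_utils.py, +300 lines, also mirrored as file_utils (2)/src/pdf_utils.py). Below is a minimal usage sketch of the split-then-OCR chain it defines; the import path and the sample file paths are assumptions, and the image/OCR helpers it relies on (convert_from_path, save_image, image_to_text) arrive through its star import of ..imports, so a pdf2image-style converter and an OCR backend would need to be installed alongside PyPDF2.

# Sketch only: import path and file locations are assumptions, not taken from the diff.
from abstract_utilities.file_utils.src.reader_utils.pdf_utils import (
    split_pdf, pdf_to_img_list, img_to_txt_list,
)

# Write one single-page PDF per page of the input (next to the input file by default).
pages = split_pdf("/tmp/report.pdf")

# Rasterize each page PDF to a PNG, then OCR the images into .txt files.
images = pdf_to_img_list(pages, paginate=True)
texts = img_to_txt_list(images, paginate=True)

Note that collate_pdfs in this hunk saves its result with save_pdf(output_file_path, pdf_writer) even though its parameter is named output_pdf_path, so the merge helper would raise a NameError as published.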
@@ -0,0 +1,222 @@
+from abstract_utilities import *
+from collections import defaultdict
+def clean_line(line):
+    return eatAll(line,[' ','','\t','\n'])
+def is_from_line_group(line):
+    if line and line.startswith(FROM_TAG) and IMPORT_TAG in line and '(' in line:
+        import_spl = line.split(IMPORT_TAG)[-1]
+        import_spl_clean = clean_line(line)
+        if not import_spl_clean.endswith(')'):
+            return True
+    return False
+def clean_imports(text=None,file_path=None,import_pkg_js=None,fill_nulines=False):
+    if text and os.path.isfile(text):
+        file_path = text
+        input(file_path)
+        text = read_from_file(file_path)
+    if not import_pkg_js:
+        import_pkg_js = get_all_imports(text=text,file_path=file_path)
+    import_pkg_js = ensure_import_pkg_js(import_pkg_js,file_path=file_path)
+    nu_lines = import_pkg_js["context"]["nulines"]
+    for pkg,values in import_pkg_js.items():
+        comments = []
+        if pkg not in ["context"]:
+
+            imports = values.get('imports')
+            for i,imp in enumerate(imports):
+                if '#' in imp:
+                    imp_spl = imp.split('#')
+                    comments.append(imp_spl[-1])
+                    imports[i] = clean_line(imp_spl[0])
+            imports = list(set(imports))
+            if '*' in imports:
+                imports="*"
+            else:
+                imports=','.join(imports)
+            if comments:
+                comments=','.join(comments)
+                imports+=f" #{comments}"
+            import_pkg_js[pkg]["imports"]=imports
+            if fill_nulines:
+                line = values.get('line')
+                if len(nu_lines) >= line:
+                    nu_lines[line] += imports
+    return import_pkg_js
+def get_all_imports(text=None,file_path=None,import_pkg_js=None):
+    if text and os.path.isfile(text):
+
+        try:
+            text = read_from_file(text)
+        except:
+            pass
+        file_path = text
+    text = get_text_or_read(text=text,file_path=file_path)
+    lines = text.split('\n')
+    cleaned_import_list=[]
+    nu_lines = []
+    is_from_group = False
+    import_pkg_js = ensure_import_pkg_js(import_pkg_js,file_path=file_path)
+    for line in lines:
+        if line.startswith(IMPORT_TAG) and ' from ' not in line:
+            cleaned_import_list = get_cleaned_import_list(line)
+            import_pkg_js = add_imports_to_import_pkg_js("import",cleaned_import_list,import_pkg_js=import_pkg_js)
+        else:
+            if is_from_group:
+                import_pkg=is_from_group
+                line = clean_line(line)
+                if line.endswith(')'):
+                    is_from_group=False
+                    line=line[:-1]
+                imports_from_import_pkg = clean_imports(line)
+                import_pkg_js = add_imports_to_import_pkg_js(import_pkg,imports_from_import_pkg,import_pkg_js=import_pkg_js)
+
+            else:
+                import_pkg_js=update_import_pkg_js(line,import_pkg_js=import_pkg_js)
+                if is_from_line_group(line) and is_from_group == False:
+                    is_from_group=get_import_pkg(line)
+    return import_pkg_js
+def get_path_or_init(pkg_info):
+    root_dirname = pkg_info.get("root_dirname")
+    pkg = pkg_info.get("pkg")
+    rel_path = pkg.replace('.','/')
+    dirname = os.path.dirname(root_dirname)
+    pkg_path = os.path.join(dirname,rel_path)
+    pkg_py_path = f"{pkg_path}.py"
+    if os.path.isfile(pkg_py_path):
+        return pkg_py_path
+    pkg_init_path = os.path.join(pkg_path,'__init__.py')
+    if os.path.isdir(pkg_path):
+        if os.path.isfile(pkg_init_path):
+            return pkg_init_path
+    #input(f"nnot found == {pkg_info}")
+def get_dot_fro_line(line,dirname=None,file_path=None,get_info=False):
+    info_js = {"nuline":line,"og_line":line,"pkg":line,"dirname":dirname,"file_path":file_path,"root_dirname":None,"local":False}
+    if dirname and is_file(dirname):
+        file_path=dirname
+        dirname = os.path.dirname(dirname)
+        info_js["file_path"]=file_path
+        info_js["dirname"]=dirname
+    from_line = line.split(FROM_TAG)[-1]
+    dot_fro = ""
+    for char in from_line:
+        if char != '.':
+            pkg = f"{dot_fro}{eatAll(from_line,'.')}"
+            nuline=f"from {pkg}"
+            info_js["nuline"]=nuline
+            info_js["pkg"]=pkg
+            break
+        if dirname:
+            info_js["root_dirname"]=dirname
+            dirbase = os.path.basename(dirname)
+            dirname = os.path.dirname(dirname)
+
+            dot_fro = f"{dirbase}.{dot_fro}"
+    if get_info:
+        if dot_fro and os.path.isdir(info_js["root_dirname"]):
+            info_js["local"]=True
+            info_js["pkg_path"]=get_path_or_init(info_js)
+        return info_js
+    return line
+def get_top_level_imp(line,dirname=None):
+    imp = get_dot_fro_line(line,dirname)
+    return imp.split('.')[0]
+def return_local_imps(file_path):
+    local_imps = []
+    dirname = os.path.dirname(file_path)
+    imports_js = get_all_imports(file_path)
+    for pkg,imps in imports_js.items():
+        if pkg not in ['context','nulines']:
+            full_imp_info = get_dot_fro_line(pkg,dirname,file_path=file_path,get_info=True)
+            if full_imp_info.get("local") == True:
+                local_imps.append(full_imp_info)
+    return local_imps
+def get_all_pkg_paths(file_path):
+    pkg_paths = []
+    local_imps = return_local_imps(file_path)
+    for local_imp in local_imps:
+        curr_file_path = local_imp.get('file_path')
+        pkg_path = local_imp.get('pkg_path')
+        if pkg_path != None:
+            pkg_paths.append(pkg_path)
+    return pkg_paths
+def get_cir_dir(pkg_path):
+    dirname = os.path.dirname(pkg_path)
+    dirbase = os.path.basename(dirname)
+    while True:
+        if dirname == "/home/flerb/Documents/pythonTools/modules/src/modules/abstract_utilities/src/abstract_utilities":
+            break
+        dirbase = os.path.basename(dirname)
+        dirname = os.path.dirname(dirname)
+    #input(f"{dirbase} is circular")
+    return dirbase
+def is_circular(pkg_path):
+    pkg_paths = get_all_pkg_paths(pkg_path)
+    if pkg_path in pkg_paths:
+        return pkg_path
+def are_circular(pkg_path,cir_dirs = None):
+    cir_dirs = cir_dirs or []
+    pkg_path = is_circular(pkg_path)
+    if pkg_path:
+        if pkg_path not in cir_dirs:
+            cir_dirs.append(pkg_path)
+    return cir_dirs
+
+
+def build_dependency_graph(main_directory):
+    """Map each file to all local imports (by resolved pkg_path)."""
+    graph = defaultdict(list)
+    dirs, all_local_scripts = get_files_and_dirs(
+        main_directory,
+        allowed_exts='.py',
+        exclude_dirs=['depriciate', 'junk'],
+        files_only=True
+    )
+    for file_path in all_local_scripts:
+        deps = get_all_pkg_paths(file_path)
+        for dep in deps:
+            if dep and os.path.isfile(dep):
+                graph[file_path].append(dep)
+    return graph
+
+
+def find_circular_chains(graph):
+    """Detect circular imports and return their full dependency paths."""
+    visited, cycles = set(), []
+
+    def dfs(node, path):
+        visited.add(node)
+        path.append(node)
+        for dep in graph.get(node, []):
+            if dep not in path:
+                dfs(dep, path.copy())
+            else:
+                # Found a circular import
+                cycle_start = path.index(dep)
+                cycle = path[cycle_start:] + [dep]
+                if cycle not in cycles:
+                    cycles.append(cycle)
+                return
+
+    for start in graph:
+        dfs(start, [])
+    return cycles
+
+
+def explain_circular_imports(cycles):
+    """Pretty-print circular import chains with file names and import lines."""
+    for i, cycle in enumerate(cycles, 1):
+        print(f"\n🔁 Circular import {i}:")
+        for j in range(len(cycle) - 1):
+            src, dst = cycle[j], cycle[j + 1]
+            print(f" {os.path.basename(src)} → {os.path.basename(dst)}")
+        print(f" ^ back to {os.path.basename(cycle[0])}")
+main_directory = "/home/flerb/Documents/pythonTools/modules/src/modules/abstract_utilities/src/abstract_utilities"
+
+graph = build_dependency_graph(main_directory)
+cycles = find_circular_chains(graph)
+
+if not cycles:
+    print("✅ No circular imports found.")
+else:
+    print(f"❌ Found {len(cycles)} circular import(s).")
+    explain_circular_imports(cycles)
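The hunk above matches import_utils/circular_import_finder.py (+222 lines). Its final block runs at import time against a hard-coded /home/flerb/... development path, and clean_imports calls input(file_path), which blocks on stdin, so reusing the graph helpers against another project means either vendoring them or accepting that one-off scan on import. A minimal reuse sketch under those assumptions, with a hypothetical target directory:

# Sketch only: the target directory is an assumption; importing this module as published
# also executes its module-level scan of the hard-coded development path shown above.
from abstract_utilities.import_utils.circular_import_finder import (
    build_dependency_graph, find_circular_chains, explain_circular_imports,
)

graph = build_dependency_graph("/path/to/your/package")  # file -> resolved local dependency paths
cycles = find_circular_chains(graph)                     # DFS that records any path revisiting a node
if cycles:
    explain_circular_imports(cycles)                     # pretty-print each circular chain
else:
    print("no circular imports")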
@@ -0,0 +1,118 @@
+from abstract_utilities import *
+import os
+from collections import defaultdict
+
+def get_path_or_init(pkg_info):
+    root_dirname = pkg_info.get("root_dirname")
+    pkg = pkg_info.get("pkg")
+    rel_path = pkg.replace('.', '/')
+    dirname = os.path.dirname(root_dirname)
+    pkg_path = os.path.join(dirname, rel_path)
+    pkg_py_path = f"{pkg_path}.py"
+    if os.path.isfile(pkg_py_path):
+        return pkg_py_path
+    pkg_init_path = os.path.join(pkg_path, '__init__.py')
+    if os.path.isdir(pkg_path) and os.path.isfile(pkg_init_path):
+        return pkg_init_path
+    # optional: silence instead of blocking input()
+    print(f"⚠️ not found == {pkg_info}")
+    return None
+
+
+def get_dot_fro_line(line, dirname=None, file_path=None, get_info=False):
+    info_js = {"nuline": line, "og_line": line, "pkg": line, "dirname": dirname,
+               "file_path": file_path, "root_dirname": None, "local": False}
+    if dirname and is_file(dirname):
+        file_path = dirname
+        dirname = os.path.dirname(dirname)
+        info_js["file_path"] = file_path
+        info_js["dirname"] = dirname
+
+    from_line = line.split(FROM_TAG)[-1]
+    dot_fro = ""
+    for char in from_line:
+        if char != '.':
+            pkg = f"{dot_fro}{eatAll(from_line, '.')}"
+            nuline = f"from {pkg}"
+            info_js["nuline"] = nuline
+            info_js["pkg"] = pkg
+            break
+        if dirname:
+            info_js["root_dirname"] = dirname
+            dirbase = os.path.basename(dirname)
+            dirname = os.path.dirname(dirname)
+            dot_fro = f"{dirbase}.{dot_fro}"
+
+    if get_info:
+        if dot_fro and os.path.isdir(info_js.get("root_dirname") or ""):
+            info_js["local"] = True
+            info_js["pkg_path"] = get_path_or_init(info_js)
+        return info_js
+    return line
+
+
+def return_local_imps(file_path):
+    local_imps = []
+    dirname = os.path.dirname(file_path)
+    imports_js = get_all_imports(file_path)
+    for pkg, imps in imports_js.items():
+        if pkg not in ['context', 'nulines']:
+            full_imp_info = get_dot_fro_line(pkg, dirname, file_path=file_path, get_info=True)
+            if full_imp_info.get("local"):
+                local_imps.append(full_imp_info)
+    return local_imps
+
+
+def get_all_pkg_paths(file_path):
+    pkg_paths = []
+    local_imps = return_local_imps(file_path)
+    for local_imp in local_imps:
+        pkg_path = local_imp.get('pkg_path')
+        if pkg_path:
+            pkg_paths.append(pkg_path)
+    return pkg_paths
+
+
+# --- NEW: Build dependency graph and detect circular imports ---
+
+def build_graph(main_directory):
+    dirs, all_local_scripts = get_files_and_dirs(main_directory, allowd_exts='.py', files_only=True)
+    graph = defaultdict(set)
+    for file_path in all_local_scripts:
+        deps = get_all_pkg_paths(file_path)
+        for dep in deps:
+            if dep: # only valid files
+                graph[file_path].add(dep)
+    return graph
+
+
+def find_cycles(graph):
+    visited, stack, cycles = set(), [], []
+
+    def dfs(node, path):
+        visited.add(node)
+        path.append(node)
+        for dep in graph.get(node, []):
+            if dep not in visited:
+                dfs(dep, path.copy())
+            elif dep in path:
+                cycle_start = path.index(dep)
+                cycles.append(path[cycle_start:] + [dep])
+
+    for node in graph:
+        if node not in visited:
+            dfs(node, [])
+    return cycles
+
+
+if __name__ == "__main__":
+    main_directory = "/home/flerb/Documents/pythonTools/modules/src/modules/abstract_utilities/src/abstract_utilities"
+    graph = build_graph(main_directory)
+    cycles = find_cycles(graph)
+
+    if not cycles:
+        print("✅ No circular imports found.")
+    else:
+        print("❌ Circular imports detected:")
+        for cycle in cycles:
+            print(" → ".join(cycle))
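The hunk above matches import_utils/circular_import_finder2.py (+118 lines), a trimmed variant of the first finder. Unlike the first module it guards its scan behind if __name__ == "__main__", so importing it does not trigger the hard-coded walk (note that build_graph passes allowd_exts=, apparently a typo for the allowed_exts keyword used in the first finder). Because find_cycles only needs a node-to-dependencies mapping, its behaviour can be exercised without touching the filesystem; a self-contained sketch with a synthetic graph, where the string node names stand in for file paths:

# Sketch only: a synthetic graph standing in for the file-path graph that build_graph returns.
from collections import defaultdict

from abstract_utilities.import_utils.circular_import_finder2 import find_cycles

graph = defaultdict(set)
graph["a.py"].add("b.py")
graph["b.py"].add("c.py")
graph["c.py"].add("a.py")   # closes the a -> b -> c -> a loop
graph["d.py"].add("b.py")   # feeds the loop but is not itself part of a cycle

for cycle in find_cycles(graph):
    print(" → ".join(cycle))  # e.g. a.py → b.py → c.py → a.py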
@@ -1,6 +1,8 @@
 from ...read_write_utils import read_from_file,write_to_file,get_text_or_read
 from ...string_utils import eatAll,eatInner,eatElse,clean_line
-from ...class_utils import get_caller_path
+from ...class_utils import get_caller_path,get_caller_dir,if_none_default,get_initial_caller_dir
 from ...list_utils import make_list
 from ...path_utils import get_file_parts
 from ...type_utils import is_number,make_list
+from ...file_utils import collect_filepaths,collect_globs
+from ...directory_utils import get_shortest_path,get_common_root