megadetector 10.0.13__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of megadetector might be problematic.

Files changed (147)
  1. megadetector/__init__.py +0 -0
  2. megadetector/api/__init__.py +0 -0
  3. megadetector/api/batch_processing/integration/digiKam/setup.py +6 -0
  4. megadetector/api/batch_processing/integration/digiKam/xmp_integration.py +465 -0
  5. megadetector/api/batch_processing/integration/eMammal/test_scripts/config_template.py +5 -0
  6. megadetector/api/batch_processing/integration/eMammal/test_scripts/push_annotations_to_emammal.py +125 -0
  7. megadetector/api/batch_processing/integration/eMammal/test_scripts/select_images_for_testing.py +55 -0
  8. megadetector/classification/__init__.py +0 -0
  9. megadetector/classification/aggregate_classifier_probs.py +108 -0
  10. megadetector/classification/analyze_failed_images.py +227 -0
  11. megadetector/classification/cache_batchapi_outputs.py +198 -0
  12. megadetector/classification/create_classification_dataset.py +626 -0
  13. megadetector/classification/crop_detections.py +516 -0
  14. megadetector/classification/csv_to_json.py +226 -0
  15. megadetector/classification/detect_and_crop.py +853 -0
  16. megadetector/classification/efficientnet/__init__.py +9 -0
  17. megadetector/classification/efficientnet/model.py +415 -0
  18. megadetector/classification/efficientnet/utils.py +608 -0
  19. megadetector/classification/evaluate_model.py +520 -0
  20. megadetector/classification/identify_mislabeled_candidates.py +152 -0
  21. megadetector/classification/json_to_azcopy_list.py +63 -0
  22. megadetector/classification/json_validator.py +696 -0
  23. megadetector/classification/map_classification_categories.py +276 -0
  24. megadetector/classification/merge_classification_detection_output.py +509 -0
  25. megadetector/classification/prepare_classification_script.py +194 -0
  26. megadetector/classification/prepare_classification_script_mc.py +228 -0
  27. megadetector/classification/run_classifier.py +287 -0
  28. megadetector/classification/save_mislabeled.py +110 -0
  29. megadetector/classification/train_classifier.py +827 -0
  30. megadetector/classification/train_classifier_tf.py +725 -0
  31. megadetector/classification/train_utils.py +323 -0
  32. megadetector/data_management/__init__.py +0 -0
  33. megadetector/data_management/animl_to_md.py +161 -0
  34. megadetector/data_management/annotations/__init__.py +0 -0
  35. megadetector/data_management/annotations/annotation_constants.py +33 -0
  36. megadetector/data_management/camtrap_dp_to_coco.py +270 -0
  37. megadetector/data_management/cct_json_utils.py +566 -0
  38. megadetector/data_management/cct_to_md.py +184 -0
  39. megadetector/data_management/cct_to_wi.py +293 -0
  40. megadetector/data_management/coco_to_labelme.py +284 -0
  41. megadetector/data_management/coco_to_yolo.py +702 -0
  42. megadetector/data_management/databases/__init__.py +0 -0
  43. megadetector/data_management/databases/add_width_and_height_to_db.py +107 -0
  44. megadetector/data_management/databases/combine_coco_camera_traps_files.py +210 -0
  45. megadetector/data_management/databases/integrity_check_json_db.py +528 -0
  46. megadetector/data_management/databases/subset_json_db.py +195 -0
  47. megadetector/data_management/generate_crops_from_cct.py +200 -0
  48. megadetector/data_management/get_image_sizes.py +164 -0
  49. megadetector/data_management/labelme_to_coco.py +559 -0
  50. megadetector/data_management/labelme_to_yolo.py +349 -0
  51. megadetector/data_management/lila/__init__.py +0 -0
  52. megadetector/data_management/lila/create_lila_blank_set.py +556 -0
  53. megadetector/data_management/lila/create_lila_test_set.py +187 -0
  54. megadetector/data_management/lila/create_links_to_md_results_files.py +106 -0
  55. megadetector/data_management/lila/download_lila_subset.py +182 -0
  56. megadetector/data_management/lila/generate_lila_per_image_labels.py +777 -0
  57. megadetector/data_management/lila/get_lila_annotation_counts.py +174 -0
  58. megadetector/data_management/lila/get_lila_image_counts.py +112 -0
  59. megadetector/data_management/lila/lila_common.py +319 -0
  60. megadetector/data_management/lila/test_lila_metadata_urls.py +164 -0
  61. megadetector/data_management/mewc_to_md.py +344 -0
  62. megadetector/data_management/ocr_tools.py +873 -0
  63. megadetector/data_management/read_exif.py +964 -0
  64. megadetector/data_management/remap_coco_categories.py +195 -0
  65. megadetector/data_management/remove_exif.py +156 -0
  66. megadetector/data_management/rename_images.py +194 -0
  67. megadetector/data_management/resize_coco_dataset.py +663 -0
  68. megadetector/data_management/speciesnet_to_md.py +41 -0
  69. megadetector/data_management/wi_download_csv_to_coco.py +247 -0
  70. megadetector/data_management/yolo_output_to_md_output.py +594 -0
  71. megadetector/data_management/yolo_to_coco.py +876 -0
  72. megadetector/data_management/zamba_to_md.py +188 -0
  73. megadetector/detection/__init__.py +0 -0
  74. megadetector/detection/change_detection.py +840 -0
  75. megadetector/detection/process_video.py +479 -0
  76. megadetector/detection/pytorch_detector.py +1451 -0
  77. megadetector/detection/run_detector.py +1267 -0
  78. megadetector/detection/run_detector_batch.py +2159 -0
  79. megadetector/detection/run_inference_with_yolov5_val.py +1314 -0
  80. megadetector/detection/run_md_and_speciesnet.py +1494 -0
  81. megadetector/detection/run_tiled_inference.py +1038 -0
  82. megadetector/detection/tf_detector.py +209 -0
  83. megadetector/detection/video_utils.py +1379 -0
  84. megadetector/postprocessing/__init__.py +0 -0
  85. megadetector/postprocessing/add_max_conf.py +72 -0
  86. megadetector/postprocessing/categorize_detections_by_size.py +166 -0
  87. megadetector/postprocessing/classification_postprocessing.py +1752 -0
  88. megadetector/postprocessing/combine_batch_outputs.py +249 -0
  89. megadetector/postprocessing/compare_batch_results.py +2110 -0
  90. megadetector/postprocessing/convert_output_format.py +403 -0
  91. megadetector/postprocessing/create_crop_folder.py +629 -0
  92. megadetector/postprocessing/detector_calibration.py +570 -0
  93. megadetector/postprocessing/generate_csv_report.py +522 -0
  94. megadetector/postprocessing/load_api_results.py +223 -0
  95. megadetector/postprocessing/md_to_coco.py +428 -0
  96. megadetector/postprocessing/md_to_labelme.py +351 -0
  97. megadetector/postprocessing/md_to_wi.py +41 -0
  98. megadetector/postprocessing/merge_detections.py +392 -0
  99. megadetector/postprocessing/postprocess_batch_results.py +2077 -0
  100. megadetector/postprocessing/remap_detection_categories.py +226 -0
  101. megadetector/postprocessing/render_detection_confusion_matrix.py +677 -0
  102. megadetector/postprocessing/repeat_detection_elimination/find_repeat_detections.py +206 -0
  103. megadetector/postprocessing/repeat_detection_elimination/remove_repeat_detections.py +82 -0
  104. megadetector/postprocessing/repeat_detection_elimination/repeat_detections_core.py +1665 -0
  105. megadetector/postprocessing/separate_detections_into_folders.py +795 -0
  106. megadetector/postprocessing/subset_json_detector_output.py +964 -0
  107. megadetector/postprocessing/top_folders_to_bottom.py +238 -0
  108. megadetector/postprocessing/validate_batch_results.py +332 -0
  109. megadetector/taxonomy_mapping/__init__.py +0 -0
  110. megadetector/taxonomy_mapping/map_lila_taxonomy_to_wi_taxonomy.py +491 -0
  111. megadetector/taxonomy_mapping/map_new_lila_datasets.py +213 -0
  112. megadetector/taxonomy_mapping/prepare_lila_taxonomy_release.py +165 -0
  113. megadetector/taxonomy_mapping/preview_lila_taxonomy.py +543 -0
  114. megadetector/taxonomy_mapping/retrieve_sample_image.py +71 -0
  115. megadetector/taxonomy_mapping/simple_image_download.py +224 -0
  116. megadetector/taxonomy_mapping/species_lookup.py +1008 -0
  117. megadetector/taxonomy_mapping/taxonomy_csv_checker.py +159 -0
  118. megadetector/taxonomy_mapping/taxonomy_graph.py +346 -0
  119. megadetector/taxonomy_mapping/validate_lila_category_mappings.py +83 -0
  120. megadetector/tests/__init__.py +0 -0
  121. megadetector/tests/test_nms_synthetic.py +335 -0
  122. megadetector/utils/__init__.py +0 -0
  123. megadetector/utils/ct_utils.py +1857 -0
  124. megadetector/utils/directory_listing.py +199 -0
  125. megadetector/utils/extract_frames_from_video.py +307 -0
  126. megadetector/utils/gpu_test.py +125 -0
  127. megadetector/utils/md_tests.py +2072 -0
  128. megadetector/utils/path_utils.py +2832 -0
  129. megadetector/utils/process_utils.py +172 -0
  130. megadetector/utils/split_locations_into_train_val.py +237 -0
  131. megadetector/utils/string_utils.py +234 -0
  132. megadetector/utils/url_utils.py +825 -0
  133. megadetector/utils/wi_platform_utils.py +968 -0
  134. megadetector/utils/wi_taxonomy_utils.py +1759 -0
  135. megadetector/utils/write_html_image_list.py +239 -0
  136. megadetector/visualization/__init__.py +0 -0
  137. megadetector/visualization/plot_utils.py +309 -0
  138. megadetector/visualization/render_images_with_thumbnails.py +243 -0
  139. megadetector/visualization/visualization_utils.py +1940 -0
  140. megadetector/visualization/visualize_db.py +630 -0
  141. megadetector/visualization/visualize_detector_output.py +479 -0
  142. megadetector/visualization/visualize_video_output.py +705 -0
  143. megadetector-10.0.13.dist-info/METADATA +134 -0
  144. megadetector-10.0.13.dist-info/RECORD +147 -0
  145. megadetector-10.0.13.dist-info/WHEEL +5 -0
  146. megadetector-10.0.13.dist-info/licenses/LICENSE +19 -0
  147. megadetector-10.0.13.dist-info/top_level.txt +1 -0
@@ -0,0 +1,2832 @@
"""

path_utils.py

Miscellaneous useful utils for path manipulation, i.e. things that could *almost*
be in os.path, but aren't.

"""

#%% Imports and constants

import glob
import ntpath
import os
import sys
import platform
import string
import json
import shutil
import hashlib
import unicodedata
import zipfile
import tarfile
import webbrowser
import subprocess
import re

from zipfile import ZipFile
from datetime import datetime
from collections import defaultdict
from multiprocessing.pool import Pool, ThreadPool
from functools import partial
from shutil import which
from tqdm import tqdm

from megadetector.utils.ct_utils import is_iterable
from megadetector.utils.ct_utils import make_test_folder
from megadetector.utils.ct_utils import sort_dictionary_by_value
from megadetector.utils.ct_utils import environment_is_wsl

# Should all be lower-case
IMG_EXTENSIONS = ('.jpg', '.jpeg', '.gif', '.png', '.tif', '.tiff', '.bmp')

VALID_FILENAME_CHARS = f"~-_.() {string.ascii_letters}{string.digits}"
SEPARATOR_CHARS = r":\/"
VALID_PATH_CHARS = VALID_FILENAME_CHARS + SEPARATOR_CHARS
CHAR_LIMIT = 255

#%% General path functions

def recursive_file_list(base_dir,
                        convert_slashes=True,
                        return_relative_paths=False,
                        sort_files=True,
                        recursive=True):
    r"""
    Enumerates files (not directories) in [base_dir].

    Args:
        base_dir (str): folder to enumerate
        convert_slashes (bool, optional): force forward slashes; if this is False, will use
            the native path separator
        return_relative_paths (bool, optional): return paths that are relative to [base_dir],
            rather than absolute paths
        sort_files (bool, optional): force files to be sorted, otherwise uses the sorting
            provided by os.walk()
        recursive (bool, optional): enumerate recursively

    Returns:
        list: list of filenames
    """

    assert os.path.isdir(base_dir), '{} is not a folder'.format(base_dir)

    all_files = []

    if recursive:
        for root, _, filenames in os.walk(base_dir):
            for filename in filenames:
                full_path = os.path.join(root, filename)
                all_files.append(full_path)
    else:
        all_files_relative = os.listdir(base_dir)
        all_files = [os.path.join(base_dir,fn) for fn in all_files_relative]
        all_files = [fn for fn in all_files if os.path.isfile(fn)]

    if return_relative_paths:
        all_files = [os.path.relpath(fn,base_dir) for fn in all_files]

    if convert_slashes:
        all_files = [fn.replace('\\', '/') for fn in all_files]

    if sort_files:
        all_files = sorted(all_files)

    return all_files

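# A minimal usage sketch for recursive_file_list(); the folder name below is
# hypothetical.

if False:

    all_files = recursive_file_list('/data/camera-traps', return_relative_paths=True)
    jpeg_files = [fn for fn in all_files if fn.lower().endswith('.jpg')]
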
def file_list(base_dir,
              convert_slashes=True,
              return_relative_paths=False,
              sort_files=True,
              recursive=False):
    """
    Trivial wrapper for recursive_file_list. In hindsight that was a poor name choice:
    non-recursive enumeration was added later, and a "recursive" option doesn't make
    sense in a function called "recursive_file_list".

    Args:
        base_dir (str): folder to enumerate
        convert_slashes (bool, optional): force forward slashes; if this is False, will use
            the native path separator
        return_relative_paths (bool, optional): return paths that are relative to [base_dir],
            rather than absolute paths
        sort_files (bool, optional): force files to be sorted, otherwise uses the sorting
            provided by os.walk()
        recursive (bool, optional): enumerate recursively

    Returns:
        list: list of filenames
    """

    return recursive_file_list(base_dir,convert_slashes,return_relative_paths,sort_files,
                               recursive=recursive)

def folder_list(base_dir,
                convert_slashes=True,
                return_relative_paths=False,
                sort_folders=True,
                recursive=False):
    """
    Enumerates folders (not files) in [base_dir].

    Args:
        base_dir (str): folder to enumerate
        convert_slashes (bool, optional): force forward slashes; if this is False, will use
            the native path separator
        return_relative_paths (bool, optional): return paths that are relative to [base_dir],
            rather than absolute paths
        sort_folders (bool, optional): force folders to be sorted, otherwise uses the sorting
            provided by os.walk()
        recursive (bool, optional): enumerate recursively

    Returns:
        list: list of folder names
    """

    assert os.path.isdir(base_dir), '{} is not a folder'.format(base_dir)

    folders = []

    if recursive:
        for root, dirs, _ in os.walk(base_dir):
            for d in dirs:
                folders.append(os.path.join(root, d))
    else:
        folders = os.listdir(base_dir)
        folders = [os.path.join(base_dir,fn) for fn in folders]
        folders = [fn for fn in folders if os.path.isdir(fn)]

    if return_relative_paths:
        folders = [os.path.relpath(fn,base_dir) for fn in folders]

    if convert_slashes:
        folders = [fn.replace('\\', '/') for fn in folders]

    if sort_folders:
        folders = sorted(folders)

    return folders

def folder_summary(folder,print_summary=True):
    """
    Returns (and optionally prints) a summary of [folder], including:

    * The total number of files
    * The total number of folders
    * The number of files for each extension

    Args:
        folder (str): folder to summarize
        print_summary (bool, optional): whether to print the summary

    Returns:
        dict: with fields "n_files", "n_folders", and "extension_to_count"
    """

    assert os.path.isdir(folder), '{} is not a folder'.format(folder)

    folders_relative = folder_list(folder,return_relative_paths=True,recursive=True)
    files_relative = file_list(folder,return_relative_paths=True,recursive=True)

    extension_to_count = defaultdict(int)

    for fn in files_relative:
        ext = os.path.splitext(fn)[1]
        extension_to_count[ext] += 1

    extension_to_count = sort_dictionary_by_value(extension_to_count,reverse=True)

    if print_summary:
        for extension in extension_to_count.keys():
            print('{}: {}'.format(extension,extension_to_count[extension]))
        print('')
        print('Total files: {}'.format(len(files_relative)))
        print('Total folders: {}'.format(len(folders_relative)))

    to_return = {}
    to_return['n_files'] = len(files_relative)
    to_return['n_folders'] = len(folders_relative)
    to_return['extension_to_count'] = extension_to_count

    return to_return

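# A minimal sketch of how the dict returned by folder_summary() might be used;
# the folder name is hypothetical.

if False:

    summary = folder_summary('/data/camera-traps', print_summary=False)
    print('{} files in {} folders'.format(summary['n_files'], summary['n_folders']))
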
def fileparts(path):
    r"""
    Breaks down a path into the directory path, filename, and extension.

    Note that the '.' lives with the extension, and separators are removed.

    Examples:

    .. code-block:: none

        >>> fileparts('file')
        ('', 'file', '')
        >>> fileparts(r'c:/dir/file.jpg')
        ('c:/dir', 'file', '.jpg')
        >>> fileparts('/dir/subdir/file.jpg')
        ('/dir/subdir', 'file', '.jpg')

    Args:
        path (str): path name to separate into parts

    Returns:
        tuple: tuple containing (p,n,e):
            - p: str, directory path
            - n: str, filename without extension
            - e: str, extension including the '.'
    """

    # ntpath seems to do the right thing for both Windows and Unix paths
    p = ntpath.dirname(path)
    basename = ntpath.basename(path)
    n, e = ntpath.splitext(basename)
    return p, n, e

def insert_before_extension(filename, s=None, separator='.'):
    """
    Inserts string [s] before the extension in [filename], separated with [separator].

    If [s] is empty, generates a date/timestamp. If [filename] has no extension,
    appends [s].

    Examples:

    .. code-block:: none

        >>> insert_before_extension('/dir/subdir/file.ext', 'insert')
        '/dir/subdir/file.insert.ext'
        >>> insert_before_extension('/dir/subdir/file', 'insert')
        '/dir/subdir/file.insert'
        >>> insert_before_extension('/dir/subdir/file')
        '/dir/subdir/file.2020.07.20.10.54.38'

    Args:
        filename (str): filename to manipulate
        s (str, optional): string to insert before the extension in [filename], or
            None to insert a datestamp
        separator (str, optional): separator to place between the filename base
            and the inserted string

    Returns:
        str: modified string
    """

    assert len(filename) > 0
    if s is None or len(s) == 0:
        s = datetime.now().strftime('%Y.%m.%d.%H.%M.%S')
    name, ext = os.path.splitext(filename)
    return f'{name}{separator}{s}{ext}'

def split_path(path):
    r"""
    Splits [path] into all its constituent file/folder tokens.

    Examples:

    .. code-block:: none

        >>> split_path(r'c:\dir\subdir\file.txt')
        ['c:\\', 'dir', 'subdir', 'file.txt']
        >>> split_path('/dir/subdir/file.jpg')
        ['/', 'dir', 'subdir', 'file.jpg']
        >>> split_path('c:\\')
        ['c:\\']
        >>> split_path('/')
        ['/']

    Args:
        path (str): path to split into tokens

    Returns:
        list: list of path tokens
    """

    # Edge cases
    if path == '':
        return ''
    if path is None:
        return None

    parts = []
    while True:
        # ntpath seems to do the right thing for both Windows and Unix paths
        head, tail = ntpath.split(path)
        if head == '' or head == path:
            break
        parts.append(tail)
        path = head
    parts.append(head or tail)
    return parts[::-1]  # reverse

def path_is_abs(p):
    """
    Determines whether [p] is an absolute path. An absolute path is defined as
    one that starts with slash, backslash, or a letter followed by a colon.

    Args:
        p (str): path to evaluate

    Returns:
        bool: True if [p] is an absolute path, else False
    """

    return (len(p) > 0) and \
        (p[0] == '/' or p[0] == '\\' or ((len(p) > 1) and (p[1] == ':')))

def safe_create_link(link_exists,link_new):
    """
    Creates a symlink at [link_new] pointing to [link_exists].

    If [link_new] already exists, makes sure it's a link (not a file), and if it
    has a different target than [link_exists], removes and re-creates it.

    Creates a *real* directory for [link_new]'s parent folder if necessary.

    Errors if [link_new] already exists but it's not a link.

    Args:
        link_exists (str): the existing file/folder the symlink should point to
        link_new (str): the symlink to create
    """

    # If the new file already exists...
    if os.path.exists(link_new) or os.path.islink(link_new):
        # Error if it's not already a link
        assert os.path.islink(link_new)
        # If it's already a link and it points to the "exists" file,
        # leave it alone, otherwise redirect it.
        if not os.readlink(link_new) == link_exists:
            os.remove(link_new)
            os.symlink(link_exists,link_new)
    else:
        link_new_dir = os.path.dirname(link_new)
        if len(link_new_dir) > 0:
            os.makedirs(link_new_dir,exist_ok=True)
        os.symlink(link_exists,link_new)

# ...def safe_create_link(...)

def remove_empty_folders(path, remove_root=False):
    """
    Recursively removes empty folders within the specified path.

    Args:
        path (str): the folder from which we should recursively remove
            empty folders.
        remove_root (bool, optional): whether to remove the root directory if
            it's empty after removing all empty subdirectories. This will always
            be True during recursive calls.

    Returns:
        bool: True if the directory is empty after processing, False otherwise
    """

    # Verify that [path] is a directory
    if not os.path.isdir(path):
        return False

    # Track whether the current directory is empty
    is_empty = True

    # Iterate through all items in the directory
    for item in os.listdir(path):

        item_path = os.path.join(path, item)

        # If it's a directory, process it recursively
        if os.path.isdir(item_path):
            # If the subdirectory is empty after processing, it will be removed
            if not remove_empty_folders(item_path, True):
                # If the subdirectory is not empty, the current directory isn't empty either
                is_empty = False
        else:
            # If there's a file, the directory is not empty
            is_empty = False

    # If the directory is empty and we're supposed to remove it
    if is_empty and remove_root:
        try:
            os.rmdir(path)
        except Exception as e:
            print('Error removing directory {}: {}'.format(path,str(e)))
            is_empty = False

    return is_empty

# ...def remove_empty_folders(...)

def path_join(*paths, convert_slashes=True):
    r"""
    Wrapper for os.path.join that optionally converts backslashes to forward slashes.

    Args:
        *paths (variable-length set of strings): path components to be joined
        convert_slashes (bool, optional): whether to convert \\ to /

    Returns:
        str: the joined path components
    """

    joined_path = os.path.join(*paths)
    if convert_slashes:
        return joined_path.replace('\\', '/')
    else:
        return joined_path

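# A quick illustration of path_join() semantics; the literals are arbitrary.

if False:

    assert path_join('a\\b', 'c') == 'a/b/c'
    assert path_join('a', 'b', convert_slashes=False) == os.path.join('a', 'b')
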
#%% Image-related path functions

def is_image_file(s, img_extensions=IMG_EXTENSIONS):
    """
    Checks a file's extension against a hard-coded set of image file
    extensions. Uses a case-insensitive comparison.

    Does not check whether the file exists, only determines whether the filename
    implies it's an image file.

    Args:
        s (str): filename to evaluate for image-ness
        img_extensions (list or tuple, optional): known image file extensions, in
            lower-case

    Returns:
        bool: True if [s] appears to be an image file, else False
    """

    ext = os.path.splitext(s)[1]
    return ext.lower() in img_extensions

def find_image_strings(strings):
    """
    Given a list of strings that are potentially image file names, looks for
    strings that actually look like image file names (based on extension).

    Args:
        strings (list): list of filenames to check for image-ness

    Returns:
        list: the subset of [strings] that appear to be image filenames
    """

    return [s for s in strings if is_image_file(s)]

def find_images(dirname,
                recursive=False,
                return_relative_paths=False,
                convert_slashes=True):
    """
    Finds all files in a directory that look like image file names. Returns
    absolute paths unless return_relative_paths is set. Uses the OS-native
    path separator unless convert_slashes is set, in which case will always
    use '/'.

    Args:
        dirname (str): the folder to search for images
        recursive (bool, optional): whether to search recursively
        return_relative_paths (bool, optional): return paths that are relative
            to [dirname], rather than absolute paths
        convert_slashes (bool, optional): force forward slashes in return values

    Returns:
        list: list of image filenames found in [dirname]
    """

    assert os.path.isdir(dirname), '{} is not a folder'.format(dirname)

    if recursive:
        strings = glob.glob(os.path.join(dirname, '**', '*.*'), recursive=True)
    else:
        strings = glob.glob(os.path.join(dirname, '*.*'))

    image_files = find_image_strings(strings)

    if return_relative_paths:
        image_files = [os.path.relpath(fn,dirname) for fn in image_files]

    image_files = sorted(image_files)

    if convert_slashes:
        image_files = [fn.replace('\\', '/') for fn in image_files]

    return image_files

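# A minimal usage sketch for find_images(); the folder name is hypothetical.

if False:

    image_files_relative = find_images('/data/camera-traps',
                                       recursive=True,
                                       return_relative_paths=True)
    print('Found {} images'.format(len(image_files_relative)))
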
#%% Filename cleaning functions

def clean_filename(filename,
                   allow_list=VALID_FILENAME_CHARS,
                   char_limit=CHAR_LIMIT,
                   force_lower=False,
                   remove_trailing_leading_whitespace=True):
    r"""
    Removes non-ASCII and other invalid filename characters (on any
    reasonable OS) from a filename, then optionally trims to a maximum length.

    Does not allow :\/ by default; use clean_path if you want to preserve those.

    Adapted from
    https://gist.github.com/wassname/1393c4a57cfcbf03641dbc31886123b8

    Args:
        filename (str): filename to clean
        allow_list (str, optional): string containing all allowable filename characters
        char_limit (int, optional): maximum allowable filename length; if None, skips this
            step
        force_lower (bool, optional): convert the resulting filename to lowercase
        remove_trailing_leading_whitespace (bool, optional): remove trailing and
            leading whitespace from each component of a path, e.g. does not allow
            "a/b/c /d.jpg"

    Returns:
        str: cleaned version of [filename]
    """

    if remove_trailing_leading_whitespace:

        # Best effort to preserve the original separator
        separator = '/'
        if '\\' in filename:
            separator = '\\'

        filename = filename.replace('\\','/')
        components = filename.split('/')
        clean_components = [c.strip() for c in components]
        filename = separator.join(clean_components)
        if separator == '\\':
            filename = filename.replace('/','\\')

    # Keep only valid ASCII chars
    cleaned_filename = (unicodedata.normalize('NFKD', filename)
                        .encode('ASCII', 'ignore').decode())

    # Keep only allow-listed chars
    cleaned_filename = ''.join([c for c in cleaned_filename if c in allow_list])
    if char_limit is not None:
        cleaned_filename = cleaned_filename[:char_limit]
    if force_lower:
        cleaned_filename = cleaned_filename.lower()
    return cleaned_filename

def clean_path(pathname,
               allow_list=VALID_PATH_CHARS,
               char_limit=CHAR_LIMIT,
               force_lower=False,
               remove_trailing_leading_whitespace=True):
    """
    Removes non-ASCII and other invalid path characters (on any reasonable
    OS) from a path, then optionally trims to a maximum length.

    Args:
        pathname (str): path name to clean
        allow_list (str, optional): string containing all allowable path characters
        char_limit (int, optional): maximum allowable path length; if None, skips this
            step
        force_lower (bool, optional): convert the resulting path to lowercase
        remove_trailing_leading_whitespace (bool, optional): remove trailing and
            leading whitespace from each component of a path, e.g. does not allow
            "a/b/c /d.jpg"

    Returns:
        str: cleaned version of [pathname]
    """

    return clean_filename(pathname,
                          allow_list=allow_list,
                          char_limit=char_limit,
                          force_lower=force_lower,
                          remove_trailing_leading_whitespace=remove_trailing_leading_whitespace)

def flatten_path(pathname,separator_chars=SEPARATOR_CHARS,separator_char_replacement='~'):
    r"""
    Removes non-ASCII and other invalid path characters (on any reasonable
    OS) from a path, then trims to a maximum length. Replaces all valid
    separators with [separator_char_replacement].

    Args:
        pathname (str): path name to flatten
        separator_chars (str, optional): string containing all known path separators
        separator_char_replacement (str, optional): string to insert in place of
            path separators

    Returns:
        str: flattened version of [pathname]
    """

    s = clean_path(pathname)
    for c in separator_chars:
        s = s.replace(c, separator_char_replacement)
    return s

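# Quick sketches of the cleaning functions on illustrative paths; the expected
# outputs in the comments follow from the default allow-lists above.

if False:

    _ = clean_filename('file name?.jpg')        # 'file name.jpg'
    _ = clean_path('/mnt/data/file name?.jpg')  # '/mnt/data/file name.jpg'
    _ = flatten_path('/mnt/data/file.jpg')      # '~mnt~data~file.jpg'
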
def is_executable(filename):
    """
    Checks whether [filename] is on the system path and marked as executable.

    Args:
        filename (str): filename to check for executable status

    Returns:
        bool: True if [filename] is on the system path and marked as executable, otherwise False
    """

    # https://stackoverflow.com/questions/11210104/check-if-a-program-exists-from-a-python-script

    return which(filename) is not None

#%% WSL utilities

def wsl_path_to_windows_path(filename, failure_behavior='none'):
    r"""
    Converts a WSL path to a Windows path. For example, converts:

    /mnt/e/a/b/c

    ...to:

    e:\a\b\c

    Args:
        filename (str): filename to convert
        failure_behavior (str, optional): what to do if the path can't be processed as a
            WSL path: 'none' to return None in this case, 'original' to return the
            original path

    Returns:
        str: Windows equivalent to the WSL path [filename]
    """

    assert failure_behavior in ('none','original'), \
        'Unrecognized failure_behavior value {}'.format(failure_behavior)

    # Check whether the path follows the standard WSL mount pattern
    wsl_path_pattern = r'^/mnt/([a-zA-Z])(/.*)?$'
    match = re.match(wsl_path_pattern, filename)

    if match:

        # Extract the drive letter and the rest of the path
        drive_letter = match.group(1)
        path_remainder = match.group(2) if match.group(2) else ''

        # Convert forward slashes to backslashes for Windows
        path_remainder = path_remainder.replace('/', '\\')

        # Format the Windows path
        windows_path = f"{drive_letter}:{path_remainder}"
        return windows_path

    if failure_behavior == 'none':
        return None
    else:
        return filename

# ...def wsl_path_to_windows_path(...)

def windows_path_to_wsl_path(filename, failure_behavior='none'):
    r"""
    Converts a Windows path to a WSL path, or returns None if that's not possible. E.g.
    converts:

    e:\a\b\c

    ...to:

    /mnt/e/a/b/c

    Args:
        filename (str): filename to convert
        failure_behavior (str, optional): what to do if the path can't be processed as a
            Windows path: 'none' to return None in this case, 'original' to return the
            original path

    Returns:
        str: WSL equivalent to the Windows path [filename]
    """

    assert failure_behavior in ('none','original'), \
        'Unrecognized failure_behavior value {}'.format(failure_behavior)

    filename = filename.replace('\\', '/')

    # Check whether the path follows a Windows drive letter pattern
    windows_path_pattern = r'^([a-zA-Z]):(/.*)?$'
    match = re.match(windows_path_pattern, filename)

    if match:
        # Extract the drive letter and the rest of the path
        drive_letter = match.group(1).lower()  # Convert to lowercase for WSL
        path_remainder = match.group(2) if match.group(2) else ''

        # Format the WSL path
        wsl_path = f"/mnt/{drive_letter}{path_remainder}"
        return wsl_path

    if failure_behavior == 'none':
        return None
    else:
        return filename

# ...def windows_path_to_wsl_path(...)

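# Round-trip sketch for the WSL path converters, following the examples in the
# docstrings above.

if False:

    assert wsl_path_to_windows_path('/mnt/e/a/b/c') == r'e:\a\b\c'
    assert windows_path_to_wsl_path(r'e:\a\b\c') == '/mnt/e/a/b/c'
    assert wsl_path_to_windows_path('/home/user', failure_behavior='none') is None
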
#%% Platform-independent file openers

def open_file_in_chrome(filename):
    """
    Opens a file in Chrome, regardless of file type. I typically use this to open
    .md files in Chrome.

    Args:
        filename (str): file to open

    Returns:
        bool: whether the operation was successful
    """

    # Create URL
    abs_path = os.path.abspath(filename)

    system = platform.system()
    if system == 'Windows':
        url = f'file:///{abs_path.replace(os.sep, "/")}'
    else:  # macOS and Linux
        url = f'file://{abs_path}'

    # Determine the Chrome path
    if system == 'Windows':

        # This is a native Python module, but it only exists on Windows
        import winreg

        chrome_paths = [
            os.path.expanduser("~") + r"\AppData\Local\Google\Chrome\Application\chrome.exe",
            r"C:\Program Files\Google\Chrome\Application\chrome.exe",
            r"C:\Program Files (x86)\Google\Chrome\Application\chrome.exe"
        ]

        # Method 1 (default approach): run from a typical Chrome install location
        for path in chrome_paths:
            if os.path.exists(path):
                subprocess.run([path, url])
                return True

        # Method 2: check the registry for the Chrome path
        try:
            with winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE,
                                r"SOFTWARE\Microsoft\Windows\CurrentVersion\App Paths\chrome.exe") as key:
                chrome_path = winreg.QueryValue(key, None)
                if chrome_path and os.path.exists(chrome_path):
                    subprocess.run([chrome_path, url])
                    return True
        except Exception:
            pass

        # Method 3: try an alternate registry location
        try:
            with winreg.OpenKey(winreg.HKEY_CURRENT_USER,
                                r"Software\Google\Chrome\BLBeacon") as key:
                chrome_path = os.path.join(
                    os.path.dirname(winreg.QueryValueEx(key, "version")[0]), "chrome.exe")
                if os.path.exists(chrome_path):
                    subprocess.run([chrome_path, url])
                    return True
        except Exception:
            pass

        # Method 4: try the system path
        for chrome_cmd in ["chrome", "chrome.exe", "googlechrome", "google-chrome"]:
            try:
                result = subprocess.run([chrome_cmd, url], shell=True)
                if result.returncode == 0:
                    return True
            except Exception:
                continue

        # Method 5: use the Windows URL protocol handler
        try:
            os.startfile(url)
            return True
        except Exception:
            pass

        # Method 6: use rundll32
        try:
            cmd = f'rundll32 url.dll,FileProtocolHandler {url}'
            subprocess.run(cmd, shell=True)
            return True
        except Exception:
            pass

    elif system == 'Darwin':

        chrome_paths = [
            '/Applications/Google Chrome.app/Contents/MacOS/Google Chrome',
            os.path.expanduser('~/Applications/Google Chrome.app/Contents/MacOS/Google Chrome')
        ]

        for path in chrome_paths:
            if os.path.exists(path):
                subprocess.run([path, url])
                return True

        # Fall back to the 'open' command with Chrome as the app
        try:
            subprocess.run(['open', '-a', 'Google Chrome', url])
            return True
        except Exception:
            pass

    elif system == 'Linux':

        chrome_commands = ['google-chrome', 'chrome', 'chromium', 'chromium-browser']

        for cmd in chrome_commands:
            try:
                result = subprocess.run([cmd, url],
                                        stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
                if result.returncode == 0:
                    return True
            except Exception:
                continue

    print(f"Could not open {filename} in Chrome on {system}.")
    return False

def open_file(filename,
              attempt_to_open_in_wsl_host=False,
              browser_name=None):
    """
    Opens [filename] in the default OS file handler for this file type.

    If browser_name is not None, uses the webbrowser module to open the filename
    in the specified browser; see https://docs.python.org/3/library/webbrowser.html
    for supported browsers. Falls back to the default file handler if webbrowser.open()
    fails; when browser_name is supplied, attempt_to_open_in_wsl_host only applies in
    that fallback case.

    If browser_name is 'default', uses the system default. This is different from the
    parameter to webbrowser.get(), where None implies the system default.

    Args:
        filename (str): file to open
        attempt_to_open_in_wsl_host (bool, optional): if this is True, and we're in WSL,
            attempts to open [filename] in the Windows host environment
        browser_name (str, optional): see above
    """

    if browser_name is not None:
        if browser_name == 'chrome':
            browser_name = 'google-chrome'
        elif browser_name == 'default':
            browser_name = None
        try:
            result = webbrowser.get(using=browser_name).open(filename)
        except Exception:
            result = False
        if result:
            return

    if sys.platform == 'win32':

        os.startfile(filename)

    elif sys.platform == 'darwin':

        opener = 'open'
        subprocess.call([opener, filename])

    elif attempt_to_open_in_wsl_host and environment_is_wsl():

        windows_path = wsl_path_to_windows_path(filename)

        # Fall back to xdg-open if we can't map this to a Windows path
        if windows_path is None:
            subprocess.call(['xdg-open', filename])
            return

        if os.path.isdir(filename):
            subprocess.run(["explorer.exe", windows_path])
        else:
            os.system("cmd.exe /C start {}".format(re.escape(windows_path)))

    else:

        opener = 'xdg-open'
        subprocess.call([opener, filename])

# ...def open_file(...)

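# A minimal usage sketch for open_file(); the filenames and browser choice are
# hypothetical.

if False:

    open_file('/data/report.html', browser_name='chrome')
    open_file('/data/results.json', attempt_to_open_in_wsl_host=True)
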
#%% File list functions (as in, files that are lists of other filenames)

def write_list_to_file(output_file,strings):
    """
    Writes a list of strings to either a JSON file or a text file,
    depending on the extension of the given file name.

    Args:
        output_file (str): file to write
        strings (list): list of strings to write to [output_file]
    """

    with open(output_file, 'w') as f:
        if output_file.endswith('.json'):
            json.dump(strings, f, indent=1)
        else:
            f.write('\n'.join(strings))

def read_list_from_file(filename):
    """
    Reads a json-formatted list of strings from a file.

    Args:
        filename (str): .json filename to read

    Returns:
        list: list of strings read from [filename]
    """

    assert filename.endswith('.json')
    with open(filename, 'r') as f:
        file_list = json.load(f)
    assert isinstance(file_list, list)
    for s in file_list:
        assert isinstance(s, str)
    return file_list

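# Round-trip sketch for the list read/write helpers; the filename is hypothetical.

if False:

    filenames = ['a.jpg', 'b.jpg']
    write_list_to_file('/tmp/file_list.json', filenames)
    assert read_list_from_file('/tmp/file_list.json') == filenames
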
#%% File copying functions

def _copy_file(input_output_tuple,overwrite=True,verbose=False,move=False):
    """
    Internal function for copying files from within parallel_copy_files.
    """

    assert len(input_output_tuple) == 2
    source_fn = input_output_tuple[0]
    target_fn = input_output_tuple[1]
    if (not overwrite) and (os.path.isfile(target_fn)):
        if verbose:
            print('Skipping existing target file {}'.format(target_fn))
        return

    if move:
        action_string = 'Moving'
    else:
        action_string = 'Copying'

    if verbose:
        print('{} to {}'.format(action_string,target_fn))

    target_dir = os.path.dirname(target_fn)
    if len(target_dir) > 0:
        os.makedirs(target_dir,exist_ok=True)
    if move:
        shutil.move(source_fn, target_fn)
    else:
        shutil.copyfile(source_fn,target_fn)

def parallel_copy_files(input_file_to_output_file,
                        max_workers=16,
                        use_threads=True,
                        overwrite=False,
                        verbose=False,
                        move=False):
    """
    Copies (or moves) files from source to target according to the dict
    input_file_to_output_file.

    Args:
        input_file_to_output_file (dict): dictionary mapping source files to the target files
            to which they should be copied
        max_workers (int, optional): number of concurrent workers; set to <=1 to disable
            parallelism
        use_threads (bool, optional): whether to use threads (True) or processes (False) for
            parallel copying; ignored if max_workers <= 1
        overwrite (bool, optional): whether to overwrite existing destination files
        verbose (bool, optional): enable additional debug output
        move (bool, optional): move instead of copying
    """

    if len(input_file_to_output_file) == 0:
        return

    n_workers = min(max_workers,len(input_file_to_output_file))

    # Package the dictionary as a set of 2-tuples
    input_output_tuples = []
    for input_fn in input_file_to_output_file:
        input_output_tuples.append((input_fn,input_file_to_output_file[input_fn]))

    pool = None

    try:
        if use_threads:
            pool = ThreadPool(n_workers)
        else:
            pool = Pool(n_workers)

        with tqdm(total=len(input_output_tuples)) as pbar:
            for i,_ in enumerate(pool.imap_unordered(partial(_copy_file,
                                                             overwrite=overwrite,
                                                             verbose=verbose,
                                                             move=move),
                                                     input_output_tuples)):
                pbar.update()
    finally:
        if pool is not None:
            pool.close()
            pool.join()
            if verbose:
                print('Pool closed and joined for parallel file copying')

# ...def parallel_copy_files(...)

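# A minimal sketch of parallel_copy_files(); the source and target folders are
# hypothetical.

if False:

    source_folder = '/data/source'
    target_folder = '/data/target'
    relative_filenames = recursive_file_list(source_folder, return_relative_paths=True)
    input_file_to_output_file = {
        os.path.join(source_folder, fn): os.path.join(target_folder, fn)
        for fn in relative_filenames
    }
    parallel_copy_files(input_file_to_output_file, max_workers=8, use_threads=True)
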
#%% File deletion functions

def delete_file(input_file, verbose=False):
    """
    Deletes a single file.

    Args:
        input_file (str): file to delete
        verbose (bool, optional): enable additional debug console output

    Returns:
        bool: True if the file was deleted successfully, False otherwise
    """

    try:
        if verbose:
            print('Deleting file {}'.format(input_file))

        if os.path.isfile(input_file):
            os.remove(input_file)
            return True
        else:
            if verbose:
                print('File {} does not exist'.format(input_file))
            return False

    except Exception as e:
        if verbose:
            print('Error deleting file {}: {}'.format(input_file, str(e)))
        return False

# ...def delete_file(...)

def parallel_delete_files(input_files,
                          max_workers=16,
                          use_threads=True,
                          verbose=False):
    """
    Deletes one or more files in parallel.

    Args:
        input_files (list): list of files to delete
        max_workers (int, optional): number of concurrent workers; set to <=1 to disable
            parallelism
        use_threads (bool, optional): whether to use threads (True) or processes (False);
            ignored if max_workers <= 1
        verbose (bool, optional): enable additional debug console output
    """

    if len(input_files) == 0:
        return

    n_workers = min(max_workers, len(input_files))

    pool = None

    try:
        if use_threads:
            pool = ThreadPool(n_workers)
        else:
            pool = Pool(n_workers)

        with tqdm(total=len(input_files)) as pbar:
            for i, _ in enumerate(pool.imap_unordered(partial(delete_file, verbose=verbose),
                                                      input_files)):
                pbar.update()
    finally:
        if pool is not None:
            pool.close()
            pool.join()
            if verbose:
                print('Pool closed and joined for file deletion')

# ...def parallel_delete_files(...)

#%% File size functions

def get_file_sizes(base_dir, convert_slashes=True):
    """
    Gets sizes recursively for all files in base_dir, returning a dict mapping
    relative filenames to size.

    TODO: merge the functionality here with parallel_get_file_sizes, which uses slightly
    different semantics.

    Args:
        base_dir (str): folder within which we want all file sizes
        convert_slashes (bool, optional): force forward slashes in return strings,
            otherwise uses the native path separator

    Returns:
        dict: dictionary mapping filenames to file sizes in bytes
    """

    relative_filenames = recursive_file_list(base_dir, convert_slashes=convert_slashes,
                                             return_relative_paths=True)

    fn_to_size = {}
    for fn_relative in tqdm(relative_filenames):
        fn_abs = os.path.join(base_dir,fn_relative)
        fn_to_size[fn_relative] = os.path.getsize(fn_abs)

    return fn_to_size

def _get_file_size(filename,verbose=False):
    """
    Internal function for safely getting the size of a file. Returns a (filename,size)
    tuple, where size is None if there is an error.
    """

    try:
        size = os.path.getsize(filename)
    except Exception as e:
        if verbose:
            print('Error reading file size for {}: {}'.format(filename,str(e)))
        size = None
    return (filename,size)

def parallel_get_file_sizes(filenames,
                            max_workers=16,
                            use_threads=True,
                            verbose=False,
                            recursive=True,
                            convert_slashes=True,
                            return_relative_paths=False):
    """
    Returns a dictionary mapping every file in [filenames] to the corresponding file size,
    or None for errors. If [filenames] is a folder, enumerates the folder (optionally
    recursively).

    Args:
        filenames (list or str): list of filenames for which we should read sizes, or a folder
            within which we should read all file sizes recursively
        max_workers (int, optional): number of concurrent workers; set to <=1 to disable
            parallelism
        use_threads (bool, optional): whether to use threads (True) or processes (False) for
            parallel size reading; ignored if max_workers <= 1
        verbose (bool, optional): enable additional debug output
        recursive (bool, optional): enumerate recursively; only relevant if [filenames] is
            a folder
        convert_slashes (bool, optional): convert backslashes to forward slashes
        return_relative_paths (bool, optional): return relative paths; only relevant if
            [filenames] is a folder

    Returns:
        dict: dictionary mapping filenames to file sizes in bytes
    """

    folder_name = None

    if isinstance(filenames,str):

        folder_name = filenames
        assert os.path.isdir(filenames), 'Could not find folder {}'.format(folder_name)

        if verbose:
            print('Enumerating files in {}'.format(folder_name))

        # Enumerate absolute paths here, we'll convert to relative later if requested
        filenames = recursive_file_list(folder_name,recursive=recursive,
                                        return_relative_paths=False)

    else:

        assert is_iterable(filenames), '[filenames] argument is neither a folder nor an iterable'

    n_workers = min(max_workers,len(filenames))

    if verbose:
        print('Creating worker pool')

    pool = None

    try:

        if use_threads:
            pool_string = 'thread'
            pool = ThreadPool(n_workers)
        else:
            pool_string = 'process'
            pool = Pool(n_workers)

        if verbose:
            print('Created a {} pool of {} workers'.format(
                pool_string,n_workers))

        # This returns (filename,size) tuples
        get_size_results = list(tqdm(pool.imap(
            partial(_get_file_size,verbose=verbose),filenames), total=len(filenames)))

    finally:

        if pool is not None:
            pool.close()
            pool.join()
            if verbose:
                print('Pool closed and joined for file size collection')

    to_return = {}
    for r in get_size_results:
        fn = r[0]
        if return_relative_paths and (folder_name is not None):
            fn = os.path.relpath(fn,folder_name)
        if convert_slashes:
            fn = fn.replace('\\','/')
        size = r[1]
        to_return[fn] = size

    return to_return

# ...def parallel_get_file_sizes(...)

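# A minimal sketch of parallel_get_file_sizes() over a folder; the folder name
# is hypothetical.

if False:

    fn_to_size = parallel_get_file_sizes('/data/camera-traps',
                                         max_workers=8,
                                         return_relative_paths=True)
    total_bytes = sum(size for size in fn_to_size.values() if size is not None)
    print('Total size: {} bytes'.format(total_bytes))
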
#%% Compression (zip/tar) functions

def zip_file(input_fn, output_fn=None, overwrite=False, verbose=False, compress_level=9):
    """
    Zips a single file.

    Args:
        input_fn (str): file to zip
        output_fn (str, optional): target zipfile; if this is None, we'll use
            [input_fn].zip
        overwrite (bool, optional): whether to overwrite an existing target file
        verbose (bool, optional): enable additional debug console output
        compress_level (int, optional): compression level to use, between 0 and 9

    Returns:
        str: the output zipfile, whether we created it or determined that it already exists
    """

    basename = os.path.basename(input_fn)

    if output_fn is None:
        output_fn = input_fn + '.zip'

    if (not overwrite) and (os.path.isfile(output_fn)):
        print('Skipping existing file {}'.format(output_fn))
        return output_fn

    if verbose:
        print('Zipping {} to {} with level {}'.format(input_fn,output_fn,compress_level))

    with ZipFile(output_fn,'w',zipfile.ZIP_DEFLATED) as zipf:
        zipf.write(input_fn,
                   arcname=basename,
                   compresslevel=compress_level,
                   compress_type=zipfile.ZIP_DEFLATED)

    return output_fn

# ...def zip_file(...)

def add_files_to_single_tar_file(input_files, output_fn, arc_name_base,
                                 overwrite=False, verbose=False, mode='x'):
    """
    Adds all the files in [input_files] to the tar file [output_fn].
    Archive names are relative to arc_name_base.

    Args:
        input_files (list): list of absolute filenames to include in the .tar file
        output_fn (str): .tar file to create
        arc_name_base (str): absolute folder from which relative paths should be determined;
            behavior is undefined if there are files in [input_files] that don't live within
            [arc_name_base]
        overwrite (bool, optional): whether to overwrite an existing .tar file
        verbose (bool, optional): enable additional debug console output
        mode (str, optional): compression type, can be 'x' (no compression), 'x:gz', or 'x:bz2'

    Returns:
        str: the output tar file, whether we created it or determined that it already exists
    """

    if os.path.isfile(output_fn):
        if not overwrite:
            print('Tar file {} exists, skipping'.format(output_fn))
            return output_fn
        else:
            print('Tar file {} exists, deleting and re-creating'.format(output_fn))
            os.remove(output_fn)

    if verbose:
        print('Adding {} files to {} (mode {})'.format(
            len(input_files),output_fn,mode))

    with tarfile.open(output_fn,mode) as tarf:
        for input_fn_abs in tqdm(input_files,disable=(not verbose)):
            input_fn_relative = os.path.relpath(input_fn_abs,arc_name_base)
            tarf.add(input_fn_abs,arcname=input_fn_relative)

    return output_fn

# ...def add_files_to_single_tar_file(...)

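# A minimal sketch of add_files_to_single_tar_file(); the paths are hypothetical.

if False:

    base = '/data/results'
    files = recursive_file_list(base)
    add_files_to_single_tar_file(files, '/data/results.tar.gz', base, mode='x:gz')
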
def zip_files_into_single_zipfile(input_files,
                                  output_fn,
                                  arc_name_base,
                                  overwrite=False,
                                  verbose=False,
                                  compress_level=9):
    """
    Zips all the files in [input_files] into [output_fn]. Archive names are relative to
    arc_name_base.

    Args:
        input_files (list): list of absolute filenames to include in the .zip file
        output_fn (str): .zip file to create
        arc_name_base (str): absolute folder from which relative paths should be determined;
            behavior is undefined if there are files in [input_files] that don't live within
            [arc_name_base]
        overwrite (bool, optional): whether to overwrite an existing .zip file
        verbose (bool, optional): enable additional debug console output
        compress_level (int, optional): compression level to use, between 0 and 9

    Returns:
        str: the output zipfile, whether we created it or determined that it already exists
    """

    if not overwrite:
        if os.path.isfile(output_fn):
            print('Zip file {} exists, skipping'.format(output_fn))
            return output_fn

    if verbose:
        print('Zipping {} files to {} (compression level {})'.format(
            len(input_files),output_fn,compress_level))

    with ZipFile(output_fn,'w',zipfile.ZIP_DEFLATED) as zipf:
        for input_fn_abs in tqdm(input_files,disable=(not verbose)):
            input_fn_relative = os.path.relpath(input_fn_abs,arc_name_base)
            zipf.write(input_fn_abs,
                       arcname=input_fn_relative,
                       compresslevel=compress_level,
                       compress_type=zipfile.ZIP_DEFLATED)

    return output_fn

# ...def zip_files_into_single_zipfile(...)

def zip_folder(input_folder, output_fn=None, overwrite=False, verbose=False, compress_level=9):
    """
    Recursively zips everything in [input_folder] into a single zipfile, storing files as
    paths relative to [input_folder].

    Args:
        input_folder (str): folder to zip
        output_fn (str, optional): output filename; if this is None, we'll write to
            [input_folder].zip
        overwrite (bool, optional): whether to overwrite an existing .zip file
        verbose (bool, optional): enable additional debug console output
        compress_level (int, optional): compression level to use, between 0 and 9

    Returns:
        str: the output zipfile, whether we created it or determined that it already exists
    """

    if output_fn is None:
        output_fn = input_folder + '.zip'

    if not overwrite:
        if os.path.isfile(output_fn):
            print('Zip file {} exists, skipping'.format(output_fn))
            return output_fn

    if verbose:
        print('Zipping {} to {} (compression level {})'.format(
            input_folder,output_fn,compress_level))

    relative_filenames = recursive_file_list(input_folder,return_relative_paths=True)

    with ZipFile(output_fn,'w',zipfile.ZIP_DEFLATED) as zipf:
        for input_fn_relative in tqdm(relative_filenames,disable=(not verbose)):
            input_fn_abs = os.path.join(input_folder,input_fn_relative)
            zipf.write(input_fn_abs,
                       arcname=input_fn_relative,
                       compresslevel=compress_level,
                       compress_type=zipfile.ZIP_DEFLATED)

    return output_fn

# ...def zip_folder(...)

1437
+ def parallel_zip_files(input_files,
1438
+ max_workers=16,
1439
+ use_threads=True,
1440
+ compress_level=9,
1441
+ overwrite=False,
1442
+ verbose=False):
1443
+ """
1444
+ Zips one or more files to separate output files in parallel, leaving the
1445
+ original files in place. Each file is zipped to [filename].zip.
1446
+
1447
+ Args:
1448
+ input_files (list): list of files to zip
+ max_workers (int, optional): number of concurrent workers, set to <= 1 to disable parallelism
+ use_threads (bool, optional): whether to use threads (True) or processes (False); ignored if
+ max_workers <= 1
+ compress_level (int, optional): zip compression level between 0 and 9
+ overwrite (bool, optional): whether to overwrite existing .zip files
1454
+ verbose (bool, optional): enable additional debug console output
1455
+ """
1456
+
1457
+ n_workers = min(max_workers,len(input_files))
1458
+
1459
+ if use_threads:
1460
+ pool = ThreadPool(n_workers)
1461
+ else:
1462
+ pool = Pool(n_workers)
1463
+
1464
+ try:
1465
+
1466
+ with tqdm(total=len(input_files)) as pbar:
1467
+ for i,_ in enumerate(pool.imap_unordered(partial(zip_file,
1468
+ output_fn=None,overwrite=overwrite,verbose=verbose,compress_level=compress_level),
1469
+ input_files)):
1470
+ pbar.update()
1471
+
1472
+ finally:
1473
+
1474
+ pool.close()
1475
+ pool.join()
1476
+ if verbose:
1477
+ print('Pool closed and joined for parallel zipping')
1478
+
1479
+ # ...def parallel_zip_files(...)
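+
+ # Illustrative usage sketch (the file names here are hypothetical); each
+ # input file is zipped in place to [filename].zip:
+ #
+ #   parallel_zip_files(['/data/a.json', '/data/b.json'], max_workers=2)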
1480
+
1481
+
1482
+ def parallel_zip_folders(input_folders,
1483
+ max_workers=16,
1484
+ use_threads=True,
1485
+ compress_level=9,
1486
+ overwrite=False,
1487
+ verbose=False):
1488
+ """
1489
+ Zips one or more folders to separate output files in parallel, leaving the
1490
+ original folders in place. Each folder is zipped to [folder_name].zip.
1491
+
1492
+ Args:
1493
+ input_folders (list): list of folders to zip
1494
+ max_workers (int, optional): number of concurrent workers, set to <= 1 to disable parallelism
1495
+ use_threads (bool, optional): whether to use threads (True) or processes (False); ignored if
1496
+ max_workers <= 1
1497
+ compress_level (int, optional): zip compression level between 0 and 9
1498
+ overwrite (bool, optional): whether to overwrite existing .zip files
1499
+ verbose (bool, optional): enable additional debug console output
1500
+ """
1501
+
1502
+ n_workers = min(max_workers,len(input_folders))
1503
+
1504
+ if use_threads:
1505
+ pool = ThreadPool(n_workers)
1506
+ else:
1507
+ pool = Pool(n_workers)
1508
+
1509
+ try:
1510
+
1511
+ with tqdm(total=len(input_folders)) as pbar:
1512
+ for i,_ in enumerate(pool.imap_unordered(
1513
+ partial(zip_folder,overwrite=overwrite,
1514
+ compress_level=compress_level,verbose=verbose),
1515
+ input_folders)):
1516
+ pbar.update()
1517
+
1518
+ finally:
1519
+
1520
+ pool.close()
1521
+ pool.join()
1522
+ if verbose:
1523
+ print('Pool closed and joined for parallel folder zipping')
1524
+
1525
+ # ...def parallel_zip_folders(...)
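+
+ # Illustrative usage sketch (the folder names here are hypothetical):
+ #
+ #   parallel_zip_folders(['/data/cam1', '/data/cam2'], max_workers=2)
+ #
+ # ...creates /data/cam1.zip and /data/cam2.zip.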
1526
+
1527
+
1528
+ def zip_each_file_in_folder(folder_name,
1529
+ recursive=False,
1530
+ max_workers=16,
1531
+ use_threads=True,
1532
+ compress_level=9,
1533
+ overwrite=False,
1534
+ required_token=None,
1535
+ verbose=False,
1536
+ exclude_zip=True):
1537
+ """
1538
+ Zips each file in [folder_name] to its own zipfile (filename.zip), optionally recursing. To
1539
+ zip a whole folder into a single zipfile, use zip_folder().
1540
+
1541
+ Args:
1542
+ folder_name (str): the folder within which we should zip files
1543
+ recursive (bool, optional): whether to recurse within [folder_name]
1544
+ max_workers (int, optional): number of concurrent workers, set to <= 1 to disable parallelism
1545
+ use_threads (bool, optional): whether to use threads (True) or processes (False); ignored if
1546
+ max_workers <= 1
1547
+ compress_level (int, optional): zip compression level between 0 and 9
1548
+ overwrite (bool, optional): whether to overwrite existing .zip files
1549
+ required_token (str, optional): only zip files whose names contain this string
1550
+ verbose (bool, optional): enable additional debug console output
1551
+ exclude_zip (bool, optional): skip files ending in .zip
1552
+ """
1553
+
1554
+ assert os.path.isdir(folder_name), '{} is not a folder'.format(folder_name)
1555
+
1556
+ input_files = recursive_file_list(folder_name,recursive=recursive,return_relative_paths=False)
1557
+
1558
+ if required_token is not None:
1559
+ input_files = [fn for fn in input_files if required_token in fn]
1560
+
1561
+ if exclude_zip:
1562
+ input_files = [fn for fn in input_files if (not fn.endswith('.zip'))]
1563
+
1564
+ parallel_zip_files(input_files=input_files,max_workers=max_workers,
1565
+ use_threads=use_threads,compress_level=compress_level,
1566
+ overwrite=overwrite,verbose=verbose)
1567
+
1568
+ # ...def zip_each_file_in_folder(...)
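+
+ # Illustrative usage sketch (the folder name here is hypothetical); this zips
+ # every non-.zip file under /data/cam1 to its own [filename].zip:
+ #
+ #   zip_each_file_in_folder('/data/cam1', recursive=True)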
1569
+
1570
+
1571
+ def unzip_file(input_file, output_folder=None):
1572
+ """
1573
+ Unzips a zipfile to the specified output folder, defaulting to the same location as
1574
+ the input file.
1575
+
1576
+ Args:
1577
+ input_file (str): zipfile to unzip
1578
+ output_folder (str, optional): folder to which we should unzip [input_file], defaults
1579
+ to unzipping to the folder where [input_file] lives
1580
+ """
1581
+
1582
+ if output_folder is None:
1583
+ output_folder = os.path.dirname(input_file)
1584
+
1585
+ with zipfile.ZipFile(input_file, 'r') as zf:
1586
+ zf.extractall(output_folder)
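+
+ # Illustrative usage sketch (the paths here are hypothetical):
+ #
+ #   unzip_file('/data/cam1.zip')                    # extract next to the zipfile
+ #   unzip_file('/data/cam1.zip', '/tmp/extracted')  # extract to a specific folder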
1587
+
1588
+
1589
+ #%% File hashing functions
1590
+
1591
+ def compute_file_hash(file_path, algorithm='sha256', allow_failures=True):
1592
+ """
1593
+ Compute the hash of a file.
1594
+
1595
+ Adapted from:
1596
+
1597
+ https://www.geeksforgeeks.org/python-program-to-find-hash-of-file/
1598
+
1599
+ Args:
1600
+ file_path (str): the file to hash
1601
+ algorithm (str, optional): the hashing algorithm to use (e.g. md5, sha256)
1602
+ allow_failures (bool, optional): if True, read failures will silently return
+ None; if False, read failures will raise exceptions
1604
+
1605
+ Returns:
1606
+ str: the hash value for this file, or None if reading failed and allow_failures is True
1607
+ """
1608
+
1609
+ try:
1610
+
1611
+ hash_func = hashlib.new(algorithm)
1612
+
1613
+ with open(file_path, 'rb') as file:
1614
+ while chunk := file.read(8192): # Read the file in chunks of 8192 bytes
1615
+ hash_func.update(chunk)
1616
+
1617
+ return str(hash_func.hexdigest())
1618
+
1619
+ except Exception:
1620
+
1621
+ if allow_failures:
1622
+ return None
1623
+ else:
1624
+ raise
1625
+
1626
+ # ...def compute_file_hash(...)
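+
+ # Illustrative usage sketch (the path here is hypothetical):
+ #
+ #   h = compute_file_hash('/data/a.jpg', algorithm='md5')
+ #   if h is None:
+ #       print('Failed to read /data/a.jpg')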
1627
+
1628
+
1629
+ def parallel_compute_file_hashes(filenames,
1630
+ max_workers=16,
1631
+ use_threads=True,
1632
+ recursive=True,
1633
+ algorithm='sha256',
1634
+ verbose=False):
1635
+ """
1636
+ Compute file hashes for a list of files, or for all the files in a folder.
1637
+
1638
+ Args:
1639
+ filenames (list or str): a list of filenames or a folder
1640
+ max_workers (int, optional): the number of parallel workers to use; set to <=1 to disable
1641
+ parallelization
1642
+ use_threads (bool, optional): whether to use threads (True) or processes (False) for
1643
+ parallelization
1644
+ recursive (bool, optional): if [filenames] is a folder, whether to enumerate recursively.
+ Ignored if [filenames] is a list.
+ algorithm (str, optional): the hashing algorithm to use (e.g. md5, sha256)
1647
+ verbose (bool, optional): enable additional debug output
1648
+
1649
+ Returns:
1650
+ dict: a dict mapping filenames to hash values; values will be None for files that fail
1651
+ to load.
1652
+ """
1653
+
1654
+ if isinstance(filenames,str) and os.path.isdir(filenames):
1655
+ if verbose:
1656
+ print('Enumerating files in {}'.format(filenames))
1657
+ filenames = recursive_file_list(filenames,recursive=recursive,return_relative_paths=False)
1658
+
1659
+ n_workers = min(max_workers,len(filenames))
1660
+
1661
+ if verbose:
1662
+ print('Computing hashes for {} files on {} workers'.format(len(filenames),n_workers))
1663
+
1664
+ if n_workers <= 1:
1665
+
1666
+ results = []
1667
+ for filename in filenames:
1668
+ results.append(compute_file_hash(filename,algorithm=algorithm,allow_failures=True))
1669
+
1670
+ else:
1671
+
1672
+ if use_threads:
1673
+ pool = ThreadPool(n_workers)
1674
+ else:
1675
+ pool = Pool(n_workers)
1676
+
1677
+ try:
1678
+
1679
+ results = list(tqdm(pool.imap(
1680
+ partial(compute_file_hash,algorithm=algorithm,allow_failures=True),
1681
+ filenames), total=len(filenames)))
1682
+
1683
+ finally:
1684
+
1685
+ pool.close()
1686
+ pool.join()
1687
+ if verbose:
1688
+ print('Pool closed and joined for parallel hashing')
1689
+
1690
+ # ...if we are/aren't parallelizing
1691
+
1692
+ assert len(filenames) == len(results), 'Internal error in parallel_compute_file_hashes'
1693
+
1694
+ to_return = {}
1695
+ for i_file,filename in enumerate(filenames):
1696
+ to_return[filename] = results[i_file]
1697
+
1698
+ return to_return
1699
+
1700
+ # ...def parallel_compute_file_hashes(...)
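+
+ # Illustrative usage sketch (the folder name here is hypothetical); this accepts
+ # either a folder or an explicit list of files, and returns a dict mapping each
+ # filename to its hash (None for unreadable files):
+ #
+ #   filename_to_hash = parallel_compute_file_hashes('/data/cam1', max_workers=8)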
1701
+
1702
+
1703
+ #%% Tests
1704
+
1705
+ class TestPathUtils:
1706
+ """
1707
+ Tests for path_utils.py
1708
+ """
1709
+
1710
+ def set_up(self):
1711
+ """
1712
+ Create a temporary directory for testing.
1713
+ """
1714
+
1715
+ self.test_dir = make_test_folder(subfolder='megadetector/path_utils_tests')
1716
+ print('Using temporary folder {} for path utils testing'.format(self.test_dir))
1717
+ os.makedirs(self.test_dir, exist_ok=True)
1718
+
1719
+
1720
+ def tear_down(self):
1721
+ """
1722
+ Remove the temporary directory after tests.
1723
+ """
1724
+
1725
+ if os.path.exists(self.test_dir):
1726
+ shutil.rmtree(self.test_dir)
1727
+
1728
+
1729
+ def test_is_image_file(self):
1730
+ """
1731
+ Test the is_image_file function.
1732
+ """
1733
+
1734
+ assert is_image_file('test.jpg')
1735
+ assert is_image_file('test.jpeg')
1736
+ assert is_image_file('test.png')
1737
+ assert is_image_file('test.gif')
1738
+ assert is_image_file('test.bmp')
1739
+ assert is_image_file('test.tiff')
1740
+ assert is_image_file('test.TIF')
1741
+ assert not is_image_file('test.txt')
1742
+ assert not is_image_file('test.doc')
1743
+ assert is_image_file('path/to/image.JPG')
1744
+ assert not is_image_file('image')
1745
+ assert is_image_file('test.custom', img_extensions=['.custom'])
1746
+ assert not is_image_file('test.jpg', img_extensions=['.custom'])
1747
+
1748
+
1749
+ def test_find_image_strings(self):
1750
+ """
1751
+ Test the find_image_strings function.
1752
+ """
1753
+
1754
+ strings = ['a.jpg', 'b.txt', 'c.PNG', 'd.gif', 'e.jpeg', 'f.doc']
1755
+ expected = ['a.jpg', 'c.PNG', 'd.gif', 'e.jpeg']
1756
+ assert sorted(find_image_strings(strings)) == sorted(expected)
1757
+ assert find_image_strings([]) == []
1758
+ assert find_image_strings(['no_image.txt', 'another.doc']) == []
1759
+
1760
+
1761
+ def test_find_images(self):
1762
+ """
1763
+ Test the find_images function.
1764
+ """
1765
+
1766
+ # Create some dummy files
1767
+ img1_abs = os.path.join(self.test_dir, 'img1.jpg')
1768
+ img2_abs = os.path.join(self.test_dir, 'img2.PNG')
1769
+ txt1_abs = os.path.join(self.test_dir, 'text1.txt')
1770
+ open(img1_abs, 'w').close()
1771
+ open(img2_abs, 'w').close()
1772
+ open(txt1_abs, 'w').close()
1773
+
1774
+ subdir = os.path.join(self.test_dir, 'subdir')
1775
+ os.makedirs(subdir, exist_ok=True)
1776
+ img3_abs = os.path.join(subdir, 'img3.jpeg')
1777
+ txt2_abs = os.path.join(subdir, 'text2.txt')
1778
+ open(img3_abs, 'w').close()
1779
+ open(txt2_abs, 'w').close()
1780
+
1781
+ # Test non-recursive
1782
+ expected_non_recursive_abs = sorted([img1_abs.replace('\\', '/'), img2_abs.replace('\\', '/')])
1783
+ found_non_recursive_abs = find_images(self.test_dir, recursive=False, return_relative_paths=False)
1784
+ assert sorted(found_non_recursive_abs) == expected_non_recursive_abs
1785
+
1786
+ # Test non-recursive, relative paths
1787
+ expected_non_recursive_rel = sorted(['img1.jpg', 'img2.PNG'])
1788
+ found_non_recursive_rel = find_images(self.test_dir, recursive=False, return_relative_paths=True)
1789
+ assert sorted(found_non_recursive_rel) == expected_non_recursive_rel
1790
+
1791
+ # Test recursive
1792
+ expected_recursive_abs = sorted([
1793
+ img1_abs.replace('\\', '/'),
1794
+ img2_abs.replace('\\', '/'),
1795
+ img3_abs.replace('\\', '/')
1796
+ ])
1797
+ found_recursive_abs = find_images(self.test_dir, recursive=True, return_relative_paths=False)
1798
+ assert sorted(found_recursive_abs) == expected_recursive_abs
1799
+
1800
+ # Test recursive, relative paths
1801
+ expected_recursive_rel = sorted([
1802
+ 'img1.jpg',
1803
+ 'img2.PNG',
1804
+ os.path.join('subdir', 'img3.jpeg').replace('\\', '/')
1805
+ ])
1806
+ found_recursive_rel = find_images(self.test_dir, recursive=True, return_relative_paths=True)
1807
+ assert sorted(found_recursive_rel) == expected_recursive_rel
1808
+
1809
+ # Test with an empty directory
1810
+ empty_dir = os.path.join(self.test_dir, 'empty_dir')
1811
+ os.makedirs(empty_dir, exist_ok=True)
1812
+ assert find_images(empty_dir, recursive=True) == []
1813
+
1814
+ # Test with a directory that doesn't exist (should assert)
+ #
+ # Use a flag here: a sentinel raise inside the try block would itself be
+ # swallowed by the except clause below.
+ raised = False
+ try:
+     find_images(os.path.join(self.test_dir, 'non_existent_dir'))
+ except AssertionError:
+     raised = True
+ assert raised, 'AssertionError not raised for non_existent_dir'
1820
+
1821
+
1822
+ def test_recursive_file_list_and_file_list(self):
1823
+ """
1824
+ Test the recursive_file_list and file_list functions.
1825
+ """
1826
+
1827
+ # Setup directory structure
1828
+ # test_dir/
1829
+ # file1.txt
1830
+ # file2.jpg
1831
+ # subdir1/
1832
+ # file3.txt
1833
+ # subsubdir/
1834
+ # file4.png
1835
+ # subdir2/
1836
+ # file5.doc
1837
+
1838
+ list_dir = os.path.join(self.test_dir,'recursive_list')
1839
+
1840
+ f1 = os.path.join(list_dir, 'file1.txt')
1841
+ f2 = os.path.join(list_dir, 'file2.jpg')
1842
+ subdir1 = os.path.join(list_dir, 'subdir1')
1843
+ os.makedirs(subdir1, exist_ok=True)
1844
+ f3 = os.path.join(subdir1, 'file3.txt')
1845
+ subsubdir = os.path.join(subdir1, 'subsubdir')
1846
+ os.makedirs(subsubdir, exist_ok=True)
1847
+ f4 = os.path.join(subsubdir, 'file4.png')
1848
+ subdir2 = os.path.join(list_dir, 'subdir2')
1849
+ os.makedirs(subdir2, exist_ok=True)
1850
+ f5 = os.path.join(subdir2, 'file5.doc')
1851
+
1852
+ for filepath in [f1, f2, f3, f4, f5]:
1853
+ with open(filepath, 'w') as f:
1854
+ f.write('test')
1855
+
1856
+ # Test recursive_file_list (recursive=True by default)
1857
+ expected_all_files_abs = sorted([
1858
+ f1.replace('\\', '/'), f2.replace('\\', '/'), f3.replace('\\', '/'),
1859
+ f4.replace('\\', '/'), f5.replace('\\', '/')
1860
+ ])
1861
+ all_files_abs = recursive_file_list(list_dir, convert_slashes=True,
1862
+ return_relative_paths=False)
1863
+ assert sorted(all_files_abs) == expected_all_files_abs
1864
+
1865
+ # Test recursive_file_list with relative paths
1866
+ expected_all_files_rel = sorted([
1867
+ 'file1.txt', 'file2.jpg',
1868
+ os.path.join('subdir1', 'file3.txt').replace('\\', '/'),
1869
+ os.path.join('subdir1', 'subsubdir', 'file4.png').replace('\\', '/'),
1870
+ os.path.join('subdir2', 'file5.doc').replace('\\', '/')
1871
+ ])
1872
+ all_files_rel = recursive_file_list(list_dir, convert_slashes=True,
1873
+ return_relative_paths=True)
1874
+ assert sorted(all_files_rel) == expected_all_files_rel
1875
+
1876
+ # Test file_list (non-recursive by default via wrapper)
1877
+ expected_top_level_files_abs = sorted([f1.replace('\\', '/'), f2.replace('\\', '/')])
1878
+ top_level_files_abs = file_list(list_dir, convert_slashes=True,
1879
+ return_relative_paths=False, recursive=False)
1880
+ assert sorted(top_level_files_abs) == expected_top_level_files_abs
1881
+
1882
+ # Test file_list (recursive explicitly) - should be same as recursive_file_list
1883
+ recursive_via_file_list = file_list(list_dir, convert_slashes=True,
1884
+ return_relative_paths=False, recursive=True)
1885
+ assert sorted(recursive_via_file_list) == expected_all_files_abs
1886
+
1887
+ # Test with convert_slashes=False (use os.sep)
1888
+ #
1889
+ # Note: This test might be tricky if os.sep is '/', as no replacement happens. We'll check
1890
+ # that backslashes remain on Windows.
1891
+ if os.sep == '\\':
1892
+ f1_raw = os.path.join(list_dir, 'file1.txt')
1893
+ # Only one file for simplicity
1894
+ files_no_slash_conversion = file_list(list_dir, convert_slashes=False, recursive=False)
1895
+ assert any(f1_raw in s for s in files_no_slash_conversion)
1896
+
1897
+ # Test with an empty directory
1898
+ empty_dir = os.path.join(list_dir, "empty_dir_for_files")
1899
+ os.makedirs(empty_dir, exist_ok=True)
1900
+ assert recursive_file_list(empty_dir) == []
1901
+ assert file_list(empty_dir, recursive=False) == []
1902
+
1903
+ # Test with a non-existent directory; use a flag so the failure path isn't
+ # swallowed by the except clause
+ raised = False
+ try:
+     recursive_file_list(os.path.join(list_dir, "non_existent_dir"))
+ except AssertionError:
+     raised = True
+ assert raised, 'AssertionError not raised for non_existent_dir in recursive_file_list'
1909
+
1910
+
1911
+ def test_folder_list(self):
1912
+ """
1913
+ Test the folder_list function.
1914
+ """
1915
+
1916
+ # Setup directory structure
1917
+ # test_dir/
1918
+ # subdir1/
1919
+ # subsubdir1/
1920
+ # subdir2/
1921
+ # file1.txt (should be ignored)
1922
+
1923
+ folder_list_dir = os.path.join(self.test_dir,'folder_list')
1924
+
1925
+ subdir1 = os.path.join(folder_list_dir, 'subdir1')
1926
+ subsubdir1 = os.path.join(subdir1, 'subsubdir1')
1927
+ subdir2 = os.path.join(folder_list_dir, 'subdir2')
1928
+ os.makedirs(subdir1, exist_ok=True)
1929
+ os.makedirs(subsubdir1, exist_ok=True)
1930
+ os.makedirs(subdir2, exist_ok=True)
1931
+ with open(os.path.join(folder_list_dir, 'file1.txt'), 'w') as f:
1932
+ f.write('test')
1933
+
1934
+ # Test non-recursive
1935
+ expected_folders_non_recursive_abs = sorted([
1936
+ subdir1.replace('\\', '/'), subdir2.replace('\\', '/')
1937
+ ])
1938
+ folders_non_recursive_abs = folder_list(folder_list_dir, recursive=False,
1939
+ return_relative_paths=False)
1940
+ assert sorted(folders_non_recursive_abs) == expected_folders_non_recursive_abs, \
1941
+ 'Non-recursive folder list failed, expected:\n\n{}\n\nFound:\n\n{}'.format(
1942
+ str(expected_folders_non_recursive_abs),
1943
+ str(folders_non_recursive_abs)
1944
+ )
1945
+
1946
+ # Test non-recursive, relative paths
1947
+ expected_folders_non_recursive_rel = sorted(['subdir1', 'subdir2'])
1948
+ folders_non_recursive_rel = folder_list(folder_list_dir, recursive=False,
1949
+ return_relative_paths=True)
1950
+ assert sorted(folders_non_recursive_rel) == expected_folders_non_recursive_rel
1951
+
1952
+ # Test recursive
1953
+ expected_folders_recursive_abs = sorted([
1954
+ subdir1.replace('\\', '/'),
1955
+ subsubdir1.replace('\\', '/'),
1956
+ subdir2.replace('\\', '/')
1957
+ ])
1958
+ folders_recursive_abs = folder_list(folder_list_dir, recursive=True,
1959
+ return_relative_paths=False)
1960
+ assert sorted(folders_recursive_abs) == expected_folders_recursive_abs
1961
+
1962
+ # Test recursive, relative paths
1963
+ expected_folders_recursive_rel = sorted([
1964
+ 'subdir1',
1965
+ os.path.join('subdir1', 'subsubdir1').replace('\\', '/'),
1966
+ 'subdir2'
1967
+ ])
1968
+ folders_recursive_rel = folder_list(folder_list_dir, recursive=True,
1969
+ return_relative_paths=True)
1970
+ assert sorted(folders_recursive_rel) == expected_folders_recursive_rel
1971
+
1972
+ # Test with an empty directory (except for the file)
1973
+ empty_dir_for_folders = os.path.join(folder_list_dir, "empty_for_folders")
1974
+ os.makedirs(empty_dir_for_folders, exist_ok=True)
1975
+ with open(os.path.join(empty_dir_for_folders, 'temp.txt'), 'w') as f: f.write('t')
1976
+ assert folder_list(empty_dir_for_folders, recursive=True) == []
1977
+ assert folder_list(empty_dir_for_folders, recursive=False) == []
1978
+
1979
+ # Test with a non-existent directory; use a flag so the failure path isn't
+ # swallowed by the except clause
+ raised = False
+ try:
+     folder_list(os.path.join(self.test_dir, "non_existent_dir"))
+ except AssertionError:
+     raised = True
+ assert raised, 'AssertionError not raised for non_existent_dir in folder_list'
1985
+
1986
+
1987
+ def test_folder_summary(self):
1988
+ """
1989
+ Test the folder_summary function.
1990
+ """
1991
+
1992
+ # test_dir/
1993
+ # file1.txt
1994
+ # img1.jpg
1995
+ # subdir/
1996
+ # file2.txt
1997
+ # img2.png
1998
+ # img3.png
1999
+
2000
+ folder_summary_dir = os.path.join(self.test_dir,'folder_summary')
2001
+
2002
+ f1 = os.path.join(folder_summary_dir, 'file1.txt')
2003
+ img1 = os.path.join(folder_summary_dir, 'img1.jpg')
2004
+ subdir = os.path.join(folder_summary_dir, 'subdir')
2005
+ os.makedirs(subdir, exist_ok=True)
2006
+ f2 = os.path.join(subdir, 'file2.txt')
2007
+ img2 = os.path.join(subdir, 'img2.png')
2008
+ img3 = os.path.join(subdir, 'img3.png')
2009
+
2010
+ for filepath in [f1, img1, f2, img2, img3]:
2011
+ with open(filepath, 'w') as f:
2012
+ f.write('test')
2013
+
2014
+ summary = folder_summary(folder_summary_dir, print_summary=False)
2015
+
2016
+ assert summary['n_files'] == 5
2017
+ assert summary['n_folders'] == 1 # 'subdir'
2018
+ assert summary['extension_to_count']['.txt'] == 2
2019
+ assert summary['extension_to_count']['.jpg'] == 1
2020
+ assert summary['extension_to_count']['.png'] == 2
2021
+
2022
+ # Check order (sorted by value, desc)
2023
+ #
2024
+ # The specific order of keys with the same counts can vary based on file system list
2025
+ # order. We'll check that the counts are correct and the number of unique extensions is
2026
+ # right.
2027
+ assert len(summary['extension_to_count']) == 3
2028
+
2030
+ empty_dir = os.path.join(folder_summary_dir, "empty_summary_dir")
2031
+ os.makedirs(empty_dir, exist_ok=True)
2032
+ empty_summary = folder_summary(empty_dir, print_summary=False)
2033
+ assert empty_summary['n_files'] == 0
2034
+ assert empty_summary['n_folders'] == 0
2035
+ assert empty_summary['extension_to_count'] == {}
2036
+
2037
+
2038
+ def test_fileparts(self):
2039
+ """
2040
+ Test the fileparts function.
2041
+ """
2042
+
2043
+ assert fileparts('file') == ('', 'file', '')
2044
+ assert fileparts('file.txt') == ('', 'file', '.txt')
2045
+ assert fileparts(r'c:/dir/file.jpg') == ('c:/dir', 'file', '.jpg')
2046
+ assert fileparts('/dir/subdir/file.jpg') == ('/dir/subdir', 'file', '.jpg')
2047
+ assert fileparts(r'c:\dir\file') == (r'c:\dir', 'file', '')
2048
+ assert fileparts(r'c:\dir\file.tar.gz') == (r'c:\dir', 'file.tar', '.gz')
2049
+ assert fileparts('.bashrc') == ('', '.bashrc', '') # Hidden file, no extension
2050
+ assert fileparts('nodir/.bashrc') == ('nodir', '.bashrc', '')
2051
+ assert fileparts('a/b/c.d.e') == ('a/b', 'c.d', '.e')
2052
+
2053
+
2054
+ def test_insert_before_extension(self):
2055
+ """
2056
+ Test the insert_before_extension function.
2057
+ """
2058
+
2059
+ assert insert_before_extension('file.ext', 'inserted') == 'file.inserted.ext'
2060
+ assert insert_before_extension('file', 'inserted') == 'file.inserted'
2061
+ assert insert_before_extension('path/to/file.ext', 'tag') == 'path/to/file.tag.ext'
2062
+ assert insert_before_extension('path/to/file', 'tag') == 'path/to/file.tag'
2063
+ assert insert_before_extension('file.tar.gz', 'new') == 'file.tar.new.gz'
2064
+
2065
+ # Test with custom separator
2066
+ assert insert_before_extension('file.ext', 'inserted', separator='_') == 'file_inserted.ext'
2067
+
2068
+ # Test with s=None (timestamp) - check format roughly
2069
+ fname_with_ts = insert_before_extension('file.ext', None)
2070
+ parts = fname_with_ts.split('.')
2071
+ # file.YYYY.MM.DD.HH.MM.SS.ext
2072
+ assert len(parts) >= 8 # file, Y, M, D, H, M, S, ext
2073
+ assert parts[0] == 'file'
2074
+ assert parts[-1] == 'ext'
2075
+ assert all(p.isdigit() for p in parts[1:-1])
2076
+
2077
+ fname_no_ext_ts = insert_before_extension('file', '') # s is empty string, should also use timestamp
2078
+ parts_no_ext = fname_no_ext_ts.split('.')
2079
+ assert len(parts_no_ext) >= 7 # file, Y, M, D, H, M, S
2080
+ assert parts_no_ext[0] == 'file'
2081
+ assert all(p.isdigit() for p in parts_no_ext[1:])
2082
+
2083
+
2084
+ def test_split_path(self):
2085
+ """
2086
+ Test the split_path function.
2087
+ """
2088
+
2089
+ if os.name == 'nt':
2090
+ assert split_path(r'c:\dir\subdir\file.txt') == ['c:\\', 'dir', 'subdir', 'file.txt']
2091
+ assert split_path('c:\\') == ['c:\\']
2092
+ # Test with mixed slashes, ntpath.split handles them
2093
+ assert split_path(r'c:/dir/subdir/file.txt') == ['c:/', 'dir', 'subdir', 'file.txt']
2094
+ else: # POSIX
2095
+ assert split_path('/dir/subdir/file.jpg') == ['/', 'dir', 'subdir', 'file.jpg']
2096
+ assert split_path('/') == ['/']
2097
+
2098
+ assert split_path('dir/file.txt') == ['dir', 'file.txt']
2099
+ assert split_path('file.txt') == ['file.txt']
2100
+ assert split_path('') == ''
2101
+ assert split_path('.') == ['.']
2102
+ assert split_path('..') == ['..']
2103
+ assert split_path('../a/b') == ['..', 'a', 'b']
2104
+
2105
+
2106
+ def test_path_is_abs(self):
2107
+ """
2108
+ Test the path_is_abs function.
2109
+ """
2110
+
2111
+ assert path_is_abs('/absolute/path')
2112
+ assert path_is_abs('c:/absolute/path')
2113
+ assert path_is_abs('C:\\absolute\\path')
2114
+ assert path_is_abs('\\\\server\\share\\path') # UNC path
2115
+ assert path_is_abs('c:file_without_slash_after_drive')
2116
+
2117
+ assert not path_is_abs('relative/path')
2118
+ assert not path_is_abs('file.txt')
2119
+ assert not path_is_abs('../relative')
2120
+ assert not path_is_abs('')
2121
+
2122
+
2124
+ def test_safe_create_link_unix(self):
2125
+ """
2126
+ Test the safe_create_link function on Unix-like systems.
2127
+ """
2128
+
2129
+ if os.name == 'nt':
2130
+ # print("Skipping test_safe_create_link_unix on Windows.")
2131
+ return
2132
+
2133
+ source_file_path = os.path.join(self.test_dir, 'source.txt')
2134
+ link_path = os.path.join(self.test_dir, 'link.txt')
2135
+ other_source_path = os.path.join(self.test_dir, 'other_source.txt')
2136
+
2137
+ with open(source_file_path, 'w') as f:
2138
+ f.write('source data')
2139
+ with open(other_source_path, 'w') as f:
2140
+ f.write('other data')
2141
+
2142
+ # Create new link
2143
+ safe_create_link(source_file_path, link_path)
2144
+ assert os.path.islink(link_path)
2145
+ assert os.readlink(link_path) == source_file_path
2146
+
2147
+ # Link already exists and points to the correct source
2148
+ safe_create_link(source_file_path, link_path) # Should do nothing
2149
+ assert os.path.islink(link_path)
2150
+ assert os.readlink(link_path) == source_file_path
2151
+
2152
+ # Link already exists but points to a different source
2153
+ safe_create_link(other_source_path, link_path) # Should remove and re-create
2154
+ assert os.path.islink(link_path)
2155
+ assert os.readlink(link_path) == other_source_path
2156
+
2157
+ # Link_new path exists and is a file (not a link)
2158
+ file_path_conflict = os.path.join(self.test_dir, 'conflict_file.txt')
2159
+ with open(file_path_conflict, 'w') as f:
2160
+ f.write('actual file')
2161
+ # Use a flag so the failure path isn't swallowed by the except clause
+ raised = False
+ try:
+     safe_create_link(source_file_path, file_path_conflict)
+ except AssertionError:
+     raised = True
+ assert raised, 'AssertionError not raised for file conflict'
2166
+ os.remove(file_path_conflict)
2167
+
2168
+ # Link_new path exists and is a directory
2169
+ dir_path_conflict = os.path.join(self.test_dir, 'conflict_dir')
2170
+ os.makedirs(dir_path_conflict, exist_ok=True)
2171
+ raised = False
+ try:
+     safe_create_link(source_file_path, dir_path_conflict)
+ except AssertionError: # islink will be false
+     raised = True
+ assert raised, 'AssertionError not raised for directory conflict'
2176
+ shutil.rmtree(dir_path_conflict)
2177
+
2178
+
2179
+ def test_remove_empty_folders(self):
2180
+ """
2181
+ Test the remove_empty_folders function.
2182
+ """
2183
+
2184
+ # test_dir/
2185
+ # empty_top/
2186
+ # empty_mid/
2187
+ # empty_leaf/
2188
+ # mixed_top/
2189
+ # empty_mid_in_mixed/
2190
+ # empty_leaf_in_mixed/
2191
+ # non_empty_mid/
2192
+ # file.txt
2193
+ # non_empty_top/
2194
+ # file_in_top.txt
2195
+
2196
+ empty_top = os.path.join(self.test_dir, 'empty_top')
2197
+ empty_mid = os.path.join(empty_top, 'empty_mid')
2198
+ empty_leaf = os.path.join(empty_mid, 'empty_leaf')
2199
+ os.makedirs(empty_leaf, exist_ok=True)
2200
+
2201
+ mixed_top = os.path.join(self.test_dir, 'mixed_top')
2202
+ empty_mid_in_mixed = os.path.join(mixed_top, 'empty_mid_in_mixed')
2203
+ empty_leaf_in_mixed = os.path.join(empty_mid_in_mixed, 'empty_leaf_in_mixed')
2204
+ os.makedirs(empty_leaf_in_mixed, exist_ok=True)
2205
+ non_empty_mid = os.path.join(mixed_top, 'non_empty_mid')
2206
+ os.makedirs(non_empty_mid, exist_ok=True)
2207
+ with open(os.path.join(non_empty_mid, 'file.txt'), 'w') as f:
2208
+ f.write('data')
2209
+
2210
+ non_empty_top = os.path.join(self.test_dir, 'non_empty_top')
2211
+ os.makedirs(non_empty_top, exist_ok=True)
2212
+ with open(os.path.join(non_empty_top, 'file_in_top.txt'), 'w') as f:
2213
+ f.write('data')
2214
+
2215
+ # Process empty_top - should remove all three
2216
+ remove_empty_folders(empty_top, remove_root=True)
2217
+ assert not os.path.exists(empty_top)
2218
+ assert not os.path.exists(empty_mid)
2219
+ assert not os.path.exists(empty_leaf)
2220
+
2221
+ # Process mixed_top; should remove empty_leaf_in_mixed and empty_mid_in_mixed
2222
+ # but not mixed_top or non_empty_mid.
2223
+ remove_empty_folders(mixed_top, remove_root=True)
2224
+ assert os.path.exists(mixed_top) # mixed_top itself should remain
2225
+ assert not os.path.exists(empty_mid_in_mixed)
2226
+ assert not os.path.exists(empty_leaf_in_mixed)
2227
+ assert os.path.exists(non_empty_mid)
2228
+ assert os.path.exists(os.path.join(non_empty_mid, 'file.txt'))
2229
+
2230
+ # Process non_empty_top; should remove nothing.
2231
+ remove_empty_folders(non_empty_top, remove_root=True)
2232
+ assert os.path.exists(non_empty_top)
2233
+ assert os.path.exists(os.path.join(non_empty_top, 'file_in_top.txt'))
2234
+
2235
+ # Test with a file path (should do nothing and return False)
2236
+ file_path_for_removal = os.path.join(self.test_dir, 'a_file.txt')
2237
+ with open(file_path_for_removal, 'w') as f: f.write('t')
2238
+ assert not remove_empty_folders(file_path_for_removal, remove_root=True)
2239
+ assert os.path.exists(file_path_for_removal)
2240
+
2241
+ # Test with remove_root=False for the top level
2242
+ another_empty_top = os.path.join(self.test_dir, 'another_empty_top')
2243
+ another_empty_mid = os.path.join(another_empty_top, 'another_empty_mid')
2244
+ os.makedirs(another_empty_mid)
2245
+ remove_empty_folders(another_empty_top, remove_root=False)
2246
+ assert os.path.exists(another_empty_top) # Root not removed
2247
+ assert not os.path.exists(another_empty_mid) # Mid removed
2248
+
2249
+
2250
+ def test_path_join(self):
2251
+ """
2252
+ Test the path_join function.
2253
+ """
2254
+
2255
+ assert path_join('a', 'b', 'c') == 'a/b/c'
2256
+ assert path_join('a/b', 'c', 'd.txt') == 'a/b/c/d.txt'
2257
+ if os.name == 'nt':
2258
+ # On Windows, os.path.join uses '\', so convert_slashes=True should change it
2259
+ assert path_join('a', 'b', convert_slashes=True) == 'a/b'
2260
+ assert path_join('a', 'b', convert_slashes=False) == 'a\\b'
2261
+ assert path_join('c:\\', 'foo', 'bar', convert_slashes=True) == 'c:/foo/bar'
2262
+ assert path_join('c:\\', 'foo', 'bar', convert_slashes=False) == 'c:\\foo\\bar'
2263
+ else:
2264
+ # On POSIX, os.path.join uses '/', so convert_slashes=False should still be '/'
2265
+ assert path_join('a', 'b', convert_slashes=False) == 'a/b'
2266
+
2267
+ assert path_join('a', '', 'b') == 'a/b' # os.path.join behavior
2268
+ assert path_join('/a', 'b') == '/a/b'
2269
+ assert path_join('a', '/b') == '/b' # '/b' is absolute
2270
+
2271
+
2272
+ def test_filename_cleaning(self):
2273
+ """
2274
+ Test clean_filename, clean_path, and flatten_path functions.
2275
+ """
2276
+
2277
+ # clean_filename
2278
+ assert clean_filename("test file.txt") == "test file.txt"
2279
+ assert clean_filename("test*file?.txt", char_limit=10) == "testfile.t"
2280
+ assert clean_filename("TestFile.TXT", force_lower=True) == "testfile.txt"
2281
+ assert clean_filename("file:with<illegal>chars.txt") == "filewithillegalchars.txt"
2282
+
2283
+ s = " accented_name_éà.txt"
2284
+
2285
+ assert clean_filename(s,
2286
+ remove_trailing_leading_whitespace=False) == " accented_name_ea.txt", \
2287
+ 'clean_filename with remove_trailing_leading_whitespace=False: {}'.format(
2288
+ clean_filename(s, remove_trailing_leading_whitespace=False))
2289
+
2290
+ assert clean_filename(s, remove_trailing_leading_whitespace=True) == "accented_name_ea.txt", \
2291
+ 'clean_filename with remove_trailing_leading_whitespace=True: {}'.format(
2292
+ clean_filename(s, remove_trailing_leading_whitespace=True))
2293
+
2294
+ # Separators are not allowed by default in clean_filename
2295
+ assert clean_filename("path/to/file.txt") == "pathtofile.txt"
2296
+
2297
+ # clean_path
2298
+ assert clean_path("path/to/file.txt") == "path/to/file.txt" # slashes allowed
2299
+ assert clean_path("path\\to\\file.txt") == "path\\to\\file.txt" # backslashes allowed
2300
+ assert clean_path("path:to:file.txt") == "path:to:file.txt" # colons allowed
2301
+ assert clean_path("path/to<illegal>/file.txt") == "path/toillegal/file.txt"
2302
+
2303
+ # flatten_path
2304
+ assert flatten_path("path/to/file.txt") == "path~to~file.txt"
2305
+ assert flatten_path("path:to:file.txt", separator_char_replacement='_') == "path_to_file.txt"
2306
+ assert flatten_path("path\\to/file:name.txt") == "path~to~file~name.txt"
2307
+ assert flatten_path("path/to<illegal>/file.txt") == "path~toillegal~file.txt"
2308
+
2309
+
2310
+ def test_is_executable(self):
2311
+ """
2312
+ Test the is_executable function.
2313
+ This is a basic test; comprehensive testing is environment-dependent.
2314
+ """
2315
+
2316
+ # Hard to test reliably across all systems without knowing what's on PATH.
2317
+ if os.name == 'nt':
2318
+ assert is_executable('cmd.exe')
2319
+ assert not is_executable('non_existent_executable_blah_blah')
2320
+ else:
2321
+ assert is_executable('ls')
2322
+ assert is_executable('sh')
2323
+ assert not is_executable('non_existent_executable_blah_blah')
2324
+
2325
+
2326
+ def test_write_read_list_to_file(self):
2327
+ """
2328
+ Test write_list_to_file and read_list_from_file functions.
2329
+ """
2330
+
2331
+ test_list = ["item1", "item2 with space", "item3/with/slash"]
2332
+
2333
+ # Test with .json
2334
+ json_file_path = os.path.join(self.test_dir, "test_list.json")
2335
+ write_list_to_file(json_file_path, test_list)
2336
+ read_list_json = read_list_from_file(json_file_path)
2337
+ assert test_list == read_list_json
2338
+
2339
+ # Test with .txt
2340
+ txt_file_path = os.path.join(self.test_dir, "test_list.txt")
2341
+ write_list_to_file(txt_file_path, test_list)
2342
+ # read_list_from_file is specifically for JSON, so we read .txt manually
2343
+ with open(txt_file_path, 'r') as f:
2344
+ read_list_txt = [line.strip() for line in f.readlines()]
2345
+ assert test_list == read_list_txt
2346
+
2347
+ # Test reading non-existent json
2348
+ try:
2349
+ read_list_from_file(os.path.join(self.test_dir,"non_existent.json"))
2350
+ raise AssertionError("FileNotFoundError not raised")
2351
+ except FileNotFoundError:
2352
+ pass
2353
+
2354
+ # Test reading a non-json file with read_list_from_file (should fail parsing)
2355
+ non_json_path = os.path.join(self.test_dir, "not_a_list.json")
2356
+ with open(non_json_path, 'w') as f: f.write("this is not json")
2357
+ try:
2358
+ read_list_from_file(non_json_path)
2359
+ raise AssertionError("json.JSONDecodeError not raised")
2360
+ except json.JSONDecodeError:
2361
+ pass
2362
+
2363
+
2364
+ def test_parallel_copy_files(self):
2365
+ """
2366
+ Test the parallel_copy_files function (with max_workers=1 for test simplicity).
2367
+ """
2368
+
2369
+ source_dir = os.path.join(self.test_dir, "copy_source")
2370
+ target_dir = os.path.join(self.test_dir, "copy_target")
2371
+ os.makedirs(source_dir, exist_ok=True)
2372
+
2373
+ file_mappings = {}
2374
+ source_files_content = {}
2375
+
2376
+ for i in range(3):
2377
+ src_fn = f"file{i}.txt"
2378
+ src_path = os.path.join(source_dir, src_fn)
2379
+ if i == 0:
2380
+ tgt_fn = f"copied_file{i}.txt"
2381
+ tgt_path = os.path.join(target_dir, tgt_fn)
2382
+ else:
2383
+ tgt_fn = f"copied_file{i}_subdir.txt"
2384
+ tgt_path = os.path.join(target_dir, f"sub{i}", tgt_fn)
2385
+
2386
+ content = f"content of file {i}"
2387
+ with open(src_path, 'w') as f:
2388
+ f.write(content)
2389
+
2390
+ file_mappings[src_path] = tgt_path
2391
+ source_files_content[tgt_path] = content
2392
+
2393
+ # Test copy
2394
+ parallel_copy_files(file_mappings, max_workers=1, use_threads=True, overwrite=False)
2395
+ for tgt_path, expected_content in source_files_content.items():
2396
+ assert os.path.exists(tgt_path)
2397
+ with open(tgt_path, 'r') as f:
2398
+ assert f.read() == expected_content
2399
+
2400
+ existing_target_path = list(source_files_content.keys())[0]
2401
+ with open(existing_target_path, 'w') as f:
2402
+ f.write("old content")
2403
+
2404
+ parallel_copy_files(file_mappings, max_workers=1, use_threads=True, overwrite=False)
2405
+ with open(existing_target_path, 'r') as f:
2406
+ assert f.read() == "old content"
2407
+
2408
+ parallel_copy_files(file_mappings, max_workers=1, use_threads=True, overwrite=True)
2409
+ with open(existing_target_path, 'r') as f:
2410
+ assert f.read() == source_files_content[existing_target_path]
2411
+
2412
+ for src_path_orig, tgt_path_orig in file_mappings.items(): # Re-create source for move
2413
+ with open(src_path_orig, 'w') as f:
2414
+ f.write(source_files_content[tgt_path_orig])
2415
+
2416
+ parallel_copy_files(file_mappings, max_workers=1, use_threads=True, move=True, overwrite=True)
2417
+ for src_path, tgt_path in file_mappings.items():
2418
+ assert not os.path.exists(src_path)
2419
+ assert os.path.exists(tgt_path)
2420
+ with open(tgt_path, 'r') as f:
2421
+ assert f.read() == source_files_content[tgt_path]
2422
+
2423
+
2424
+ def test_get_file_sizes(self):
2425
+ """
2426
+ Test get_file_sizes and parallel_get_file_sizes functions.
2427
+ """
2428
+
2429
+ file_sizes_test_dir = os.path.join(self.test_dir,'file_sizes')
2430
+ os.makedirs(file_sizes_test_dir,exist_ok=True)
2431
+
2432
+ f1_path = os.path.join(file_sizes_test_dir, 'file1.txt')
2433
+ content1 = "0123456789" # 10 bytes
2434
+ with open(f1_path, 'w') as f:
2435
+ f.write(content1)
2436
+
2437
+ subdir_path = os.path.join(file_sizes_test_dir, 'subdir')
2438
+ os.makedirs(subdir_path, exist_ok=True)
2439
+ f2_path = os.path.join(subdir_path, 'file2.txt')
2440
+ content2 = "01234567890123456789" # 20 bytes
2441
+ with open(f2_path, 'w') as f:
2442
+ f.write(content2)
2443
+
2444
+ sizes_relative = get_file_sizes(file_sizes_test_dir)
2445
+ expected_sizes_relative = {
2446
+ 'file1.txt': len(content1),
2447
+ os.path.join('subdir', 'file2.txt').replace('\\','/'): len(content2)
2448
+ }
2449
+ assert sizes_relative == expected_sizes_relative
2450
+
2451
+ file_list_abs = [f1_path, f2_path]
2452
+ sizes_parallel_abs = parallel_get_file_sizes(file_list_abs, max_workers=1)
2453
+ expected_sizes_parallel_abs = {
2454
+ f1_path.replace('\\','/'): len(content1),
2455
+ f2_path.replace('\\','/'): len(content2)
2456
+ }
2457
+ assert sizes_parallel_abs == expected_sizes_parallel_abs
2458
+
2459
+ sizes_parallel_folder_abs = parallel_get_file_sizes(file_sizes_test_dir,
2460
+ max_workers=1,
2461
+ return_relative_paths=False)
2462
+ assert sizes_parallel_folder_abs == expected_sizes_parallel_abs
2463
+
2464
+ sizes_parallel_folder_rel = parallel_get_file_sizes(file_sizes_test_dir,
2465
+ max_workers=1,
2466
+ return_relative_paths=True)
2467
+ assert sizes_parallel_folder_rel == expected_sizes_relative
2468
+
2469
+ non_existent_file = os.path.join(file_sizes_test_dir, "no_such_file.txt")
2470
+ sizes_with_error = parallel_get_file_sizes([f1_path, non_existent_file],
2471
+ max_workers=1)
2472
+ expected_with_error = {
2473
+ f1_path.replace('\\','/'): len(content1),
2474
+ non_existent_file.replace('\\','/'): None
2475
+ }
2476
+ assert sizes_with_error == expected_with_error
2477
+
2478
+
2479
+ def test_zip_file_and_unzip_file(self):
2480
+ """
2481
+ Test zip_file and unzip_file functions.
2482
+ """
2483
+
2484
+ file_to_zip_name = "test_zip_me.txt"
2485
+ file_to_zip_path = os.path.join(self.test_dir, file_to_zip_name)
2486
+ content = "This is the content to be zipped."
2487
+ with open(file_to_zip_path, 'w') as f:
2488
+ f.write(content)
2489
+
2490
+ default_zip_output_path = file_to_zip_path + ".zip"
2491
+ returned_zip_path = zip_file(file_to_zip_path)
2492
+ assert returned_zip_path == default_zip_output_path
2493
+ assert os.path.exists(default_zip_output_path)
2494
+
2495
+ unzip_dir_default = os.path.join(self.test_dir, "unzip_default")
2496
+ os.makedirs(unzip_dir_default, exist_ok=True)
2497
+ unzip_file(default_zip_output_path, unzip_dir_default)
2498
+ unzipped_file_path_default = os.path.join(unzip_dir_default, file_to_zip_name)
2499
+ assert os.path.exists(unzipped_file_path_default)
2500
+ with open(unzipped_file_path_default, 'r') as f:
2501
+ assert f.read() == content
2502
+
2503
+ custom_zip_output_name = "custom_archive.zip"
2504
+ custom_zip_output_path = os.path.join(self.test_dir, custom_zip_output_name)
2505
+ zip_file(file_to_zip_path, output_fn=custom_zip_output_path, overwrite=True)
2506
+ assert os.path.exists(custom_zip_output_path)
2507
+
2508
+ zip_in_subdir_path = os.path.join(self.test_dir, "subdir_zip", "my.zip")
2509
+ file_in_subdir_name = "file_for_subdir_zip.txt"
2510
+ file_in_subdir_path = os.path.join(self.test_dir,"subdir_zip", file_in_subdir_name)
2511
+ os.makedirs(os.path.dirname(zip_in_subdir_path), exist_ok=True)
2512
+ with open(file_in_subdir_path, "w") as f: f.write("sub dir content")
2513
+ zip_file(file_in_subdir_path, output_fn=zip_in_subdir_path)
2514
+
2515
+ unzip_file(zip_in_subdir_path, output_folder=None)
2516
+ unzipped_in_same_dir_path = os.path.join(os.path.dirname(zip_in_subdir_path), file_in_subdir_name)
2517
+ assert os.path.exists(unzipped_in_same_dir_path)
2518
+ with open(unzipped_in_same_dir_path, 'r') as f:
2519
+ assert f.read() == "sub dir content"
2520
+
2521
+
2522
+ def test_zip_folder(self):
2523
+ """
2524
+ Test the zip_folder function.
2525
+ """
2526
+
2527
+ folder_to_zip = os.path.join(self.test_dir, "folder_to_zip")
2528
+ os.makedirs(folder_to_zip, exist_ok=True)
2529
+
2530
+ file1_name = "file1.txt"; path1 = os.path.join(folder_to_zip, file1_name)
2531
+ file2_name = "file2.log"; path2 = os.path.join(folder_to_zip, file2_name)
2532
+ subdir_name = "sub"; subdir_path = os.path.join(folder_to_zip, subdir_name)
2533
+ os.makedirs(subdir_path, exist_ok=True)
2534
+ file3_name = "file3.dat"; path3 = os.path.join(subdir_path, file3_name)
2535
+
2536
+ content1 = "content1"; content2 = "content2"; content3 = "content3"
2537
+ with open(path1, 'w') as f: f.write(content1)
2538
+ with open(path2, 'w') as f: f.write(content2)
2539
+ with open(path3, 'w') as f: f.write(content3)
2540
+
2541
+ default_zip_path = folder_to_zip + ".zip"
2542
+ zip_folder(folder_to_zip, output_fn=None, overwrite=True)
2543
+ assert os.path.exists(default_zip_path)
2544
+
2545
+ unzip_output_dir = os.path.join(self.test_dir, "unzipped_folder_content")
2546
+ os.makedirs(unzip_output_dir, exist_ok=True)
2547
+ unzip_file(default_zip_path, unzip_output_dir)
2548
+
2549
+ assert os.path.exists(os.path.join(unzip_output_dir, file1_name))
2550
+ assert os.path.exists(os.path.join(unzip_output_dir, file2_name))
2551
+ assert os.path.exists(os.path.join(unzip_output_dir, subdir_name, file3_name))
2552
+ with open(os.path.join(unzip_output_dir, file1_name), 'r') as f: assert f.read() == content1
+ with open(os.path.join(unzip_output_dir, file2_name), 'r') as f: assert f.read() == content2
+ with open(os.path.join(unzip_output_dir, subdir_name, file3_name), 'r') as f: assert f.read() == content3
2555
+
2556
+ mtime_before = os.path.getmtime(default_zip_path)
2557
+ zip_folder(folder_to_zip, output_fn=None, overwrite=False)
2558
+ mtime_after = os.path.getmtime(default_zip_path)
2559
+ assert mtime_before == mtime_after
2560
+
2561
+
2562
+ def test_zip_files_into_single_zipfile(self):
2563
+ """
2564
+ Test zip_files_into_single_zipfile.
2565
+ """
2566
+
2567
+ file1_path = os.path.join(self.test_dir, "zfs_file1.txt")
2568
+ content1 = "content for zfs1"
2569
+ with open(file1_path, 'w') as f: f.write(content1)
2570
+
2571
+ subdir_for_zfs = os.path.join(self.test_dir, "zfs_subdir")
2572
+ os.makedirs(subdir_for_zfs, exist_ok=True)
2573
+ file2_path = os.path.join(subdir_for_zfs, "zfs_file2.log")
2574
+ content2 = "content for zfs2"
2575
+ with open(file2_path, 'w') as f: f.write(content2)
2576
+
2577
+ input_files = [file1_path, file2_path]
2578
+ output_zip_path = os.path.join(self.test_dir, "multi_file_archive.zip")
2579
+ zip_files_into_single_zipfile(input_files, output_zip_path, arc_name_base=self.test_dir, overwrite=True)
2580
+ assert os.path.exists(output_zip_path)
2581
+
2582
+ unzip_dir = os.path.join(self.test_dir, "unzip_multi_file")
2583
+ os.makedirs(unzip_dir, exist_ok=True)
2584
+ unzip_file(output_zip_path, unzip_dir)
2585
+
2586
+ expected_unzipped_file1 = os.path.join(unzip_dir, os.path.relpath(file1_path, self.test_dir))
2587
+ expected_unzipped_file2 = os.path.join(unzip_dir, os.path.relpath(file2_path, self.test_dir))
2588
+
2589
+ assert os.path.exists(expected_unzipped_file1)
2590
+ with open(expected_unzipped_file1, 'r') as f: assert f.read() == content1
2591
+ assert os.path.exists(expected_unzipped_file2)
2592
+ assert os.path.basename(expected_unzipped_file2) == "zfs_file2.log"
2593
+ assert os.path.basename(os.path.dirname(expected_unzipped_file2)) == "zfs_subdir"
2594
+ with open(expected_unzipped_file2, 'r') as f: assert f.read() == content2
2595
+
2596
+
2597
+ def test_add_files_to_single_tar_file(self):
2598
+ """
2599
+ Test add_files_to_single_tar_file.
2600
+ """
2601
+
2602
+ file1_path = os.path.join(self.test_dir, "tar_file1.txt")
2603
+ content1 = "content for tar1"
2604
+ with open(file1_path, 'w') as f: f.write(content1)
2605
+
2606
+ subdir_for_tar = os.path.join(self.test_dir, "tar_subdir")
2607
+ os.makedirs(subdir_for_tar, exist_ok=True)
2608
+ file2_path = os.path.join(subdir_for_tar, "tar_file2.log")
2609
+ content2 = "content for tar2"
2610
+ with open(file2_path, 'w') as f: f.write(content2)
2611
+
2612
+ input_files = [file1_path, file2_path]
2613
+ output_tar_path = os.path.join(self.test_dir, "archive.tar.gz")
2614
+
2615
+ add_files_to_single_tar_file(input_files, output_tar_path, arc_name_base=self.test_dir,
2616
+ overwrite=True, mode='x:gz')
2617
+ assert os.path.exists(output_tar_path)
2618
+
2619
+ un_tar_dir = os.path.join(self.test_dir, "un_tar_contents")
2620
+ os.makedirs(un_tar_dir, exist_ok=True)
2621
+ with tarfile.open(output_tar_path, 'r:gz') as tf:
2622
+ # The "filter" option was added as of Python 3.12, and *not* specifying
2623
+ # filter=None will change behavior as of Python 3.14. We want the unmodified
2624
+ # behavior, but we want to support Python <3.12, so we do a version check.
2625
+ if sys.version_info >= (3, 12):
2626
+ tf.extractall(path=un_tar_dir, filter=None)
2627
+ else:
2628
+ tf.extractall(path=un_tar_dir)
2629
+
2630
+ expected_untarred_file1 = os.path.join(un_tar_dir, os.path.relpath(file1_path, self.test_dir))
2631
+ expected_untarred_file2 = os.path.join(un_tar_dir, os.path.relpath(file2_path, self.test_dir))
2632
+
2633
+ assert os.path.exists(expected_untarred_file1)
2634
+ with open(expected_untarred_file1, 'r') as f: assert f.read() == content1
2635
+ assert os.path.exists(expected_untarred_file2)
2636
+ with open(expected_untarred_file2, 'r') as f: assert f.read() == content2
2637
+
2638
+
2639
+ def test_parallel_zip_individual_files_and_folders(self):
2640
+ """
2641
+ Test parallel_zip_files, parallel_zip_folders, and zip_each_file_in_folder.
2642
+ """
2643
+
2644
+ file1_to_zip = os.path.join(self.test_dir, "pz_file1.txt")
2645
+ file2_to_zip = os.path.join(self.test_dir, "pz_file2.txt")
2646
+ with open(file1_to_zip, 'w') as f: f.write("pz_content1")
2647
+ with open(file2_to_zip, 'w') as f: f.write("pz_content2")
2648
+
2649
+ parallel_zip_files([file1_to_zip, file2_to_zip], max_workers=1, overwrite=True)
2650
+ assert os.path.exists(file1_to_zip + ".zip")
2651
+ assert os.path.exists(file2_to_zip + ".zip")
2652
+ unzip_dir_pz = os.path.join(self.test_dir, "unzip_pz")
2653
+ unzip_file(file1_to_zip + ".zip", unzip_dir_pz)
2654
+ assert os.path.exists(os.path.join(unzip_dir_pz, os.path.basename(file1_to_zip)))
2655
+
2656
+ folder1_to_zip = os.path.join(self.test_dir, "pz_folder1")
2657
+ os.makedirs(folder1_to_zip, exist_ok=True)
2658
+ with open(os.path.join(folder1_to_zip, "pf1.txt"), 'w') as f: f.write("pf1_content")
2659
+ folder2_to_zip = os.path.join(self.test_dir, "pz_folder2")
2660
+ os.makedirs(folder2_to_zip, exist_ok=True)
2661
+ with open(os.path.join(folder2_to_zip, "pf2.txt"), 'w') as f: f.write("pf2_content")
2662
+
2663
+ parallel_zip_folders([folder1_to_zip, folder2_to_zip], max_workers=1, overwrite=True)
2664
+ assert os.path.exists(folder1_to_zip + ".zip")
2665
+ assert os.path.exists(folder2_to_zip + ".zip")
2666
+ unzip_dir_pzf = os.path.join(self.test_dir, "unzip_pzf")
2667
+ unzip_file(folder1_to_zip + ".zip", unzip_dir_pzf)
2668
+ assert os.path.exists(os.path.join(unzip_dir_pzf, "pf1.txt"))
2669
+
2670
+ zef_folder = os.path.join(self.test_dir, "zef_test_folder")
2671
+ os.makedirs(zef_folder, exist_ok=True)
2672
+ zef_file1 = os.path.join(zef_folder, "zef1.txt")
2673
+ zef_file2_png = os.path.join(zef_folder, "zef2.png")
2674
+ zef_file3_zip = os.path.join(zef_folder, "zef3.zip")
2675
+ zef_subdir = os.path.join(zef_folder, "zef_sub")
2676
+ os.makedirs(zef_subdir, exist_ok=True)
2677
+ zef_file_in_sub = os.path.join(zef_subdir, "zef_subfile.txt")
2678
+
2679
+ for p_path in [zef_file1, zef_file2_png, zef_file3_zip, zef_file_in_sub]:
2680
+ with open(p_path, 'w') as f: f.write(f"content of {os.path.basename(p_path)}")
2681
+
2682
+ zip_each_file_in_folder(zef_folder, recursive=False, max_workers=1, overwrite=True)
2683
+ assert os.path.exists(zef_file1 + ".zip")
2684
+ assert os.path.exists(zef_file2_png + ".zip")
2685
+ assert not os.path.exists(zef_file3_zip + ".zip")
2686
+ assert not os.path.exists(zef_file_in_sub + ".zip")
2687
+
2688
+ if os.path.exists(zef_file1 + ".zip"): os.remove(zef_file1 + ".zip")
2689
+ if os.path.exists(zef_file2_png + ".zip"): os.remove(zef_file2_png + ".zip")
2690
+
2691
+ zip_each_file_in_folder(zef_folder, recursive=True, max_workers=1, overwrite=True)
2692
+ assert os.path.exists(zef_file1 + ".zip")
2693
+ assert os.path.exists(zef_file2_png + ".zip")
2694
+ assert not os.path.exists(zef_file3_zip + ".zip")
2695
+ assert os.path.exists(zef_file_in_sub + ".zip")
2696
+
2697
+ if os.path.exists(zef_file1 + ".zip"): os.remove(zef_file1 + ".zip")
2698
+ if os.path.exists(zef_file2_png + ".zip"): os.remove(zef_file2_png + ".zip")
2699
+ if os.path.exists(zef_file_in_sub + ".zip"): os.remove(zef_file_in_sub + ".zip")
2700
+ zip_each_file_in_folder(zef_folder, recursive=True, required_token="zef1", max_workers=1, overwrite=True)
2701
+ assert os.path.exists(zef_file1 + ".zip")
2702
+ assert not os.path.exists(zef_file2_png + ".zip")
2703
+ assert not os.path.exists(zef_file_in_sub + ".zip")
2704
+
2705
+ if os.path.exists(zef_file1 + ".zip"): os.remove(zef_file1 + ".zip")
2706
+ dummy_to_zip = os.path.join(zef_folder,"dummy.txt")
2707
+ with open(dummy_to_zip,'w') as f: f.write('d')
2708
+ zip_each_file_in_folder(zef_folder, recursive=False, exclude_zip=False, max_workers=1, overwrite=True)
2709
+ assert os.path.exists(dummy_to_zip + ".zip")
2710
+ assert os.path.exists(zef_file3_zip + ".zip")
2711
+ if os.path.exists(dummy_to_zip + ".zip"): os.remove(dummy_to_zip + ".zip")
2712
+ if os.path.exists(zef_file3_zip + ".zip"): os.remove(zef_file3_zip + ".zip")
2713
+
2714
+
2715
+ def test_compute_file_hash(self):
2716
+ """
2717
+ Test compute_file_hash and parallel_compute_file_hashes.
2718
+ """
2719
+
2720
+ file1_name = "hash_me1.txt"
2721
+ file1_path = os.path.join(self.test_dir, file1_name)
2722
+ content1 = "This is a test string for hashing."
2723
+ with open(file1_path, 'w') as f:
2724
+ f.write(content1)
2725
+
2726
+ file2_name = "hash_me2.txt"
2727
+ file2_path = os.path.join(self.test_dir, file2_name)
2728
+ with open(file2_path, 'w') as f:
2729
+ f.write(content1)
2730
+
2731
+ file3_name = "hash_me3.txt"
2732
+ file3_path = os.path.join(self.test_dir, file3_name)
2733
+ content3 = "This is a different test string for hashing."
2734
+ with open(file3_path, 'w') as f:
2735
+ f.write(content3)
2736
+
2737
+ expected_hash_content1_sha256 = \
2738
+ "c56f19d76df6a09e49fe0d9ce7b1bc7f1dbd582f668742bede65c54c47d5bcf4".lower()
2739
+ expected_hash_content3_sha256 = \
2740
+ "23013ff7e93264317f7b2fc0e9a217649f2dc0b11ca7e0bd49632424b70b6680".lower()
2741
+
2742
+ hash1 = compute_file_hash(file1_path)
2743
+ hash2 = compute_file_hash(file2_path)
2744
+ hash3 = compute_file_hash(file3_path)
2745
+ assert hash1 == expected_hash_content1_sha256
2746
+ assert hash2 == expected_hash_content1_sha256
2747
+ assert hash1 != hash3
2748
+ assert hash3 == expected_hash_content3_sha256
2749
+
2750
+ expected_hash_content1_md5 = "94b971f1f8cdb23c2af82af73160d4b0".lower()
2751
+ hash1_md5 = compute_file_hash(file1_path, algorithm='md5')
2752
+ assert hash1_md5 == expected_hash_content1_md5
2753
+
2754
+ non_existent_path = os.path.join(self.test_dir, "no_such_file.txt")
2755
+ assert compute_file_hash(non_existent_path, allow_failures=True) is None
2756
+ try:
2757
+ compute_file_hash(non_existent_path, allow_failures=False)
2758
+ raise AssertionError("FileNotFoundError not raised for compute_file_hash")
2759
+ except FileNotFoundError:
2760
+ pass
2761
+
2762
+ files_to_hash = [file1_path, file3_path, non_existent_path]
2763
+ hashes_parallel = parallel_compute_file_hashes(files_to_hash, max_workers=1)
2764
+
2765
+ norm_f1 = file1_path.replace('\\','/')
2766
+ norm_f3 = file3_path.replace('\\','/')
2767
+ norm_non = non_existent_path.replace('\\','/')
2768
+
2769
+ expected_parallel_hashes = {
2770
+ norm_f1: expected_hash_content1_sha256,
2771
+ norm_f3: expected_hash_content3_sha256,
2772
+ norm_non: None
2773
+ }
2774
+ hashes_parallel_norm = {k.replace('\\','/'): v for k,v in hashes_parallel.items()}
2775
+ assert hashes_parallel_norm == expected_parallel_hashes
2776
+
2777
+ hash_folder = os.path.join(self.test_dir, "hash_test_folder")
2778
+ os.makedirs(hash_folder, exist_ok=True)
2779
+ h_f1_name = "h_f1.txt"; h_f1_path = os.path.join(hash_folder, h_f1_name)
2780
+ h_f2_name = "h_f2.txt"; h_f2_path = os.path.join(hash_folder, h_f2_name)
2781
+ with open(h_f1_path, 'w') as f: f.write(content1)
2782
+ with open(h_f2_path, 'w') as f: f.write(content3)
2783
+
2784
+ hashes_folder_parallel = parallel_compute_file_hashes(hash_folder, recursive=False, max_workers=1)
2785
+ norm_hf1 = h_f1_path.replace('\\','/')
2786
+ norm_hf2 = h_f2_path.replace('\\','/')
2787
+ expected_folder_hashes = {
2788
+ norm_hf1: expected_hash_content1_sha256,
2789
+ norm_hf2: expected_hash_content3_sha256
2790
+ }
2791
+ hashes_folder_parallel_norm = {k.replace('\\','/'): v for k,v in hashes_folder_parallel.items()}
2792
+ assert hashes_folder_parallel_norm == expected_folder_hashes
2793
+
2794
+
2795
+ def test_path_utils():
2796
+ """
2797
+ Runs all tests in the TestPathUtils class.
2798
+ """
2799
+
2800
+ test_instance = TestPathUtils()
2801
+ test_instance.set_up()
2802
+
2803
+ try:
2804
+
2805
+ test_instance.test_is_image_file()
2806
+ test_instance.test_find_image_strings()
2807
+ test_instance.test_find_images()
2808
+ test_instance.test_recursive_file_list_and_file_list()
2809
+ test_instance.test_folder_list()
2810
+ test_instance.test_folder_summary()
2811
+ test_instance.test_fileparts()
2812
+ test_instance.test_insert_before_extension()
2813
+ test_instance.test_split_path()
2814
+ test_instance.test_path_is_abs()
2815
+ test_instance.test_safe_create_link_unix()
2816
+ test_instance.test_remove_empty_folders()
2817
+ test_instance.test_path_join()
2818
+ test_instance.test_filename_cleaning()
2819
+ test_instance.test_is_executable()
2820
+ test_instance.test_write_read_list_to_file()
2821
+ test_instance.test_parallel_copy_files()
2822
+ test_instance.test_get_file_sizes()
2823
+ test_instance.test_zip_file_and_unzip_file()
2824
+ test_instance.test_zip_folder()
2825
+ test_instance.test_zip_files_into_single_zipfile()
2826
+ test_instance.test_add_files_to_single_tar_file()
2827
+ test_instance.test_parallel_zip_individual_files_and_folders()
2828
+ test_instance.test_compute_file_hash()
2829
+
2830
+ finally:
2831
+
2832
+ test_instance.tear_down()
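+
+ # ...def test_path_utils(...)
+
+
+ # Convenience entry point (a minimal sketch): run the path_utils tests when
+ # this module is executed directly.
+ if __name__ == '__main__':
+     test_path_utils()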