megadetector 5.0.9__py3-none-any.whl → 5.0.11__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of megadetector might be problematic. Click here for more details.
- {megadetector-5.0.9.dist-info → megadetector-5.0.11.dist-info}/LICENSE +0 -0
- {megadetector-5.0.9.dist-info → megadetector-5.0.11.dist-info}/METADATA +12 -11
- megadetector-5.0.11.dist-info/RECORD +5 -0
- megadetector-5.0.11.dist-info/top_level.txt +1 -0
- api/__init__.py +0 -0
- api/batch_processing/__init__.py +0 -0
- api/batch_processing/api_core/__init__.py +0 -0
- api/batch_processing/api_core/batch_service/__init__.py +0 -0
- api/batch_processing/api_core/batch_service/score.py +0 -439
- api/batch_processing/api_core/server.py +0 -294
- api/batch_processing/api_core/server_api_config.py +0 -98
- api/batch_processing/api_core/server_app_config.py +0 -55
- api/batch_processing/api_core/server_batch_job_manager.py +0 -220
- api/batch_processing/api_core/server_job_status_table.py +0 -152
- api/batch_processing/api_core/server_orchestration.py +0 -360
- api/batch_processing/api_core/server_utils.py +0 -92
- api/batch_processing/api_core_support/__init__.py +0 -0
- api/batch_processing/api_core_support/aggregate_results_manually.py +0 -46
- api/batch_processing/api_support/__init__.py +0 -0
- api/batch_processing/api_support/summarize_daily_activity.py +0 -152
- api/batch_processing/data_preparation/__init__.py +0 -0
- api/batch_processing/data_preparation/manage_local_batch.py +0 -2391
- api/batch_processing/data_preparation/manage_video_batch.py +0 -327
- api/batch_processing/integration/digiKam/setup.py +0 -6
- api/batch_processing/integration/digiKam/xmp_integration.py +0 -465
- api/batch_processing/integration/eMammal/test_scripts/config_template.py +0 -5
- api/batch_processing/integration/eMammal/test_scripts/push_annotations_to_emammal.py +0 -126
- api/batch_processing/integration/eMammal/test_scripts/select_images_for_testing.py +0 -55
- api/batch_processing/postprocessing/__init__.py +0 -0
- api/batch_processing/postprocessing/add_max_conf.py +0 -64
- api/batch_processing/postprocessing/categorize_detections_by_size.py +0 -163
- api/batch_processing/postprocessing/combine_api_outputs.py +0 -249
- api/batch_processing/postprocessing/compare_batch_results.py +0 -958
- api/batch_processing/postprocessing/convert_output_format.py +0 -397
- api/batch_processing/postprocessing/load_api_results.py +0 -195
- api/batch_processing/postprocessing/md_to_coco.py +0 -310
- api/batch_processing/postprocessing/md_to_labelme.py +0 -330
- api/batch_processing/postprocessing/merge_detections.py +0 -401
- api/batch_processing/postprocessing/postprocess_batch_results.py +0 -1904
- api/batch_processing/postprocessing/remap_detection_categories.py +0 -170
- api/batch_processing/postprocessing/render_detection_confusion_matrix.py +0 -661
- api/batch_processing/postprocessing/repeat_detection_elimination/find_repeat_detections.py +0 -211
- api/batch_processing/postprocessing/repeat_detection_elimination/remove_repeat_detections.py +0 -82
- api/batch_processing/postprocessing/repeat_detection_elimination/repeat_detections_core.py +0 -1631
- api/batch_processing/postprocessing/separate_detections_into_folders.py +0 -731
- api/batch_processing/postprocessing/subset_json_detector_output.py +0 -696
- api/batch_processing/postprocessing/top_folders_to_bottom.py +0 -223
- api/synchronous/__init__.py +0 -0
- api/synchronous/api_core/animal_detection_api/__init__.py +0 -0
- api/synchronous/api_core/animal_detection_api/api_backend.py +0 -152
- api/synchronous/api_core/animal_detection_api/api_frontend.py +0 -266
- api/synchronous/api_core/animal_detection_api/config.py +0 -35
- api/synchronous/api_core/animal_detection_api/data_management/annotations/annotation_constants.py +0 -47
- api/synchronous/api_core/animal_detection_api/detection/detector_training/copy_checkpoints.py +0 -43
- api/synchronous/api_core/animal_detection_api/detection/detector_training/model_main_tf2.py +0 -114
- api/synchronous/api_core/animal_detection_api/detection/process_video.py +0 -543
- api/synchronous/api_core/animal_detection_api/detection/pytorch_detector.py +0 -304
- api/synchronous/api_core/animal_detection_api/detection/run_detector.py +0 -627
- api/synchronous/api_core/animal_detection_api/detection/run_detector_batch.py +0 -1029
- api/synchronous/api_core/animal_detection_api/detection/run_inference_with_yolov5_val.py +0 -581
- api/synchronous/api_core/animal_detection_api/detection/run_tiled_inference.py +0 -754
- api/synchronous/api_core/animal_detection_api/detection/tf_detector.py +0 -165
- api/synchronous/api_core/animal_detection_api/detection/video_utils.py +0 -495
- api/synchronous/api_core/animal_detection_api/md_utils/azure_utils.py +0 -174
- api/synchronous/api_core/animal_detection_api/md_utils/ct_utils.py +0 -262
- api/synchronous/api_core/animal_detection_api/md_utils/directory_listing.py +0 -251
- api/synchronous/api_core/animal_detection_api/md_utils/matlab_porting_tools.py +0 -97
- api/synchronous/api_core/animal_detection_api/md_utils/path_utils.py +0 -416
- api/synchronous/api_core/animal_detection_api/md_utils/process_utils.py +0 -110
- api/synchronous/api_core/animal_detection_api/md_utils/sas_blob_utils.py +0 -509
- api/synchronous/api_core/animal_detection_api/md_utils/string_utils.py +0 -59
- api/synchronous/api_core/animal_detection_api/md_utils/url_utils.py +0 -144
- api/synchronous/api_core/animal_detection_api/md_utils/write_html_image_list.py +0 -226
- api/synchronous/api_core/animal_detection_api/md_visualization/visualization_utils.py +0 -841
- api/synchronous/api_core/tests/__init__.py +0 -0
- api/synchronous/api_core/tests/load_test.py +0 -110
- classification/__init__.py +0 -0
- classification/aggregate_classifier_probs.py +0 -108
- classification/analyze_failed_images.py +0 -227
- classification/cache_batchapi_outputs.py +0 -198
- classification/create_classification_dataset.py +0 -627
- classification/crop_detections.py +0 -516
- classification/csv_to_json.py +0 -226
- classification/detect_and_crop.py +0 -855
- classification/efficientnet/__init__.py +0 -9
- classification/efficientnet/model.py +0 -415
- classification/efficientnet/utils.py +0 -610
- classification/evaluate_model.py +0 -520
- classification/identify_mislabeled_candidates.py +0 -152
- classification/json_to_azcopy_list.py +0 -63
- classification/json_validator.py +0 -695
- classification/map_classification_categories.py +0 -276
- classification/merge_classification_detection_output.py +0 -506
- classification/prepare_classification_script.py +0 -194
- classification/prepare_classification_script_mc.py +0 -228
- classification/run_classifier.py +0 -286
- classification/save_mislabeled.py +0 -110
- classification/train_classifier.py +0 -825
- classification/train_classifier_tf.py +0 -724
- classification/train_utils.py +0 -322
- data_management/__init__.py +0 -0
- data_management/annotations/__init__.py +0 -0
- data_management/annotations/annotation_constants.py +0 -34
- data_management/camtrap_dp_to_coco.py +0 -238
- data_management/cct_json_utils.py +0 -395
- data_management/cct_to_md.py +0 -176
- data_management/cct_to_wi.py +0 -289
- data_management/coco_to_labelme.py +0 -272
- data_management/coco_to_yolo.py +0 -662
- data_management/databases/__init__.py +0 -0
- data_management/databases/add_width_and_height_to_db.py +0 -33
- data_management/databases/combine_coco_camera_traps_files.py +0 -206
- data_management/databases/integrity_check_json_db.py +0 -477
- data_management/databases/subset_json_db.py +0 -115
- data_management/generate_crops_from_cct.py +0 -149
- data_management/get_image_sizes.py +0 -188
- data_management/importers/add_nacti_sizes.py +0 -52
- data_management/importers/add_timestamps_to_icct.py +0 -79
- data_management/importers/animl_results_to_md_results.py +0 -158
- data_management/importers/auckland_doc_test_to_json.py +0 -372
- data_management/importers/auckland_doc_to_json.py +0 -200
- data_management/importers/awc_to_json.py +0 -189
- data_management/importers/bellevue_to_json.py +0 -273
- data_management/importers/cacophony-thermal-importer.py +0 -796
- data_management/importers/carrizo_shrubfree_2018.py +0 -268
- data_management/importers/carrizo_trail_cam_2017.py +0 -287
- data_management/importers/cct_field_adjustments.py +0 -57
- data_management/importers/channel_islands_to_cct.py +0 -913
- data_management/importers/eMammal/copy_and_unzip_emammal.py +0 -180
- data_management/importers/eMammal/eMammal_helpers.py +0 -249
- data_management/importers/eMammal/make_eMammal_json.py +0 -223
- data_management/importers/ena24_to_json.py +0 -275
- data_management/importers/filenames_to_json.py +0 -385
- data_management/importers/helena_to_cct.py +0 -282
- data_management/importers/idaho-camera-traps.py +0 -1407
- data_management/importers/idfg_iwildcam_lila_prep.py +0 -294
- data_management/importers/jb_csv_to_json.py +0 -150
- data_management/importers/mcgill_to_json.py +0 -250
- data_management/importers/missouri_to_json.py +0 -489
- data_management/importers/nacti_fieldname_adjustments.py +0 -79
- data_management/importers/noaa_seals_2019.py +0 -181
- data_management/importers/pc_to_json.py +0 -365
- data_management/importers/plot_wni_giraffes.py +0 -123
- data_management/importers/prepare-noaa-fish-data-for-lila.py +0 -359
- data_management/importers/prepare_zsl_imerit.py +0 -131
- data_management/importers/rspb_to_json.py +0 -356
- data_management/importers/save_the_elephants_survey_A.py +0 -320
- data_management/importers/save_the_elephants_survey_B.py +0 -332
- data_management/importers/snapshot_safari_importer.py +0 -758
- data_management/importers/snapshot_safari_importer_reprise.py +0 -665
- data_management/importers/snapshot_serengeti_lila.py +0 -1067
- data_management/importers/snapshotserengeti/make_full_SS_json.py +0 -150
- data_management/importers/snapshotserengeti/make_per_season_SS_json.py +0 -153
- data_management/importers/sulross_get_exif.py +0 -65
- data_management/importers/timelapse_csv_set_to_json.py +0 -490
- data_management/importers/ubc_to_json.py +0 -399
- data_management/importers/umn_to_json.py +0 -507
- data_management/importers/wellington_to_json.py +0 -263
- data_management/importers/wi_to_json.py +0 -441
- data_management/importers/zamba_results_to_md_results.py +0 -181
- data_management/labelme_to_coco.py +0 -548
- data_management/labelme_to_yolo.py +0 -272
- data_management/lila/__init__.py +0 -0
- data_management/lila/add_locations_to_island_camera_traps.py +0 -97
- data_management/lila/add_locations_to_nacti.py +0 -147
- data_management/lila/create_lila_blank_set.py +0 -557
- data_management/lila/create_lila_test_set.py +0 -151
- data_management/lila/create_links_to_md_results_files.py +0 -106
- data_management/lila/download_lila_subset.py +0 -177
- data_management/lila/generate_lila_per_image_labels.py +0 -515
- data_management/lila/get_lila_annotation_counts.py +0 -170
- data_management/lila/get_lila_image_counts.py +0 -111
- data_management/lila/lila_common.py +0 -300
- data_management/lila/test_lila_metadata_urls.py +0 -132
- data_management/ocr_tools.py +0 -874
- data_management/read_exif.py +0 -681
- data_management/remap_coco_categories.py +0 -84
- data_management/remove_exif.py +0 -66
- data_management/resize_coco_dataset.py +0 -189
- data_management/wi_download_csv_to_coco.py +0 -246
- data_management/yolo_output_to_md_output.py +0 -441
- data_management/yolo_to_coco.py +0 -676
- detection/__init__.py +0 -0
- detection/detector_training/__init__.py +0 -0
- detection/detector_training/model_main_tf2.py +0 -114
- detection/process_video.py +0 -703
- detection/pytorch_detector.py +0 -337
- detection/run_detector.py +0 -779
- detection/run_detector_batch.py +0 -1219
- detection/run_inference_with_yolov5_val.py +0 -917
- detection/run_tiled_inference.py +0 -935
- detection/tf_detector.py +0 -188
- detection/video_utils.py +0 -606
- docs/source/conf.py +0 -43
- md_utils/__init__.py +0 -0
- md_utils/azure_utils.py +0 -174
- md_utils/ct_utils.py +0 -612
- md_utils/directory_listing.py +0 -246
- md_utils/md_tests.py +0 -968
- md_utils/path_utils.py +0 -1044
- md_utils/process_utils.py +0 -157
- md_utils/sas_blob_utils.py +0 -509
- md_utils/split_locations_into_train_val.py +0 -228
- md_utils/string_utils.py +0 -92
- md_utils/url_utils.py +0 -323
- md_utils/write_html_image_list.py +0 -225
- md_visualization/__init__.py +0 -0
- md_visualization/plot_utils.py +0 -293
- md_visualization/render_images_with_thumbnails.py +0 -275
- md_visualization/visualization_utils.py +0 -1537
- md_visualization/visualize_db.py +0 -551
- md_visualization/visualize_detector_output.py +0 -406
- megadetector-5.0.9.dist-info/RECORD +0 -224
- megadetector-5.0.9.dist-info/top_level.txt +0 -8
- taxonomy_mapping/__init__.py +0 -0
- taxonomy_mapping/map_lila_taxonomy_to_wi_taxonomy.py +0 -491
- taxonomy_mapping/map_new_lila_datasets.py +0 -154
- taxonomy_mapping/prepare_lila_taxonomy_release.py +0 -142
- taxonomy_mapping/preview_lila_taxonomy.py +0 -591
- taxonomy_mapping/retrieve_sample_image.py +0 -71
- taxonomy_mapping/simple_image_download.py +0 -218
- taxonomy_mapping/species_lookup.py +0 -834
- taxonomy_mapping/taxonomy_csv_checker.py +0 -159
- taxonomy_mapping/taxonomy_graph.py +0 -346
- taxonomy_mapping/validate_lila_category_mappings.py +0 -83
- {megadetector-5.0.9.dist-info → megadetector-5.0.11.dist-info}/WHEEL +0 -0
|
@@ -1,97 +0,0 @@
|
|
|
1
|
-
########
|
|
2
|
-
#
|
|
3
|
-
# matlab_porting_tools.py
|
|
4
|
-
#
|
|
5
|
-
# Module containing a few ported Matlab functions that made it easier
|
|
6
|
-
# for me to port other, larger Matlab functions.
|
|
7
|
-
#
|
|
8
|
-
########
|
|
9
|
-
|
|
10
|
-
#%% Constants and imports
|
|
11
|
-
|
|
12
|
-
import ntpath
|
|
13
|
-
|
|
14
|
-
|
|
15
|
-
#%% sec2hms()
|
|
16
|
-
|
|
17
|
-
def sec2hms(tSeconds):
    """
    Convert a duration in seconds to a human-readable string, e.g.:

    16 hours, 40 mins, 33.000sec

    Hour and minute components are omitted when zero; the seconds component
    is always present, formatted as '%3.3fsec'.

    I prefer using the humanfriendly package for this, but I use this when
    porting from Matlab.
    """

    # https://stackoverflow.com/questions/775049/python-time-seconds-to-hms
    minutes, seconds = divmod(tSeconds, 60)
    hours, minutes = divmod(minutes, 60)

    pieces = []
    separator = ', '

    if hours > 0:
        suffix = 's' if hours > 1 else ''
        pieces.append('%d hour%s%s' % (hours, suffix, separator))

    if minutes > 0:
        suffix = 's' if minutes > 1 else ''
        pieces.append('%d min%s%s' % (minutes, suffix, separator))

    pieces.append('%3.3fsec' % seconds)

    return ''.join(pieces)
53
|
-
|
|
54
|
-
#%% Test driver for sec2hms()

# Interactive-only scaffolding: never runs on import.
if False:

    pass

    #%%

    TEST_VALUES = [60033, 30.4, 245234523454.1]

    for value in TEST_VALUES:
        formatted = sec2hms(value)
        print('{} - {}'.format(value, formatted))
|
67
|
-
|
|
68
|
-
|
|
69
|
-
#%% read_lines_from_file()
|
|
70
|
-
|
|
71
|
-
def read_lines_from_file(filename):
    """
    Read all lines from a text file, stripping trailing whitespace
    (including the newline) from each line.

    Returns a list of strings.
    """

    with open(filename) as f:
        # rstrip removes the newline plus any other trailing whitespace
        lines = [line.rstrip() for line in f]

    return lines
80
|
-
|
|
81
|
-
|
|
82
|
-
#%% write_lines_to_file()
|
|
83
|
-
|
|
84
|
-
def write_lines_to_file(lines, filename):
    """
    Write each string in [lines] to [filename], one string per line.
    """

    with open(filename,'w') as f:
        f.writelines(s + '\n' for s in lines)
89
|
-
|
|
90
|
-
|
|
91
|
-
#%% string_ends_with()
|
|
92
|
-
|
|
93
|
-
def string_ends_with(s,query):
    """
    Matlab-porting shim: return True if string [s] ends with [query].
    """
    result = s.endswith(query)
    return result
|
95
|
-
|
|
96
|
-
def string_starts_with(s,query):
    """
    Matlab-porting shim: return True if string [s] starts with [query].
    """
    result = s.startswith(query)
    return result
|
@@ -1,416 +0,0 @@
|
|
|
1
|
-
########
|
|
2
|
-
#
|
|
3
|
-
# path_utils.py
|
|
4
|
-
#
|
|
5
|
-
# Miscellaneous useful utils for path manipulation, things that could *almost*
|
|
6
|
-
# be in os.path, but aren't.
|
|
7
|
-
#
|
|
8
|
-
########
|
|
9
|
-
|
|
10
|
-
#%% Imports and constants
|
|
11
|
-
|
|
12
|
-
import glob
|
|
13
|
-
import ntpath
|
|
14
|
-
import os
|
|
15
|
-
import posixpath
|
|
16
|
-
import string
|
|
17
|
-
import json
|
|
18
|
-
import unicodedata
|
|
19
|
-
import zipfile
|
|
20
|
-
|
|
21
|
-
from zipfile import ZipFile
|
|
22
|
-
from datetime import datetime
|
|
23
|
-
from typing import Container, Iterable, List, Optional, Tuple, Sequence
|
|
24
|
-
from multiprocessing.pool import ThreadPool
|
|
25
|
-
from tqdm import tqdm
|
|
26
|
-
|
|
27
|
-
IMG_EXTENSIONS = ('.jpg', '.jpeg', '.gif', '.png', '.tif', '.tiff', '.bmp')
|
|
28
|
-
|
|
29
|
-
VALID_FILENAME_CHARS = f"~-_.() {string.ascii_letters}{string.digits}"
|
|
30
|
-
SEPARATOR_CHARS = r":\/"
|
|
31
|
-
VALID_PATH_CHARS = VALID_FILENAME_CHARS + SEPARATOR_CHARS
|
|
32
|
-
CHAR_LIMIT = 255
|
|
33
|
-
|
|
34
|
-
|
|
35
|
-
#%% General path functions
|
|
36
|
-
|
|
37
|
-
def recursive_file_list(base_dir, convert_slashes=True, return_relative_paths=False):
    """
    Recursively enumerate files (not directories) under [base_dir].

    Args:
        base_dir: folder to enumerate
        convert_slashes: replace backslashes with forward slashes
        return_relative_paths: return paths relative to [base_dir] rather
            than full paths

    Returns a sorted list of filenames.
    """

    found = []
    for dirpath, _, filenames in os.walk(base_dir):
        for fn in filenames:
            p = os.path.join(dirpath, fn)
            if convert_slashes:
                p = p.replace('\\', '/')
            found.append(p)

    if return_relative_paths:
        found = [os.path.relpath(p, base_dir) for p in found]

    return sorted(found)
57
|
-
|
|
58
|
-
|
|
59
|
-
def split_path(path: str) -> List[str]:
    """
    Split [path] into all of its constituent tokens.

    Non-recursive version of:
    http://nicks-liquid-soapbox.blogspot.com/2011/03/splitting-path-to-list-in-python.html

    Examples
    >>> split_path('/dir/subdir/file.jpg')
    ['/', 'dir', 'subdir', 'file.jpg']
    >>> split_path('/')
    ['/']
    """

    tokens = []
    while True:
        # ntpath handles both Windows and Unix separators
        head, tail = ntpath.split(path)
        if (head == '') or (head == path):
            break
        tokens.append(tail)
        path = head
    tokens.append(head if head else tail)
    return list(reversed(tokens))
87
|
-
|
|
88
|
-
|
|
89
|
-
def fileparts(path: str) -> Tuple[str, str, str]:
    """
    Break a path down into directory, base name, and extension (Matlab's
    fileparts()).  The '.' stays with the extension; separators are removed.

    Examples
    >>> fileparts('/dir/subdir/file.jpg')
    ('/dir/subdir', 'file', '.jpg')
    >>> fileparts('file')
    ('', 'file', '')

    Returns:
        p: str, directory path
        n: str, filename without extension
        e: str, extension including the '.'
    """

    # ntpath handles both Windows and Unix separators
    directory = ntpath.dirname(path)
    stem, ext = ntpath.splitext(ntpath.basename(path))
    return directory, stem, ext
114
|
-
|
|
115
|
-
|
|
116
|
-
def insert_before_extension(filename: str, s: str = '', separator='.') -> str:
    """
    Insert [s] before the extension in [filename], preceded by [separator].

    If [s] is empty, inserts a 'YYYY.mm.dd.HH.MM.SS' timestamp instead.  If
    [filename] has no extension, [separator] + [s] is appended.

    Examples
    >>> insert_before_extension('/dir/subdir/file.ext', 'insert')
    '/dir/subdir/file.insert.ext'
    >>> insert_before_extension('/dir/subdir/file', 'insert')
    '/dir/subdir/file.insert'
    """

    assert len(filename) > 0
    token = s if s else datetime.now().strftime('%Y.%m.%d.%H.%M.%S')
    stem, ext = os.path.splitext(filename)
    return stem + separator + token + ext
137
|
-
|
|
138
|
-
|
|
139
|
-
def top_level_folder(p: str, windows: Optional[bool] = None) -> str:
    """
    Gets the top-level folder from path [p].

    This function behaves differently for Windows vs. Unix paths.  Set
    windows=True if [p] is a Windows path; set windows=None (default) to
    treat [p] as a native system path.

    On Windows, uses the top-level folder that isn't the drive, e.g.
    'c:\\blah\\foo' maps to 'c:\\blah'.

    On Unix, does not include the leaf node, e.g. '/blah/foo' maps to '/blah'.
    """

    if p == '':
        return ''

    default_lib = os.path  # save default os.path

    # We temporarily replace os.path with the requested flavor; restore it in
    # a finally block so an exception below can never leave os.path
    # monkey-patched for the rest of the process (the original version leaked
    # the patch on error).
    try:
        if windows is not None:
            os.path = ntpath if windows else posixpath

        # Path('/blah').parts is ('/', 'blah')
        parts = split_path(p)

        drive = os.path.splitdrive(p)[0]
        if len(parts) > 1 and (
                parts[0] == drive
                or parts[0] == drive + '/'
                or parts[0] == drive + '\\'
                or parts[0] in ['\\', '/']):
            result = os.path.join(parts[0], parts[1])
        else:
            result = parts[0]
    finally:
        os.path = default_lib  # restore default os.path

    return result
178
|
-
|
|
179
|
-
|
|
180
|
-
def safe_create_link(link_exists,link_new):
    """
    Create a symlink at [link_new] pointing to [link_exists].

    If [link_new] already exists, it must already be a symlink (asserts
    otherwise); if it points somewhere other than [link_exists], it is
    removed and re-created.
    """

    already_present = os.path.exists(link_new) or os.path.islink(link_new)
    if already_present:
        assert os.path.islink(link_new)
        if os.readlink(link_new) != link_exists:
            os.remove(link_new)
            os.symlink(link_exists,link_new)
    else:
        os.symlink(link_exists,link_new)
197
|
-
|
|
198
|
-
|
|
199
|
-
#%% Image-related path functions
|
|
200
|
-
|
|
201
|
-
def is_image_file(s: str, img_extensions: Container[str] = IMG_EXTENSIONS
                  ) -> bool:
    """
    Check a filename's extension (case-insensitively) against a set of
    image file extensions.

    Only looks at the name; does not check whether the file exists.
    """

    extension = os.path.splitext(s)[1].lower()
    return extension in img_extensions
213
|
-
|
|
214
|
-
|
|
215
|
-
def find_image_strings(strings: Iterable[str]) -> List[str]:
    """
    Given a list of candidate file names, return those that look like image
    file names (judged purely by extension).
    """

    return [fn for fn in strings if is_image_file(fn)]
222
|
-
|
|
223
|
-
|
|
224
|
-
def find_images(dirname: str, recursive: bool = False, return_relative_paths: bool = False) -> List[str]:
    """
    Find all files in [dirname] whose names look like image files.  Returns
    a sorted list of absolute paths unless return_relative_paths is set.
    """

    pattern = os.path.join(dirname, '**', '*.*') if recursive \
        else os.path.join(dirname, '*.*')
    candidates = glob.glob(pattern, recursive=recursive)

    images = find_image_strings(candidates)

    if return_relative_paths:
        images = [os.path.relpath(fn, dirname) for fn in images]

    return sorted(images)
242
|
-
|
|
243
|
-
|
|
244
|
-
#%% Filename cleaning functions
|
|
245
|
-
|
|
246
|
-
def clean_filename(filename: str, allow_list: str = VALID_FILENAME_CHARS,
                   char_limit: int = CHAR_LIMIT, force_lower: bool = False) -> str:
    """
    Remove non-ASCII and other invalid filename characters (on any
    reasonable OS) from a filename, then trim to [char_limit] characters.

    Does not allow :\\/ ; use clean_path to preserve those separators.

    Adapted from
    https://gist.github.com/wassname/1393c4a57cfcbf03641dbc31886123b8
    """

    # Decompose accented characters, then drop anything outside ASCII
    ascii_only = (unicodedata.normalize('NFKD', filename)
                  .encode('ASCII', 'ignore').decode())

    # Drop anything not on the allow-list
    kept = ''.join(c for c in ascii_only if c in allow_list)

    if char_limit is not None:
        kept = kept[:char_limit]
    if force_lower:
        kept = kept.lower()
    return kept
269
|
-
|
|
270
|
-
|
|
271
|
-
def clean_path(pathname: str, allow_list: str = VALID_PATH_CHARS,
               char_limit: int = CHAR_LIMIT, force_lower: bool = False) -> str:
    """
    Remove non-ASCII and other invalid path characters (on any reasonable
    OS) from a path, then trim to [char_limit] characters.  Unlike
    clean_filename, separator characters are preserved by default.
    """

    return clean_filename(pathname,
                          allow_list=allow_list,
                          char_limit=char_limit,
                          force_lower=force_lower)
280
|
-
|
|
281
|
-
|
|
282
|
-
def flatten_path(pathname: str, separator_chars: str = SEPARATOR_CHARS) -> str:
    """
    Clean [pathname] as per clean_path(), then replace every separator
    character with '~', collapsing the path onto a single level.
    """

    flattened = clean_path(pathname)
    for sep in separator_chars:
        flattened = flattened.replace(sep, '~')
    return flattened
293
|
-
|
|
294
|
-
|
|
295
|
-
#%% Platform-independent way to open files in their associated application
|
|
296
|
-
|
|
297
|
-
import sys,subprocess
|
|
298
|
-
|
|
299
|
-
def open_file(filename):
    """
    Open [filename] in the OS's associated application, in a
    platform-independent way.
    """
    if sys.platform == "win32":
        # Windows: delegate to the shell's file association
        os.startfile(filename)
        return
    # macOS uses "open"; assume xdg-open everywhere else
    opener = "open" if sys.platform == "darwin" else "xdg-open"
    subprocess.call([opener, filename])
305
|
-
|
|
306
|
-
|
|
307
|
-
#%% zipfile management functions
|
|
308
|
-
|
|
309
|
-
def unzip_file(input_file, output_folder=None):
    """
    Unzip [input_file] into [output_folder], defaulting to the folder
    containing [input_file].
    """

    target = os.path.dirname(input_file) if output_folder is None else output_folder

    with zipfile.ZipFile(input_file, 'r') as archive:
        archive.extractall(target)
320
|
-
|
|
321
|
-
|
|
322
|
-
#%% File list functions
|
|
323
|
-
|
|
324
|
-
def write_list_to_file(output_file: str, strings: Sequence[str]) -> None:
    """
    Write a list of strings to [output_file]: JSON if the name ends in
    '.json', otherwise newline-delimited text.
    """

    as_json = output_file.endswith('.json')
    with open(output_file, 'w') as f:
        if as_json:
            json.dump(strings, f, indent=1)
        else:
            f.write('\n'.join(strings))
335
|
-
|
|
336
|
-
|
|
337
|
-
def read_list_from_file(filename: str) -> List[str]:
    """
    Read a JSON-formatted list of strings from [filename]; asserts that the
    file contains a list and that every element is a string.
    """

    assert filename.endswith('.json')
    with open(filename, 'r') as f:
        contents = json.load(f)
    assert isinstance(contents, list)
    assert all(isinstance(item, str) for item in contents)
    return contents
349
|
-
|
|
350
|
-
|
|
351
|
-
#%% Zip functions
|
|
352
|
-
|
|
353
|
-
def zip_file(input_fn, output_fn=None, overwrite=False, verbose=False, compresslevel=9):
    """
    Zip a single file, by default writing to a new file called [input_fn].zip.

    Skips (returning None) when the output already exists and [overwrite] is
    False; otherwise returns the output filename.
    """

    if output_fn is None:
        output_fn = input_fn + '.zip'

    if (not overwrite) and (os.path.isfile(output_fn)):
        print('Skipping existing file {}'.format(output_fn))
        return

    if verbose:
        print('Zipping {} to {}'.format(input_fn,output_fn))

    arc_name = os.path.basename(input_fn)
    with ZipFile(output_fn, 'w', zipfile.ZIP_DEFLATED) as zipf:
        zipf.write(input_fn,
                   arcname=arc_name,
                   compresslevel=compresslevel,
                   compress_type=zipfile.ZIP_DEFLATED)

    return output_fn
375
|
-
|
|
376
|
-
|
|
377
|
-
def zip_folder(input_folder, output_fn=None, overwrite=False, verbose=False, compresslevel=9):
    """
    Recursively zip everything in [input_folder], storing entries as
    relative paths.  Defaults to writing to [input_folder].zip.  Returns
    the output filename.
    """

    if output_fn is None:
        output_fn = input_folder + '.zip'

    if not overwrite:
        assert not os.path.isfile(output_fn), 'Zip file {} exists'.format(output_fn)

    if verbose:
        print('Zipping {} to {}'.format(input_folder,output_fn))

    rel_paths = recursive_file_list(input_folder, return_relative_paths=True)

    with ZipFile(output_fn, 'w', zipfile.ZIP_DEFLATED) as zipf:
        for rel_path in rel_paths:
            zipf.write(os.path.join(input_folder, rel_path),
                       arcname=rel_path,
                       compresslevel=compresslevel,
                       compress_type=zipfile.ZIP_DEFLATED)

    return output_fn
404
|
-
|
|
405
|
-
|
|
406
|
-
def parallel_zip_files(input_files, max_workers=16):
    """
    Zip one or more files to separate output files in parallel, leaving the
    original files in place.

    Args:
        input_files: list of filenames, each zipped to [fn].zip
        max_workers: maximum number of worker threads
    """

    n_workers = min(max_workers, len(input_files))
    pool = ThreadPool(n_workers)

    # Close and join the pool when done (the original version leaked the
    # worker threads by never closing the pool).
    try:
        with tqdm(total=len(input_files)) as pbar:
            for i, _ in enumerate(pool.imap_unordered(zip_file, input_files)):
                pbar.update()
    finally:
        pool.close()
        pool.join()
@@ -1,110 +0,0 @@
|
|
|
1
|
-
########
|
|
2
|
-
#
|
|
3
|
-
# process_utils.py
|
|
4
|
-
#
|
|
5
|
-
# Run something at the command line and capture the output, based on:
|
|
6
|
-
#
|
|
7
|
-
# https://stackoverflow.com/questions/4417546/constantly-print-subprocess-output-while-process-is-running
|
|
8
|
-
#
|
|
9
|
-
# Includes handy example code for doing this on multiple processes/threads.
|
|
10
|
-
#
|
|
11
|
-
########
|
|
12
|
-
|
|
13
|
-
#%% Constants, imports, and environment
|
|
14
|
-
|
|
15
|
-
import os
|
|
16
|
-
import subprocess
|
|
17
|
-
|
|
18
|
-
os.environ["PYTHONUNBUFFERED"] = "1"
|
|
19
|
-
|
|
20
|
-
def execute(cmd):
    """
    Run [cmd] (a single string) in a shell, yielding each line of output to
    the caller.  Raises CalledProcessError if the command exits nonzero.
    """

    # https://stackoverflow.com/questions/4417546/constantly-print-subprocess-output-while-process-is-running
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
                            shell=True, universal_newlines=True)
    for line in iter(proc.stdout.readline, ""):
        yield line
    proc.stdout.close()
    rc = proc.wait()
    if rc:
        raise subprocess.CalledProcessError(rc, cmd)
34
|
-
|
|
35
|
-
|
|
36
|
-
def execute_and_print(cmd, print_output=True):
    """
    Run [cmd] (a single string) in a shell, capturing (and optionally
    printing) output.  Returns a dict with fields "status" (the process
    return code, 0 on success) and "output" (a list of output lines).
    """

    to_return = {'status':'unknown','output':''}
    lines = []
    try:
        for line in execute(cmd):
            lines.append(line)
            if print_output:
                print(line, end='', flush=True)
        to_return['status'] = 0
    except subprocess.CalledProcessError as cpe:
        # NOTE(review): cpe.output is typically None here since execute()
        # streams output rather than attaching it to the exception
        print('execute_and_print caught error: {}'.format(cpe.output))
        to_return['status'] = cpe.returncode
    to_return['output'] = lines

    return to_return
56
|
-
|
|
57
|
-
|
|
58
|
-
#%% Single-threaded test driver for execute_and_print

# Interactive-only scaffolding: never runs on import.
if False:

    #%%

    # Windows has no "sleep", so ping localhost a few times instead
    if os.name == 'nt':
        execute_and_print('echo hello && ping -n 5 127.0.0.1 && echo goodbye')
    else:
        execute_and_print('echo hello && sleep 1 && echo goodbye')
|
68
|
-
|
|
69
|
-
|
|
70
|
-
#%% Parallel test driver for execute_and_print

# Interactive-only scaffolding: never runs on import.
if False:

    #%%

    from functools import partial
    from multiprocessing.pool import ThreadPool as ThreadPool
    from multiprocessing.pool import Pool as Pool

    n_workers = 10

    # Should we use threads (vs. processes) for parallelization?
    use_threads = True

    test_data = ['a','b','c','d']

    def process_sample(s):
        return execute_and_print('echo ' + s,True)

    if n_workers == 1:

        results = []
        for sample in test_data:
            results.append(process_sample(sample))

    else:

        n_threads = min(n_workers,len(test_data))

        if use_threads:
            print('Starting parallel thread pool with {} workers'.format(n_threads))
            pool = ThreadPool(n_threads)
        else:
            print('Starting parallel process pool with {} workers'.format(n_threads))
            pool = Pool(n_threads)

        results = list(pool.map(partial(process_sample),test_data))

    for r in results:
        print(r)