megadetector 5.0.10-py3-none-any.whl → 5.0.11-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of megadetector might be problematic.
- {megadetector-5.0.10.dist-info → megadetector-5.0.11.dist-info}/LICENSE +0 -0
- {megadetector-5.0.10.dist-info → megadetector-5.0.11.dist-info}/METADATA +12 -11
- megadetector-5.0.11.dist-info/RECORD +5 -0
- megadetector-5.0.11.dist-info/top_level.txt +1 -0
- api/__init__.py +0 -0
- api/batch_processing/__init__.py +0 -0
- api/batch_processing/api_core/__init__.py +0 -0
- api/batch_processing/api_core/batch_service/__init__.py +0 -0
- api/batch_processing/api_core/batch_service/score.py +0 -439
- api/batch_processing/api_core/server.py +0 -294
- api/batch_processing/api_core/server_api_config.py +0 -98
- api/batch_processing/api_core/server_app_config.py +0 -55
- api/batch_processing/api_core/server_batch_job_manager.py +0 -220
- api/batch_processing/api_core/server_job_status_table.py +0 -152
- api/batch_processing/api_core/server_orchestration.py +0 -360
- api/batch_processing/api_core/server_utils.py +0 -92
- api/batch_processing/api_core_support/__init__.py +0 -0
- api/batch_processing/api_core_support/aggregate_results_manually.py +0 -46
- api/batch_processing/api_support/__init__.py +0 -0
- api/batch_processing/api_support/summarize_daily_activity.py +0 -152
- api/batch_processing/data_preparation/__init__.py +0 -0
- api/batch_processing/data_preparation/manage_local_batch.py +0 -2391
- api/batch_processing/data_preparation/manage_video_batch.py +0 -327
- api/batch_processing/integration/digiKam/setup.py +0 -6
- api/batch_processing/integration/digiKam/xmp_integration.py +0 -465
- api/batch_processing/integration/eMammal/test_scripts/config_template.py +0 -5
- api/batch_processing/integration/eMammal/test_scripts/push_annotations_to_emammal.py +0 -126
- api/batch_processing/integration/eMammal/test_scripts/select_images_for_testing.py +0 -55
- api/batch_processing/postprocessing/__init__.py +0 -0
- api/batch_processing/postprocessing/add_max_conf.py +0 -64
- api/batch_processing/postprocessing/categorize_detections_by_size.py +0 -163
- api/batch_processing/postprocessing/combine_api_outputs.py +0 -249
- api/batch_processing/postprocessing/compare_batch_results.py +0 -958
- api/batch_processing/postprocessing/convert_output_format.py +0 -397
- api/batch_processing/postprocessing/load_api_results.py +0 -195
- api/batch_processing/postprocessing/md_to_coco.py +0 -310
- api/batch_processing/postprocessing/md_to_labelme.py +0 -330
- api/batch_processing/postprocessing/merge_detections.py +0 -401
- api/batch_processing/postprocessing/postprocess_batch_results.py +0 -1904
- api/batch_processing/postprocessing/remap_detection_categories.py +0 -170
- api/batch_processing/postprocessing/render_detection_confusion_matrix.py +0 -661
- api/batch_processing/postprocessing/repeat_detection_elimination/find_repeat_detections.py +0 -211
- api/batch_processing/postprocessing/repeat_detection_elimination/remove_repeat_detections.py +0 -82
- api/batch_processing/postprocessing/repeat_detection_elimination/repeat_detections_core.py +0 -1631
- api/batch_processing/postprocessing/separate_detections_into_folders.py +0 -731
- api/batch_processing/postprocessing/subset_json_detector_output.py +0 -696
- api/batch_processing/postprocessing/top_folders_to_bottom.py +0 -223
- api/synchronous/__init__.py +0 -0
- api/synchronous/api_core/animal_detection_api/__init__.py +0 -0
- api/synchronous/api_core/animal_detection_api/api_backend.py +0 -152
- api/synchronous/api_core/animal_detection_api/api_frontend.py +0 -266
- api/synchronous/api_core/animal_detection_api/config.py +0 -35
- api/synchronous/api_core/animal_detection_api/data_management/annotations/annotation_constants.py +0 -47
- api/synchronous/api_core/animal_detection_api/detection/detector_training/copy_checkpoints.py +0 -43
- api/synchronous/api_core/animal_detection_api/detection/detector_training/model_main_tf2.py +0 -114
- api/synchronous/api_core/animal_detection_api/detection/process_video.py +0 -543
- api/synchronous/api_core/animal_detection_api/detection/pytorch_detector.py +0 -304
- api/synchronous/api_core/animal_detection_api/detection/run_detector.py +0 -627
- api/synchronous/api_core/animal_detection_api/detection/run_detector_batch.py +0 -1029
- api/synchronous/api_core/animal_detection_api/detection/run_inference_with_yolov5_val.py +0 -581
- api/synchronous/api_core/animal_detection_api/detection/run_tiled_inference.py +0 -754
- api/synchronous/api_core/animal_detection_api/detection/tf_detector.py +0 -165
- api/synchronous/api_core/animal_detection_api/detection/video_utils.py +0 -495
- api/synchronous/api_core/animal_detection_api/md_utils/azure_utils.py +0 -174
- api/synchronous/api_core/animal_detection_api/md_utils/ct_utils.py +0 -262
- api/synchronous/api_core/animal_detection_api/md_utils/directory_listing.py +0 -251
- api/synchronous/api_core/animal_detection_api/md_utils/matlab_porting_tools.py +0 -97
- api/synchronous/api_core/animal_detection_api/md_utils/path_utils.py +0 -416
- api/synchronous/api_core/animal_detection_api/md_utils/process_utils.py +0 -110
- api/synchronous/api_core/animal_detection_api/md_utils/sas_blob_utils.py +0 -509
- api/synchronous/api_core/animal_detection_api/md_utils/string_utils.py +0 -59
- api/synchronous/api_core/animal_detection_api/md_utils/url_utils.py +0 -144
- api/synchronous/api_core/animal_detection_api/md_utils/write_html_image_list.py +0 -226
- api/synchronous/api_core/animal_detection_api/md_visualization/visualization_utils.py +0 -841
- api/synchronous/api_core/tests/__init__.py +0 -0
- api/synchronous/api_core/tests/load_test.py +0 -110
- classification/__init__.py +0 -0
- classification/aggregate_classifier_probs.py +0 -108
- classification/analyze_failed_images.py +0 -227
- classification/cache_batchapi_outputs.py +0 -198
- classification/create_classification_dataset.py +0 -627
- classification/crop_detections.py +0 -516
- classification/csv_to_json.py +0 -226
- classification/detect_and_crop.py +0 -855
- classification/efficientnet/__init__.py +0 -9
- classification/efficientnet/model.py +0 -415
- classification/efficientnet/utils.py +0 -610
- classification/evaluate_model.py +0 -520
- classification/identify_mislabeled_candidates.py +0 -152
- classification/json_to_azcopy_list.py +0 -63
- classification/json_validator.py +0 -695
- classification/map_classification_categories.py +0 -276
- classification/merge_classification_detection_output.py +0 -506
- classification/prepare_classification_script.py +0 -194
- classification/prepare_classification_script_mc.py +0 -228
- classification/run_classifier.py +0 -286
- classification/save_mislabeled.py +0 -110
- classification/train_classifier.py +0 -825
- classification/train_classifier_tf.py +0 -724
- classification/train_utils.py +0 -322
- data_management/__init__.py +0 -0
- data_management/annotations/__init__.py +0 -0
- data_management/annotations/annotation_constants.py +0 -34
- data_management/camtrap_dp_to_coco.py +0 -238
- data_management/cct_json_utils.py +0 -395
- data_management/cct_to_md.py +0 -176
- data_management/cct_to_wi.py +0 -289
- data_management/coco_to_labelme.py +0 -272
- data_management/coco_to_yolo.py +0 -662
- data_management/databases/__init__.py +0 -0
- data_management/databases/add_width_and_height_to_db.py +0 -33
- data_management/databases/combine_coco_camera_traps_files.py +0 -206
- data_management/databases/integrity_check_json_db.py +0 -477
- data_management/databases/subset_json_db.py +0 -115
- data_management/generate_crops_from_cct.py +0 -149
- data_management/get_image_sizes.py +0 -188
- data_management/importers/add_nacti_sizes.py +0 -52
- data_management/importers/add_timestamps_to_icct.py +0 -79
- data_management/importers/animl_results_to_md_results.py +0 -158
- data_management/importers/auckland_doc_test_to_json.py +0 -372
- data_management/importers/auckland_doc_to_json.py +0 -200
- data_management/importers/awc_to_json.py +0 -189
- data_management/importers/bellevue_to_json.py +0 -273
- data_management/importers/cacophony-thermal-importer.py +0 -796
- data_management/importers/carrizo_shrubfree_2018.py +0 -268
- data_management/importers/carrizo_trail_cam_2017.py +0 -287
- data_management/importers/cct_field_adjustments.py +0 -57
- data_management/importers/channel_islands_to_cct.py +0 -913
- data_management/importers/eMammal/copy_and_unzip_emammal.py +0 -180
- data_management/importers/eMammal/eMammal_helpers.py +0 -249
- data_management/importers/eMammal/make_eMammal_json.py +0 -223
- data_management/importers/ena24_to_json.py +0 -275
- data_management/importers/filenames_to_json.py +0 -385
- data_management/importers/helena_to_cct.py +0 -282
- data_management/importers/idaho-camera-traps.py +0 -1407
- data_management/importers/idfg_iwildcam_lila_prep.py +0 -294
- data_management/importers/jb_csv_to_json.py +0 -150
- data_management/importers/mcgill_to_json.py +0 -250
- data_management/importers/missouri_to_json.py +0 -489
- data_management/importers/nacti_fieldname_adjustments.py +0 -79
- data_management/importers/noaa_seals_2019.py +0 -181
- data_management/importers/pc_to_json.py +0 -365
- data_management/importers/plot_wni_giraffes.py +0 -123
- data_management/importers/prepare-noaa-fish-data-for-lila.py +0 -359
- data_management/importers/prepare_zsl_imerit.py +0 -131
- data_management/importers/rspb_to_json.py +0 -356
- data_management/importers/save_the_elephants_survey_A.py +0 -320
- data_management/importers/save_the_elephants_survey_B.py +0 -332
- data_management/importers/snapshot_safari_importer.py +0 -758
- data_management/importers/snapshot_safari_importer_reprise.py +0 -665
- data_management/importers/snapshot_serengeti_lila.py +0 -1067
- data_management/importers/snapshotserengeti/make_full_SS_json.py +0 -150
- data_management/importers/snapshotserengeti/make_per_season_SS_json.py +0 -153
- data_management/importers/sulross_get_exif.py +0 -65
- data_management/importers/timelapse_csv_set_to_json.py +0 -490
- data_management/importers/ubc_to_json.py +0 -399
- data_management/importers/umn_to_json.py +0 -507
- data_management/importers/wellington_to_json.py +0 -263
- data_management/importers/wi_to_json.py +0 -441
- data_management/importers/zamba_results_to_md_results.py +0 -181
- data_management/labelme_to_coco.py +0 -548
- data_management/labelme_to_yolo.py +0 -272
- data_management/lila/__init__.py +0 -0
- data_management/lila/add_locations_to_island_camera_traps.py +0 -97
- data_management/lila/add_locations_to_nacti.py +0 -147
- data_management/lila/create_lila_blank_set.py +0 -557
- data_management/lila/create_lila_test_set.py +0 -151
- data_management/lila/create_links_to_md_results_files.py +0 -106
- data_management/lila/download_lila_subset.py +0 -177
- data_management/lila/generate_lila_per_image_labels.py +0 -515
- data_management/lila/get_lila_annotation_counts.py +0 -170
- data_management/lila/get_lila_image_counts.py +0 -111
- data_management/lila/lila_common.py +0 -300
- data_management/lila/test_lila_metadata_urls.py +0 -132
- data_management/ocr_tools.py +0 -874
- data_management/read_exif.py +0 -681
- data_management/remap_coco_categories.py +0 -84
- data_management/remove_exif.py +0 -66
- data_management/resize_coco_dataset.py +0 -189
- data_management/wi_download_csv_to_coco.py +0 -246
- data_management/yolo_output_to_md_output.py +0 -441
- data_management/yolo_to_coco.py +0 -676
- detection/__init__.py +0 -0
- detection/detector_training/__init__.py +0 -0
- detection/detector_training/model_main_tf2.py +0 -114
- detection/process_video.py +0 -703
- detection/pytorch_detector.py +0 -337
- detection/run_detector.py +0 -779
- detection/run_detector_batch.py +0 -1219
- detection/run_inference_with_yolov5_val.py +0 -917
- detection/run_tiled_inference.py +0 -935
- detection/tf_detector.py +0 -188
- detection/video_utils.py +0 -606
- docs/source/conf.py +0 -43
- md_utils/__init__.py +0 -0
- md_utils/azure_utils.py +0 -174
- md_utils/ct_utils.py +0 -612
- md_utils/directory_listing.py +0 -246
- md_utils/md_tests.py +0 -968
- md_utils/path_utils.py +0 -1044
- md_utils/process_utils.py +0 -157
- md_utils/sas_blob_utils.py +0 -509
- md_utils/split_locations_into_train_val.py +0 -228
- md_utils/string_utils.py +0 -92
- md_utils/url_utils.py +0 -323
- md_utils/write_html_image_list.py +0 -225
- md_visualization/__init__.py +0 -0
- md_visualization/plot_utils.py +0 -293
- md_visualization/render_images_with_thumbnails.py +0 -275
- md_visualization/visualization_utils.py +0 -1537
- md_visualization/visualize_db.py +0 -551
- md_visualization/visualize_detector_output.py +0 -406
- megadetector-5.0.10.dist-info/RECORD +0 -224
- megadetector-5.0.10.dist-info/top_level.txt +0 -8
- taxonomy_mapping/__init__.py +0 -0
- taxonomy_mapping/map_lila_taxonomy_to_wi_taxonomy.py +0 -491
- taxonomy_mapping/map_new_lila_datasets.py +0 -154
- taxonomy_mapping/prepare_lila_taxonomy_release.py +0 -142
- taxonomy_mapping/preview_lila_taxonomy.py +0 -591
- taxonomy_mapping/retrieve_sample_image.py +0 -71
- taxonomy_mapping/simple_image_download.py +0 -218
- taxonomy_mapping/species_lookup.py +0 -834
- taxonomy_mapping/taxonomy_csv_checker.py +0 -159
- taxonomy_mapping/taxonomy_graph.py +0 -346
- taxonomy_mapping/validate_lila_category_mappings.py +0 -83
- {megadetector-5.0.10.dist-info → megadetector-5.0.11.dist-info}/WHEEL +0 -0
@@ -1,174 +0,0 @@
-########
-#
-# azure_utils.py
-#
-# Miscellaneous Azure Blob Storage utilities
-#
-# Requires azure-storage-blob>=12.4.0
-#
-########
-
-import json
-from md_utils import path_utils
-from typing import Any, Iterable, List, Optional, Tuple, Union
-
-from azure.storage.blob import BlobPrefix, ContainerClient
-
-from md_utils import sas_blob_utils
-
-
-def walk_container(container_client: ContainerClient,
-                   max_depth: int = -1,
-                   prefix: str = '',
-                   store_folders: bool = True,
-                   store_blobs: bool = True,
-                   debug_max_items: int = -1) -> Tuple[List[str], List[str]]:
-    """
-    Recursively walk folders a Azure Blob Storage container.
-
-    Based on:
-    https://github.com/Azure/azure-sdk-for-python/blob/master/sdk/storage/azure-storage-blob/samples/blob_samples_walk_blob_hierarchy.py
-    """
-
-    depth = 1
-
-    def walk_blob_hierarchy(prefix: str,
-                            folders: Optional[List[str]] = None,
-                            blobs: Optional[List[str]] = None
-                            ) -> Tuple[List[str], List[str]]:
-        if folders is None:
-            folders = []
-        if blobs is None:
-            blobs = []
-
-        nonlocal depth
-
-        if 0 < max_depth < depth:
-            return folders, blobs
-
-        for item in container_client.walk_blobs(name_starts_with=prefix):
-            short_name = item.name[len(prefix):]
-            if isinstance(item, BlobPrefix):
-                # print('F: ' + prefix + short_name)
-                if store_folders:
-                    folders.append(prefix + short_name)
-                depth += 1
-                walk_blob_hierarchy(item.name, folders=folders, blobs=blobs)
-                if (debug_max_items > 0
-                        and len(folders) + len(blobs) > debug_max_items):
-                    return folders, blobs
-                depth -= 1
-            else:
-                if store_blobs:
-                    blobs.append(prefix + short_name)
-
-        return folders, blobs
-
-    folders, blobs = walk_blob_hierarchy(prefix=prefix)
-
-    assert all(s.endswith('/') for s in folders)
-    folders = [s.strip('/') for s in folders]
-
-    return folders, blobs
-
-
-def list_top_level_blob_folders(container_client: ContainerClient) -> List[str]:
-    """
-    List all top-level folders in a container.
-    """
-
-    top_level_folders, _ = walk_container(
-        container_client, max_depth=1, store_blobs=False)
-    return top_level_folders
-
-
-def concatenate_json_lists(input_files: Iterable[str],
-                           output_file: Optional[str] = None
-                           ) -> List[Any]:
-    """
-    Given a list of JSON files that contain lists (typically string
-    filenames), concatenates the lists into a single list and optionally
-    writes out this list to a new output JSON file.
-    """
-
-    output_list = []
-    for fn in input_files:
-        with open(fn, 'r') as f:
-            file_list = json.load(f)
-        output_list.extend(file_list)
-    if output_file is not None:
-        with open(output_file, 'w') as f:
-            json.dump(output_list, f, indent=1)
-    return output_list
-
-
-def upload_file_to_blob(account_name: str,
-                        container_name: str,
-                        local_path: str,
-                        blob_name: str,
-                        sas_token: str,
-                        overwrite: bool=False) -> str:
-    """
-    Uploads a local file to Azure Blob Storage and returns the uploaded
-    blob URI with SAS token.
-    """
-
-    container_uri = sas_blob_utils.build_azure_storage_uri(
-        account=account_name, container=container_name, sas_token=sas_token)
-    with open(local_path, 'rb') as data:
-        return sas_blob_utils.upload_blob(
-            container_uri=container_uri, blob_name=blob_name, data=data,
-            overwrite=overwrite)
-
-
-def enumerate_blobs_to_file(
-        output_file: str,
-        account_name: str,
-        container_name: str,
-        sas_token: Optional[str] = None,
-        blob_prefix: Optional[str] = None,
-        blob_suffix: Optional[Union[str, Tuple[str]]] = None,
-        rsearch: Optional[str] = None,
-        limit: Optional[int] = None,
-        verbose: Optional[bool] = True
-        ) -> List[str]:
-    """
-    Enumerates blobs in a container, and writes the blob names to an output
-    file.
-
-    Args:
-        output_file: str, path to save list of files in container
-            If ends in '.json', writes a JSON string. Otherwise, writes a
-            newline-delimited list. Can be None, in which case this is just a
-            convenient wrapper for blob enumeration.
-        account_name: str, Azure Storage account name
-        container_name: str, Azure Blob Storage container name
-        sas_token: optional str, container SAS token, leading ? will be removed if present.
-        blob_prefix: optional str, returned results will only contain blob names
-            to with this prefix
-        blob_suffix: optional str or tuple of str, returned results will only
-            contain blob names with this/these suffix(es). The blob names will
-            be lowercased first before comparing with the suffix(es).
-        rsearch: optional str, returned results will only contain blob names
-            that match this regex. Can also be a list of regexes, in which case
-            blobs matching *any* of the regex's will be returned.
-        limit: int, maximum # of blob names to list
-            if None, then returns all blob names
-
-    Returns: list of str, sorted blob names, of length limit or shorter.
-    """
-
-    if sas_token is not None and len(sas_token) > 9 and sas_token[0] == '?':
-        sas_token = sas_token[1:]
-
-    container_uri = sas_blob_utils.build_azure_storage_uri(
-        account=account_name, container=container_name, sas_token=sas_token)
-
-    matched_blobs = sas_blob_utils.list_blobs_in_container(
-        container_uri=container_uri, blob_prefix=blob_prefix,
-        blob_suffix=blob_suffix, rsearch=rsearch, limit=limit, verbose=verbose)
-
-    if output_file is not None:
-        path_utils.write_list_to_file(output_file, matched_blobs)
-
-    return matched_blobs
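For orientation, here is a minimal usage sketch (not part of the diff) showing how the removed blob-enumeration helpers above would typically be combined. It assumes the pre-removal import path md_utils.azure_utils; the account name, container name, prefix, and SAS token are hypothetical placeholders.

from azure.storage.blob import ContainerClient
from md_utils.azure_utils import (list_top_level_blob_folders,
                                  enumerate_blobs_to_file)

# Hypothetical storage coordinates (placeholders, not real credentials)
account_name = 'myaccount'
container_name = 'mycontainer'
sas_token = 'sv=...'

# List top-level "folders" via walk_container(), wrapped by this helper
container_client = ContainerClient(
    account_url=f'https://{account_name}.blob.core.windows.net',
    container_name=container_name,
    credential=sas_token)
top_level_folders = list_top_level_blob_folders(container_client)

# Enumerate .jpg blobs under a prefix and write the names to a text file
matched_blobs = enumerate_blobs_to_file(
    output_file='blob_list.txt',
    account_name=account_name,
    container_name=container_name,
    sas_token=sas_token,
    blob_prefix='images/',
    blob_suffix='.jpg')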
@@ -1,262 +0,0 @@
-########
-#
-# ct_utils.py
-#
-# Utility functions that don't depend on other things in this repo.
-#
-########
-
-#%% Imports and constants
-
-import argparse
-import inspect
-import json
-import math
-import os
-
-import jsonpickle
-import numpy as np
-
-# List of file extensions we'll consider images; comparisons will be case-insensitive
-# (i.e., no need to include both .jpg and .JPG on this list).
-image_extensions = ['.jpg', '.jpeg', '.gif', '.png']
-
-
-#%% Functions
-
-def truncate_float_array(xs, precision=3):
-    """
-    Vectorized version of truncate_float(...)
-
-    Args:
-        xs (list of float) List of floats to truncate
-        precision (int) The number of significant digits to preserve, should be
-            greater or equal 1
-    """
-
-    return [truncate_float(x, precision=precision) for x in xs]
-
-
-def truncate_float(x, precision=3):
-    """
-    Truncates a floating-point value to a specific number of significant digits.
-
-    For example: truncate_float(0.0003214884) --> 0.000321
-
-    This function is primarily used to achieve a certain float representation
-    before exporting to JSON.
-
-    Args:
-        x (float) Scalar to truncate
-        precision (int) The number of significant digits to preserve, should be
-            greater or equal 1
-    """
-
-    assert precision > 0
-
-    if np.isclose(x, 0):
-
-        return 0
-
-    else:
-
-        # Determine the factor, which shifts the decimal point of x
-        # just behind the last significant digit.
-        factor = math.pow(10, precision - 1 - math.floor(math.log10(abs(x))))
-
-        # Shift decimal point by multiplicatipon with factor, flooring, and
-        # division by factor.
-        return math.floor(x * factor)/factor
-
-
-def args_to_object(args: argparse.Namespace, obj: object) -> None:
-    """
-    Copies all fields from a Namespace (i.e., the output from parse_args) to an
-    object. Skips fields starting with _. Does not check existence in the target
-    object.
-
-    Args:
-        args: argparse.Namespace
-        obj: class or object whose whose attributes will be updated
-    """
-
-    for n, v in inspect.getmembers(args):
-        if not n.startswith('_'):
-            setattr(obj, n, v)
-
-
-def pretty_print_object(obj, b_print=True):
-    """
-    Prints an arbitrary object as .json
-    """
-
-    # _ = pretty_print_object(obj)
-
-    # Sloppy that I'm making a module-wide change here...
-    jsonpickle.set_encoder_options('json', sort_keys=True, indent=2)
-    a = jsonpickle.encode(obj)
-    s = '{}'.format(a)
-    if b_print:
-        print(s)
-    return s
-
-
-def is_list_sorted(L,reverse=False):
-    """
-    Returns true if the list L appears to be sorted, otherwise False.
-
-    Calling is_list_sorted(L,reverse=True) is the same as calling
-    is_list_sorted(L.reverse(),reverse=False).
-    """
-
-    if reverse:
-        return all(L[i] >= L[i + 1] for i in range(len(L)-1))
-    else:
-        return all(L[i] <= L[i + 1] for i in range(len(L)-1))
-
-
-def write_json(path, content, indent=1):
-    """
-    Standardized wrapper for json.dump
-    """
-
-    with open(path, 'w') as f:
-        json.dump(content, f, indent=indent)
-
-
-def is_image_file(s):
-    """
-    Checks a file's extension against a hard-coded set of image file extensions;
-    return True if it appears to be an image.
-    """
-
-    ext = os.path.splitext(s)[1]
-    return ext.lower() in image_extensions
-
-
-def convert_yolo_to_xywh(yolo_box):
-    """
-    Converts a YOLO format bounding box to [x_min, y_min, width_of_box, height_of_box].
-
-    Args:
-        yolo_box: bounding box of format [x_center, y_center, width_of_box, height_of_box].
-
-    Returns:
-        bbox with coordinates represented as [x_min, y_min, width_of_box, height_of_box].
-    """
-
-    x_center, y_center, width_of_box, height_of_box = yolo_box
-    x_min = x_center - width_of_box / 2.0
-    y_min = y_center - height_of_box / 2.0
-    return [x_min, y_min, width_of_box, height_of_box]
-
-
-def convert_xywh_to_tf(api_box):
-    """
-    Converts an xywh bounding box to an [y_min, x_min, y_max, x_max] box that the TensorFlow
-    Object Detection API uses
-
-    Args:
-        api_box: bbox output by the batch processing API [x_min, y_min, width_of_box, height_of_box]
-
-    Returns:
-        bbox with coordinates represented as [y_min, x_min, y_max, x_max]
-    """
-
-    x_min, y_min, width_of_box, height_of_box = api_box
-    x_max = x_min + width_of_box
-    y_max = y_min + height_of_box
-    return [y_min, x_min, y_max, x_max]
-
-
-def convert_xywh_to_xyxy(api_bbox):
-    """
-    Converts an xywh bounding box to an xyxy bounding box.
-
-    Note that this is also different from the TensorFlow Object Detection API coords format.
-    Args:
-        api_bbox: bbox output by the batch processing API [x_min, y_min, width_of_box, height_of_box]
-
-    Returns:
-        bbox with coordinates represented as [x_min, y_min, x_max, y_max]
-    """
-
-    x_min, y_min, width_of_box, height_of_box = api_bbox
-    x_max, y_max = x_min + width_of_box, y_min + height_of_box
-    return [x_min, y_min, x_max, y_max]
-
-
-def get_iou(bb1, bb2):
-    """
-    Calculates the Intersection over Union (IoU) of two bounding boxes.
-
-    Adapted from:
-
-    https://stackoverflow.com/questions/25349178/calculating-percentage-of-bounding-box-overlap-for-image-detector-evaluation
-
-    Args:
-        bb1: [x_min, y_min, width_of_box, height_of_box]
-        bb2: [x_min, y_min, width_of_box, height_of_box]
-
-    Returns:
-        intersection_over_union, a float in [0, 1]
-    """
-
-    bb1 = convert_xywh_to_xyxy(bb1)
-    bb2 = convert_xywh_to_xyxy(bb2)
-
-    assert bb1[0] < bb1[2], 'Malformed bounding box (x2 >= x1)'
-    assert bb1[1] < bb1[3], 'Malformed bounding box (y2 >= y1)'
-
-    assert bb2[0] < bb2[2], 'Malformed bounding box (x2 >= x1)'
-    assert bb2[1] < bb2[3], 'Malformed bounding box (y2 >= y1)'
-
-    # Determine the coordinates of the intersection rectangle
-    x_left = max(bb1[0], bb2[0])
-    y_top = max(bb1[1], bb2[1])
-    x_right = min(bb1[2], bb2[2])
-    y_bottom = min(bb1[3], bb2[3])
-
-    if x_right < x_left or y_bottom < y_top:
-        return 0.0
-
-    # The intersection of two axis-aligned bounding boxes is always an
-    # axis-aligned bounding box
-    intersection_area = (x_right - x_left) * (y_bottom - y_top)
-
-    # Compute the area of both AABBs
-    bb1_area = (bb1[2] - bb1[0]) * (bb1[3] - bb1[1])
-    bb2_area = (bb2[2] - bb2[0]) * (bb2[3] - bb2[1])
-
-    # Compute the intersection over union by taking the intersection
-    # area and dividing it by the sum of prediction + ground-truth
-    # areas - the intersection area.
-    iou = intersection_area / float(bb1_area + bb2_area - intersection_area)
-    assert iou >= 0.0, 'Illegal IOU < 0'
-    assert iou <= 1.0, 'Illegal IOU > 1'
-    return iou
-
-
-def _get_max_conf_from_detections(detections):
-    """
-    Internal function used by get_max_conf(); don't call this directly.
-    """
-
-    max_conf = 0.0
-    if detections is not None and len(detections) > 0:
-        confidences = [det['conf'] for det in detections]
-        max_conf = max(confidences)
-    return max_conf
-
-
-def get_max_conf(im):
-    """
-    Given an image dict in the format used by the batch API, compute the maximum detection
-    confidence for any class. Returns 0.0 (not None) if there was a failure and 'detections'
-    isn't present.
-    """
-
-    max_conf = 0.0
-    if 'detections' in im and im['detections'] is not None and len(im['detections']) > 0:
-        max_conf = _get_max_conf_from_detections(im['detections'])
-    return max_conf
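A minimal sketch (not part of the diff) of the bounding-box and confidence helpers defined in the removed ct_utils module above, assuming the pre-removal import path md_utils.ct_utils; the boxes and the detection record are made-up illustrative values.

from md_utils.ct_utils import (truncate_float, convert_xywh_to_xyxy,
                               get_iou, get_max_conf)

# Truncate a confidence value to three significant digits before JSON export
print(truncate_float(0.0003214884))   # 0.000321 (per the docstring example)

# IoU of two normalized [x_min, y_min, width, height] boxes (made-up values)
box_a = [0.10, 0.10, 0.40, 0.40]
box_b = [0.30, 0.30, 0.40, 0.40]
print(convert_xywh_to_xyxy(box_a))    # [0.1, 0.1, 0.5, 0.5]
print(get_iou(box_a, box_b))          # intersection / union, a float in [0, 1]

# Maximum detection confidence for a batch-API-style image record
im = {'file': 'example.jpg',
      'detections': [{'category': '1', 'conf': 0.92, 'bbox': box_a},
                     {'category': '2', 'conf': 0.40, 'bbox': box_b}]}
print(get_max_conf(im))               # 0.92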
@@ -1,251 +0,0 @@
-########
-#
-# directory_listing.py
-#
-# Script for creating HTML directory listings for a local directory and
-# all its subdirectories. Primarily intended for use on mounted blob
-# containers, so it includes the ability to set the content-type property
-# on the generated html using the blob SDK, so it can be browser-viewable.
-#
-# Also includes a preview of a jpg file (the first in an alphabetical list),
-# if present.
-#
-# Sample invocation:
-#
-# python directory_listing.py /naipout/v002 --basepath /naipout/v002 \
-#     --enable_overwrite \
-#     --sas_url "https://naipblobs.blob.core.windows.net/naip/v002?sv=..."
-#
-########
-
-#%% Imports
-
-import os
-import sys
-import argparse
-import re
-
-import azure.common
-from azure.storage.blob import BlobServiceClient, ContentSettings
-
-
-#%% Directory enumeration functions
-
-def is_image_file(path):
-    """
-    Checks whether the provided file path points to an image by checking the
-    file extension. The following file extensions are considered images: jpg,
-    jpeg. The check is case-insensitive.
-
-    Args:
-        path: string, path to image file or just the file name
-
-    Returns:
-        boolean, True if the file is an image
-    """
-
-    return os.path.splitext(path)[1].lower()[1:] in ['jpg', 'jpeg'] #, 'gif', 'tiff', 'tif', 'png']
-
-
-def create_plain_index(root, dirs, files, dirname=None):
-    """
-    Creates the fairly plain HTML folder index
-    including a preview of a single image file, if any is present.
-    Returns the HTML source as string.
-
-    Args:
-        root: string, path to the root directory, all paths in *dirs* and
-            *files* are relative to this one
-        dirs: list of strings, the directories in *root*
-        files: list of string, the files in *root*
-        dirname: name to print in the html, which may be different than *root*
-
-    Returns: HTML source of the directory listing
-    """
-
-    if dirname is None:
-        dirname = root or '/'
-
-    html = "<!DOCTYPE html>\n"
-    html += "<html lang='en'><head>"
-    html += "<title>Index of {}</title>\n".format(dirname)
-    html += "<meta charset='UTF-8'>\n"
-    html += "<style>\n"
-    html += "body { font-family: Segoe UI, Helvetica, Arial, sans-serif; }\n"
-    html += "a { text-decoration:none; }\n"
-    html += "</style>\n"
-    html += "</head><body>\n"
-
-    html += "<h1>Index of {}</h1>\n".format(dirname)
-
-    # Insert preview image
-    jpg_files = [f for f in files if is_image_file(f)]
-
-    if len(jpg_files) > 0:
-
-        # This is slow, so defaulting to just the first image:
-        #
-        # Use the largest image file as this is most likely to contain
-        # useful content.
-        #
-        # jpg_file_sizes = [os.path.getsize(f) for f in jpg_files]
-        # largest_file_index = max(range(len(jpg_files)), key=lambda x: jpg_file_sizes[x])
-
-        html += "<a href='{0}'><img style='height:200px; float:right;' src='{0}' alt='Preview image'></a>\n".format(jpg_files[0])
-    else:
-        html += "\n"
-        # html += "<p style='width:15em; float:right; margin:0;'>[No preview available]</p>\n"
-
-    if root:
-        html += "<p><a href='../index.html'>To parent directory</a></p>\n"
-    else:
-        html += "\n"
-        # html += "<p>This is the root directory.</p>\n"
-
-    html += "<h2>Folders</h2>\n"
-    if len(dirs) > 0:
-        html += "<ul style='list-style-type: none; padding-left:1em;'>\n"
-        for dir in sorted(dirs):
-            html += "<li>📁 <a href='{0}/index.html'>{0}</a></li>\n".format(dir)
-        html += "</ul>\n"
-    else:
-        html += "<p style='padding-left:1em;'>No folders</p>\n"
-
-    html += "<h2>Files</h2>\n"
-    if len(files) > 0:
-        html += "<ul style='list-style-type: none; padding-left:1em;'>\n"
-        for fname in sorted(files):
-            if is_image_file(fname):
-                html += "<li>🖻 <a href='{0}'>{0}</a></li>\n".format(fname)
-            else:
-                html += "<li>🖺 <a href='{0}'>{0}</a></li>\n".format(fname)
-        html += "</ul>\n"
-    else:
-        html += "<p style = 'padding-left:1em;'>No files</p>\n"
-
-    # Add some space at the bottom because the browser's status bar might hide stuff
-    html += "<p style='margin:2em;'> </p>\n"
-    html += "</body></html>\n"
-    return html
-
-
-def traverse_and_create_index(dir, sas_url=None, overwrite_files=False,
-                              template_fun=create_plain_index, basepath=None):
-    """
-    Recursively traverses the local directory *dir* and generates a index
-    file for each folder using *template_fun* to generate the HTML output.
-    Excludes hidden files.
-
-    Args:
-        dir: string, path to directory
-        template_fun: function taking three arguments (string, list of string, list of string)
-            representing the current root, the list of folders, and the list of files.
-            Should return the HTML source of the index file
-
-    Return:
-        None
-    """
-
-    print("Traversing {}".format(dir))
-
-    # Make sure we remove the trailing /
-    dir = os.path.normpath(dir)
-
-    # If we want to set the content type in blob storage using a SAS URL
-    if sas_url:
-
-        # Example: sas_url = 'https://accname.blob.core.windows.net/bname/path/to/folder?st=...&se=...&sp=...&...'
-        if '?' in sas_url:
-            # 'https://accname.blob.core.windows.net/bname/path/to/folder' and 'st=...&se=...&sp=...&...'
-            base_url, sas_token = sas_url.split('?', 1)
-        else:
-            # 'https://accname.blob.core.windows.net/bname/path/to/folder' and None
-            base_url, sas_token = sas_url, None
-        # Remove https:// from base url
-        # 'accname.blob.core.windows.net/bname/path/to/folder'
-        base_url = base_url.split("//", 1)[1]
-        # Everything up to the first dot is account name
-        # 'accname'
-        account_name = base_url.split(".", 1)[0]
-        # get everything after the first /
-        # 'bname/path/to/folder'
-        query_string = base_url.split("/", 1)[1]
-        # Get container name and subfolder
-        if '/' in query_string:
-            # 'bname', 'path/to/folder'
-            container_name, container_folder = query_string.split("/", 1)
-        else:
-            container_name, container_folder = query_string, ''
-
-        # Prepare the storage access
-        target_settings = ContentSettings(content_type='text/html')
-        blob_service = BlobServiceClient(
-            account_url=f'{account_name}.blob.core.windows.net',
-            credential=sas_token)
-
-    # Traverse directory and all sub directories, excluding hidden files
-    for root, dirs, files in os.walk(dir):
-
-        # Exclude files and folders that are hidden
-        files = [f for f in files if not f[0] == '.']
-        dirs[:] = [d for d in dirs if not d[0] == '.']
-
-        # Output is written to file *root*/index.html
-        output_file = os.path.join(root, "index.html")
-
-        if not overwrite_files and os.path.isfile(output_file):
-            print('Skipping {}, file exists'.format(output_file))
-            continue
-
-        print("Generating {}".format(output_file))
-
-        # Generate HTML with template function
-        dirname = None
-        if basepath is not None:
-            dirname = os.path.relpath(root,basepath)
-        html = template_fun(root[len(dir):], dirs, files, dirname)
-
-        # Write to file
-        with open(output_file, 'wt') as fi:
-            fi.write(html)
-
-        # Set content type in blob storage
-        if sas_url:
-            if container_folder:
-                output_blob_path = container_folder + '/' + output_file[len(dir) + 1:]
-            else:
-                output_blob_path = output_file[len(dir) + 1:]
-            try:
-                blob_client = blob_service.get_blob_client(container_name, output_blob_path)
-                blob_client.set_http_headers(content_settings=target_settings)
-            except azure.common.AzureMissingResourceHttpError:
-                print('ERROR: It seems the SAS URL is incorrect or does not allow setting properties.')
-                return
-
-
-#%% Command-line driver
-
-def parse_args(argv=sys.argv[1:]):
-
-    parser = argparse.ArgumentParser()
-    parser.add_argument("directory", type=str, help='Path to directory which should be traversed.')
-    parser.add_argument("--basepath", type=str, help='Folder names will be printed relative to basepath, if specified', default=None)
-    parser.add_argument("--sas_url", type=str, help='Blobfuse does not set the content-type property ' + \
-        'properly and hence index.html won\'t be accessible in the browser. If you want to set the ' + \
-        'content-type in the corresponding blob storage, provide the SAS URL that corresponds to the ' + \
-        "directory, e.g. if *directory* is /mountpoint/path/to/folder, then *--sas_url* looks like " + \
-        "'https://accname.blob.core.windows.net/bname/path/to/folder?st=...&se=...&sp=...&...'")
-    parser.add_argument("--enable_overwrite", action='store_true', default=False,
-        help='If set, the script will overwrite existing index.html files.')
-    return parser.parse_args(argv)
-
-
-if __name__ == '__main__':
-
-    args = parse_args()
-
-    assert os.path.isdir(args.directory), "{} is not a valid directory".format(args.directory)
-    assert re.match('https?://[^\.]+\.blob\.core\.windows\.net/.+', args.sas_url), "--sas_url does not " + \
-        "match the format https://accname.blob.core.windows.net/bname/path/to/folder?..."
-
-    traverse_and_create_index(args.directory, overwrite_files=args.enable_overwrite, sas_url=args.sas_url, basepath=args.basepath)
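Finally, a minimal sketch (not part of the diff) of calling the removed directory_listing helpers from Python rather than via the sample command line shown in the file header. The mount paths are hypothetical placeholders, and sas_url is left as None, so no blob content-type is set.

from directory_listing import traverse_and_create_index

# Generate an index.html in every subfolder of a (hypothetical) mounted
# container, overwriting existing listings; folder names in the HTML are
# printed relative to basepath.
traverse_and_create_index(
    '/mnt/container/images',
    sas_url=None,
    overwrite_files=True,
    basepath='/mnt/container')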