megadetector-5.0.8-py3-none-any.whl → megadetector-5.0.10-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
- api/__init__.py +0 -0
- api/batch_processing/__init__.py +0 -0
- api/batch_processing/api_core/__init__.py +0 -0
- api/batch_processing/api_core/batch_service/__init__.py +0 -0
- api/batch_processing/api_core/batch_service/score.py +0 -1
- api/batch_processing/api_core/server_job_status_table.py +0 -1
- api/batch_processing/api_core_support/__init__.py +0 -0
- api/batch_processing/api_core_support/aggregate_results_manually.py +0 -1
- api/batch_processing/api_support/__init__.py +0 -0
- api/batch_processing/api_support/summarize_daily_activity.py +0 -1
- api/batch_processing/data_preparation/__init__.py +0 -0
- api/batch_processing/data_preparation/manage_local_batch.py +65 -65
- api/batch_processing/data_preparation/manage_video_batch.py +8 -8
- api/batch_processing/integration/digiKam/xmp_integration.py +0 -1
- api/batch_processing/integration/eMammal/test_scripts/push_annotations_to_emammal.py +0 -1
- api/batch_processing/postprocessing/__init__.py +0 -0
- api/batch_processing/postprocessing/add_max_conf.py +12 -12
- api/batch_processing/postprocessing/categorize_detections_by_size.py +32 -14
- api/batch_processing/postprocessing/combine_api_outputs.py +68 -54
- api/batch_processing/postprocessing/compare_batch_results.py +113 -43
- api/batch_processing/postprocessing/convert_output_format.py +41 -16
- api/batch_processing/postprocessing/load_api_results.py +16 -17
- api/batch_processing/postprocessing/md_to_coco.py +31 -21
- api/batch_processing/postprocessing/md_to_labelme.py +52 -22
- api/batch_processing/postprocessing/merge_detections.py +14 -14
- api/batch_processing/postprocessing/postprocess_batch_results.py +246 -174
- api/batch_processing/postprocessing/remap_detection_categories.py +32 -25
- api/batch_processing/postprocessing/render_detection_confusion_matrix.py +60 -27
- api/batch_processing/postprocessing/repeat_detection_elimination/find_repeat_detections.py +53 -44
- api/batch_processing/postprocessing/repeat_detection_elimination/remove_repeat_detections.py +25 -14
- api/batch_processing/postprocessing/repeat_detection_elimination/repeat_detections_core.py +242 -158
- api/batch_processing/postprocessing/separate_detections_into_folders.py +159 -114
- api/batch_processing/postprocessing/subset_json_detector_output.py +146 -169
- api/batch_processing/postprocessing/top_folders_to_bottom.py +77 -43
- api/synchronous/__init__.py +0 -0
- api/synchronous/api_core/animal_detection_api/__init__.py +0 -0
- api/synchronous/api_core/animal_detection_api/api_backend.py +0 -2
- api/synchronous/api_core/animal_detection_api/api_frontend.py +266 -268
- api/synchronous/api_core/animal_detection_api/config.py +35 -35
- api/synchronous/api_core/tests/__init__.py +0 -0
- api/synchronous/api_core/tests/load_test.py +109 -109
- classification/__init__.py +0 -0
- classification/aggregate_classifier_probs.py +21 -24
- classification/analyze_failed_images.py +11 -13
- classification/cache_batchapi_outputs.py +51 -51
- classification/create_classification_dataset.py +69 -68
- classification/crop_detections.py +54 -53
- classification/csv_to_json.py +97 -100
- classification/detect_and_crop.py +105 -105
- classification/evaluate_model.py +43 -42
- classification/identify_mislabeled_candidates.py +47 -46
- classification/json_to_azcopy_list.py +10 -10
- classification/json_validator.py +72 -71
- classification/map_classification_categories.py +44 -43
- classification/merge_classification_detection_output.py +68 -68
- classification/prepare_classification_script.py +157 -154
- classification/prepare_classification_script_mc.py +228 -228
- classification/run_classifier.py +27 -26
- classification/save_mislabeled.py +30 -30
- classification/train_classifier.py +20 -20
- classification/train_classifier_tf.py +21 -22
- classification/train_utils.py +10 -10
- data_management/__init__.py +0 -0
- data_management/annotations/__init__.py +0 -0
- data_management/annotations/annotation_constants.py +18 -31
- data_management/camtrap_dp_to_coco.py +238 -0
- data_management/cct_json_utils.py +102 -59
- data_management/cct_to_md.py +176 -158
- data_management/cct_to_wi.py +247 -219
- data_management/coco_to_labelme.py +272 -263
- data_management/coco_to_yolo.py +79 -58
- data_management/databases/__init__.py +0 -0
- data_management/databases/add_width_and_height_to_db.py +20 -16
- data_management/databases/combine_coco_camera_traps_files.py +35 -31
- data_management/databases/integrity_check_json_db.py +62 -24
- data_management/databases/subset_json_db.py +24 -15
- data_management/generate_crops_from_cct.py +27 -45
- data_management/get_image_sizes.py +188 -162
- data_management/importers/add_nacti_sizes.py +8 -8
- data_management/importers/add_timestamps_to_icct.py +78 -78
- data_management/importers/animl_results_to_md_results.py +158 -158
- data_management/importers/auckland_doc_test_to_json.py +9 -9
- data_management/importers/auckland_doc_to_json.py +8 -8
- data_management/importers/awc_to_json.py +7 -7
- data_management/importers/bellevue_to_json.py +15 -15
- data_management/importers/cacophony-thermal-importer.py +13 -13
- data_management/importers/carrizo_shrubfree_2018.py +8 -8
- data_management/importers/carrizo_trail_cam_2017.py +8 -8
- data_management/importers/cct_field_adjustments.py +9 -9
- data_management/importers/channel_islands_to_cct.py +10 -10
- data_management/importers/eMammal/copy_and_unzip_emammal.py +1 -0
- data_management/importers/ena24_to_json.py +7 -7
- data_management/importers/filenames_to_json.py +8 -8
- data_management/importers/helena_to_cct.py +7 -7
- data_management/importers/idaho-camera-traps.py +7 -7
- data_management/importers/idfg_iwildcam_lila_prep.py +10 -10
- data_management/importers/jb_csv_to_json.py +9 -9
- data_management/importers/mcgill_to_json.py +8 -8
- data_management/importers/missouri_to_json.py +18 -18
- data_management/importers/nacti_fieldname_adjustments.py +10 -10
- data_management/importers/noaa_seals_2019.py +7 -7
- data_management/importers/pc_to_json.py +7 -7
- data_management/importers/plot_wni_giraffes.py +7 -7
- data_management/importers/prepare-noaa-fish-data-for-lila.py +359 -359
- data_management/importers/prepare_zsl_imerit.py +7 -7
- data_management/importers/rspb_to_json.py +8 -8
- data_management/importers/save_the_elephants_survey_A.py +8 -8
- data_management/importers/save_the_elephants_survey_B.py +9 -9
- data_management/importers/snapshot_safari_importer.py +26 -26
- data_management/importers/snapshot_safari_importer_reprise.py +665 -665
- data_management/importers/snapshot_serengeti_lila.py +14 -14
- data_management/importers/sulross_get_exif.py +8 -9
- data_management/importers/timelapse_csv_set_to_json.py +11 -11
- data_management/importers/ubc_to_json.py +13 -13
- data_management/importers/umn_to_json.py +7 -7
- data_management/importers/wellington_to_json.py +8 -8
- data_management/importers/wi_to_json.py +9 -9
- data_management/importers/zamba_results_to_md_results.py +181 -181
- data_management/labelme_to_coco.py +65 -24
- data_management/labelme_to_yolo.py +8 -8
- data_management/lila/__init__.py +0 -0
- data_management/lila/add_locations_to_island_camera_traps.py +9 -9
- data_management/lila/add_locations_to_nacti.py +147 -147
- data_management/lila/create_lila_blank_set.py +13 -13
- data_management/lila/create_lila_test_set.py +8 -8
- data_management/lila/create_links_to_md_results_files.py +106 -106
- data_management/lila/download_lila_subset.py +44 -110
- data_management/lila/generate_lila_per_image_labels.py +55 -42
- data_management/lila/get_lila_annotation_counts.py +18 -15
- data_management/lila/get_lila_image_counts.py +11 -11
- data_management/lila/lila_common.py +96 -33
- data_management/lila/test_lila_metadata_urls.py +132 -116
- data_management/ocr_tools.py +173 -128
- data_management/read_exif.py +110 -97
- data_management/remap_coco_categories.py +83 -83
- data_management/remove_exif.py +58 -62
- data_management/resize_coco_dataset.py +30 -23
- data_management/wi_download_csv_to_coco.py +246 -239
- data_management/yolo_output_to_md_output.py +86 -73
- data_management/yolo_to_coco.py +300 -60
- detection/__init__.py +0 -0
- detection/detector_training/__init__.py +0 -0
- detection/process_video.py +85 -33
- detection/pytorch_detector.py +43 -25
- detection/run_detector.py +157 -72
- detection/run_detector_batch.py +179 -113
- detection/run_inference_with_yolov5_val.py +108 -48
- detection/run_tiled_inference.py +111 -40
- detection/tf_detector.py +51 -29
- detection/video_utils.py +606 -521
- docs/source/conf.py +43 -0
- md_utils/__init__.py +0 -0
- md_utils/azure_utils.py +9 -9
- md_utils/ct_utils.py +228 -68
- md_utils/directory_listing.py +59 -64
- md_utils/md_tests.py +968 -871
- md_utils/path_utils.py +460 -134
- md_utils/process_utils.py +157 -133
- md_utils/sas_blob_utils.py +20 -20
- md_utils/split_locations_into_train_val.py +45 -32
- md_utils/string_utils.py +33 -10
- md_utils/url_utils.py +176 -60
- md_utils/write_html_image_list.py +40 -33
- md_visualization/__init__.py +0 -0
- md_visualization/plot_utils.py +102 -109
- md_visualization/render_images_with_thumbnails.py +34 -34
- md_visualization/visualization_utils.py +597 -291
- md_visualization/visualize_db.py +76 -48
- md_visualization/visualize_detector_output.py +61 -42
- {megadetector-5.0.8.dist-info → megadetector-5.0.10.dist-info}/METADATA +13 -7
- megadetector-5.0.10.dist-info/RECORD +224 -0
- {megadetector-5.0.8.dist-info → megadetector-5.0.10.dist-info}/top_level.txt +1 -0
- taxonomy_mapping/__init__.py +0 -0
- taxonomy_mapping/map_lila_taxonomy_to_wi_taxonomy.py +342 -335
- taxonomy_mapping/map_new_lila_datasets.py +154 -154
- taxonomy_mapping/prepare_lila_taxonomy_release.py +142 -134
- taxonomy_mapping/preview_lila_taxonomy.py +591 -591
- taxonomy_mapping/retrieve_sample_image.py +12 -12
- taxonomy_mapping/simple_image_download.py +11 -11
- taxonomy_mapping/species_lookup.py +10 -10
- taxonomy_mapping/taxonomy_csv_checker.py +18 -18
- taxonomy_mapping/taxonomy_graph.py +47 -47
- taxonomy_mapping/validate_lila_category_mappings.py +83 -76
- data_management/cct_json_to_filename_json.py +0 -89
- data_management/cct_to_csv.py +0 -140
- data_management/databases/remove_corrupted_images_from_db.py +0 -191
- detection/detector_training/copy_checkpoints.py +0 -43
- megadetector-5.0.8.dist-info/RECORD +0 -205
- {megadetector-5.0.8.dist-info → megadetector-5.0.10.dist-info}/LICENSE +0 -0
- {megadetector-5.0.8.dist-info → megadetector-5.0.10.dist-info}/WHEEL +0 -0
classification/prepare_classification_script.py

@@ -1,191 +1,194 @@
-
-
-
-
-
-
-
-
-
-
+"""
+
+prepare_classification_script.py
+
+Notebook-y script used to prepare a series of shell commands to run a classifier
+(other than MegaClassifier) on a MegaDetector result set.
+
+Differs from prepare_classification_script_mc.py only in the final class mapping step.
+
+"""

 #%% Job options

 import os

-
-
-
-
-
-
+def main():
+organization_name = 'idfg'
+job_name = 'idfg-2022-01-27-EOE2021S_Group6'
+input_filename = 'idfg-2022-01-27-EOE2021S_Group6_detections.filtered_rde_0.60_0.85_30_0.20.json'
+image_base = '/datadrive/idfg/EOE2021S_Group6'
+crop_path = os.path.join(os.path.expanduser('~/crops'),job_name + '_crops')
+device_id = 1

-working_dir_base = os.path.join(os.path.expanduser('~/postprocessing'),
-
-
+working_dir_base = os.path.join(os.path.expanduser('~/postprocessing'),
+organization_name,
+job_name)

-output_base = os.path.join(working_dir_base,'combined_api_outputs')
+output_base = os.path.join(working_dir_base,'combined_api_outputs')

-assert os.path.isdir(working_dir_base)
-assert os.path.isdir(output_base)
+assert os.path.isdir(working_dir_base)
+assert os.path.isdir(output_base)

-output_file = os.path.join(working_dir_base,'run_idfgclassifier_' + job_name + '.sh')
+output_file = os.path.join(working_dir_base,'run_idfgclassifier_' + job_name + '.sh')

-input_files = [
-
-
-
-
-
-
-
-
+input_files = [
+os.path.join(
+os.path.expanduser('~/postprocessing'),
+organization_name,
+job_name,
+'combined_api_outputs',
+input_filename
+)
+]

-for fn in input_files:
-
-
+for fn in input_files:
+assert os.path.isfile(fn)
+

-#%% Constants
+#%% Constants

-include_cropping = False
+include_cropping = False

-classifier_base = os.path.expanduser('~/models/camera_traps/idfg_classifier/idfg_classifier_20200905_042558')
-assert os.path.isdir(classifier_base)
+classifier_base = os.path.expanduser('~/models/camera_traps/idfg_classifier/idfg_classifier_20200905_042558')
+assert os.path.isdir(classifier_base)

-checkpoint_path = os.path.join(classifier_base,'idfg_classifier_ckpt_14_compiled.pt')
-assert os.path.isfile(checkpoint_path)
+checkpoint_path = os.path.join(classifier_base,'idfg_classifier_ckpt_14_compiled.pt')
+assert os.path.isfile(checkpoint_path)

-classifier_categories_path = os.path.join(classifier_base,'label_index.json')
-assert os.path.isfile(classifier_categories_path)
+classifier_categories_path = os.path.join(classifier_base,'label_index.json')
+assert os.path.isfile(classifier_categories_path)

-classifier_output_suffix = '_idfg_classifier_output.csv.gz'
-final_output_suffix = '_idfgclassifier.json'
+classifier_output_suffix = '_idfg_classifier_output.csv.gz'
+final_output_suffix = '_idfgclassifier.json'

-threshold_str = '0.65'
-n_threads_str = '50'
-image_size_str = '300'
-batch_size_str = '64'
-num_workers_str = '8'
-logdir = working_dir_base
+threshold_str = '0.65'
+n_threads_str = '50'
+image_size_str = '300'
+batch_size_str = '64'
+num_workers_str = '8'
+logdir = working_dir_base

-classification_threshold_str = '0.05'
+classification_threshold_str = '0.05'

-# This is just passed along to the metadata in the output file, it has no impact
-# on how the classification scripts run.
-typical_classification_threshold_str = '0.75'
+# This is just passed along to the metadata in the output file, it has no impact
+# on how the classification scripts run.
+typical_classification_threshold_str = '0.75'

-classifier_name = 'idfg4'
-
+classifier_name = 'idfg4'
+

-#%% Set up environment
+#%% Set up environment

-commands = []
-# commands.append('cd MegaDetector/classification\n')
-# commands.append('conda activate cameratraps-classifier\n')
+commands = []
+# commands.append('cd MegaDetector/classification\n')
+# commands.append('conda activate cameratraps-classifier\n')


-#%% Crop images
+#%% Crop images
+
+if include_cropping:
+
+commands.append('\n### Cropping ###\n')
+
+# fn = input_files[0]
+for fn in input_files:
+
+input_file_path = fn
+crop_cmd = ''
+
+crop_comment = '\n# Cropping {}\n'.format(fn)
+crop_cmd += crop_comment
+
+crop_cmd += "python crop_detections.py \\\n" + \
+input_file_path + ' \\\n' + \
+crop_path + ' \\\n' + \
+'--images-dir "' + image_base + '"' + ' \\\n' + \
+'--threshold "' + threshold_str + '"' + ' \\\n' + \
+'--square-crops ' + ' \\\n' + \
+'--threads "' + n_threads_str + '"' + ' \\\n' + \
+'--logdir "' + logdir + '"' + ' \\\n' + \
+'\n'
+crop_cmd = '{}'.format(crop_cmd)
+commands.append(crop_cmd)
+
+
+#%% Run classifier
+
+commands.append('\n### Classifying ###\n')

-if include_cropping:
-
-commands.append('\n### Cropping ###\n')
-
 # fn = input_files[0]
 for fn in input_files:
-
+
 input_file_path = fn
-
+classifier_output_path = crop_path + classifier_output_suffix

-
-crop_cmd += crop_comment
+classify_cmd = ''

-
-
-
-
-
-
-
-
-
-
-
-
+classify_comment = '\n# Classifying {}\n'.format(fn)
+classify_cmd += classify_comment
+
+classify_cmd += "python run_classifier.py \\\n" + \
+checkpoint_path + ' \\\n' + \
+crop_path + ' \\\n' + \
+classifier_output_path + ' \\\n' + \
+'--detections-json "' + input_file_path + '"' + ' \\\n' + \
+'--classifier-categories "' + classifier_categories_path + '"' + ' \\\n' + \
+'--image-size "' + image_size_str + '"' + ' \\\n' + \
+'--batch-size "' + batch_size_str + '"' + ' \\\n' + \
+'--num-workers "' + num_workers_str + '"' + ' \\\n'
+
+if device_id is not None:
+classify_cmd += '--device {}'.format(device_id)
+
+classify_cmd += '\n\n'
+classify_cmd = '{}'.format(classify_cmd)
+commands.append(classify_cmd)
+

-#%%
+#%% Merge classification and detection outputs

-commands.append('\n###
+commands.append('\n### Merging ###\n')

-# fn = input_files[0]
-for fn in input_files:
+# fn = input_files[0]
+for fn in input_files:

-
-
-
-
-
-
-
-
-classify_cmd += "python run_classifier.py \\\n" + \
-checkpoint_path + ' \\\n' + \
-crop_path + ' \\\n' + \
-classifier_output_path + ' \\\n' + \
-'--detections-json "' + input_file_path + '"' + ' \\\n' + \
-'--classifier-categories "' + classifier_categories_path + '"' + ' \\\n' + \
-'--image-size "' + image_size_str + '"' + ' \\\n' + \
-'--batch-size "' + batch_size_str + '"' + ' \\\n' + \
-'--num-workers "' + num_workers_str + '"' + ' \\\n'
-
-if device_id is not None:
-classify_cmd += '--device {}'.format(device_id)
+input_file_path = fn
+classifier_output_path = crop_path + classifier_output_suffix
+final_output_path = os.path.join(output_base,
+os.path.basename(classifier_output_path)).\
+replace(classifier_output_suffix,
+final_output_suffix)
+final_output_path = final_output_path.replace('_detections','')
+final_output_path = final_output_path.replace('_crops','')

-
-
-
-
-
-#%% Merge classification and detection outputs
-
-commands.append('\n### Merging ###\n')
-
-# fn = input_files[0]
-for fn in input_files:
-
-input_file_path = fn
-classifier_output_path = crop_path + classifier_output_suffix
-final_output_path = os.path.join(output_base,
-os.path.basename(classifier_output_path)).\
-replace(classifier_output_suffix,
-final_output_suffix)
-final_output_path = final_output_path.replace('_detections','')
-final_output_path = final_output_path.replace('_crops','')
-
-merge_cmd = ''
-
-merge_comment = '\n# Merging {}\n'.format(fn)
-merge_cmd += merge_comment
-
-merge_cmd += "python merge_classification_detection_output.py \\\n" + \
-classifier_output_path + ' \\\n' + \
-classifier_categories_path + ' \\\n' + \
-'--output-json "' + final_output_path + '"' + ' \\\n' + \
-'--detection-json "' + input_file_path + '"' + ' \\\n' + \
-'--classifier-name "' + classifier_name + '"' + ' \\\n' + \
-'--threshold "' + classification_threshold_str + '"' + ' \\\n' + \
-'--typical-confidence-threshold "' + typical_classification_threshold_str + '"' + ' \\\n' + \
-'\n'
-merge_cmd = '{}'.format(merge_cmd)
-commands.append(merge_cmd)
-
-
-#%% Write everything out
-
-with open(output_file,'w') as f:
-for s in commands:
-f.write('{}'.format(s))
-
-import stat
-st = os.stat(output_file)
-os.chmod(output_file, st.st_mode | stat.S_IEXEC)
+merge_cmd = ''
+
+merge_comment = '\n# Merging {}\n'.format(fn)
+merge_cmd += merge_comment

+merge_cmd += "python merge_classification_detection_output.py \\\n" + \
+classifier_output_path + ' \\\n' + \
+classifier_categories_path + ' \\\n' + \
+'--output-json "' + final_output_path + '"' + ' \\\n' + \
+'--detection-json "' + input_file_path + '"' + ' \\\n' + \
+'--classifier-name "' + classifier_name + '"' + ' \\\n' + \
+'--threshold "' + classification_threshold_str + '"' + ' \\\n' + \
+'--typical-confidence-threshold "' + typical_classification_threshold_str + '"' + ' \\\n' + \
+'\n'
+merge_cmd = '{}'.format(merge_cmd)
+commands.append(merge_cmd)
+
+
+#%% Write everything out
+
+with open(output_file,'w') as f:
+for s in commands:
+f.write('{}'.format(s))
+
+import stat
+st = os.stat(output_file)
+os.chmod(output_file, st.st_mode | stat.S_IEXEC)
+
+if __name__ == '__main__':
+main()