megadetector 5.0.28__py3-none-any.whl → 10.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Note: this version of megadetector has been flagged as potentially problematic.
- megadetector/api/batch_processing/integration/digiKam/xmp_integration.py +2 -2
- megadetector/api/batch_processing/integration/eMammal/test_scripts/push_annotations_to_emammal.py +1 -1
- megadetector/api/batch_processing/integration/eMammal/test_scripts/select_images_for_testing.py +1 -1
- megadetector/classification/aggregate_classifier_probs.py +3 -3
- megadetector/classification/analyze_failed_images.py +5 -5
- megadetector/classification/cache_batchapi_outputs.py +5 -5
- megadetector/classification/create_classification_dataset.py +11 -12
- megadetector/classification/crop_detections.py +10 -10
- megadetector/classification/csv_to_json.py +8 -8
- megadetector/classification/detect_and_crop.py +13 -15
- megadetector/classification/efficientnet/model.py +8 -8
- megadetector/classification/efficientnet/utils.py +6 -5
- megadetector/classification/evaluate_model.py +7 -7
- megadetector/classification/identify_mislabeled_candidates.py +6 -6
- megadetector/classification/json_to_azcopy_list.py +1 -1
- megadetector/classification/json_validator.py +29 -32
- megadetector/classification/map_classification_categories.py +9 -9
- megadetector/classification/merge_classification_detection_output.py +12 -9
- megadetector/classification/prepare_classification_script.py +19 -19
- megadetector/classification/prepare_classification_script_mc.py +26 -26
- megadetector/classification/run_classifier.py +4 -4
- megadetector/classification/save_mislabeled.py +6 -6
- megadetector/classification/train_classifier.py +1 -1
- megadetector/classification/train_classifier_tf.py +9 -9
- megadetector/classification/train_utils.py +10 -10
- megadetector/data_management/annotations/annotation_constants.py +1 -2
- megadetector/data_management/camtrap_dp_to_coco.py +79 -46
- megadetector/data_management/cct_json_utils.py +103 -103
- megadetector/data_management/cct_to_md.py +49 -49
- megadetector/data_management/cct_to_wi.py +33 -33
- megadetector/data_management/coco_to_labelme.py +75 -75
- megadetector/data_management/coco_to_yolo.py +210 -193
- megadetector/data_management/databases/add_width_and_height_to_db.py +86 -12
- megadetector/data_management/databases/combine_coco_camera_traps_files.py +40 -40
- megadetector/data_management/databases/integrity_check_json_db.py +228 -200
- megadetector/data_management/databases/subset_json_db.py +33 -33
- megadetector/data_management/generate_crops_from_cct.py +88 -39
- megadetector/data_management/get_image_sizes.py +54 -49
- megadetector/data_management/labelme_to_coco.py +133 -125
- megadetector/data_management/labelme_to_yolo.py +159 -73
- megadetector/data_management/lila/create_lila_blank_set.py +81 -83
- megadetector/data_management/lila/create_lila_test_set.py +32 -31
- megadetector/data_management/lila/create_links_to_md_results_files.py +18 -18
- megadetector/data_management/lila/download_lila_subset.py +21 -24
- megadetector/data_management/lila/generate_lila_per_image_labels.py +365 -107
- megadetector/data_management/lila/get_lila_annotation_counts.py +35 -33
- megadetector/data_management/lila/get_lila_image_counts.py +22 -22
- megadetector/data_management/lila/lila_common.py +73 -70
- megadetector/data_management/lila/test_lila_metadata_urls.py +28 -19
- megadetector/data_management/mewc_to_md.py +344 -340
- megadetector/data_management/ocr_tools.py +262 -255
- megadetector/data_management/read_exif.py +249 -227
- megadetector/data_management/remap_coco_categories.py +90 -28
- megadetector/data_management/remove_exif.py +81 -21
- megadetector/data_management/rename_images.py +187 -187
- megadetector/data_management/resize_coco_dataset.py +588 -120
- megadetector/data_management/speciesnet_to_md.py +41 -41
- megadetector/data_management/wi_download_csv_to_coco.py +55 -55
- megadetector/data_management/yolo_output_to_md_output.py +248 -122
- megadetector/data_management/yolo_to_coco.py +333 -191
- megadetector/detection/change_detection.py +832 -0
- megadetector/detection/process_video.py +340 -337
- megadetector/detection/pytorch_detector.py +358 -278
- megadetector/detection/run_detector.py +399 -186
- megadetector/detection/run_detector_batch.py +404 -377
- megadetector/detection/run_inference_with_yolov5_val.py +340 -327
- megadetector/detection/run_tiled_inference.py +257 -249
- megadetector/detection/tf_detector.py +24 -24
- megadetector/detection/video_utils.py +332 -295
- megadetector/postprocessing/add_max_conf.py +19 -11
- megadetector/postprocessing/categorize_detections_by_size.py +45 -45
- megadetector/postprocessing/classification_postprocessing.py +468 -433
- megadetector/postprocessing/combine_batch_outputs.py +23 -23
- megadetector/postprocessing/compare_batch_results.py +590 -525
- megadetector/postprocessing/convert_output_format.py +106 -102
- megadetector/postprocessing/create_crop_folder.py +347 -147
- megadetector/postprocessing/detector_calibration.py +173 -168
- megadetector/postprocessing/generate_csv_report.py +508 -499
- megadetector/postprocessing/load_api_results.py +48 -27
- megadetector/postprocessing/md_to_coco.py +133 -102
- megadetector/postprocessing/md_to_labelme.py +107 -90
- megadetector/postprocessing/md_to_wi.py +40 -40
- megadetector/postprocessing/merge_detections.py +92 -114
- megadetector/postprocessing/postprocess_batch_results.py +319 -301
- megadetector/postprocessing/remap_detection_categories.py +91 -38
- megadetector/postprocessing/render_detection_confusion_matrix.py +214 -205
- megadetector/postprocessing/repeat_detection_elimination/find_repeat_detections.py +57 -57
- megadetector/postprocessing/repeat_detection_elimination/remove_repeat_detections.py +27 -28
- megadetector/postprocessing/repeat_detection_elimination/repeat_detections_core.py +704 -679
- megadetector/postprocessing/separate_detections_into_folders.py +226 -211
- megadetector/postprocessing/subset_json_detector_output.py +265 -262
- megadetector/postprocessing/top_folders_to_bottom.py +45 -45
- megadetector/postprocessing/validate_batch_results.py +70 -70
- megadetector/taxonomy_mapping/map_lila_taxonomy_to_wi_taxonomy.py +52 -52
- megadetector/taxonomy_mapping/map_new_lila_datasets.py +18 -19
- megadetector/taxonomy_mapping/prepare_lila_taxonomy_release.py +54 -33
- megadetector/taxonomy_mapping/preview_lila_taxonomy.py +67 -67
- megadetector/taxonomy_mapping/retrieve_sample_image.py +16 -16
- megadetector/taxonomy_mapping/simple_image_download.py +8 -8
- megadetector/taxonomy_mapping/species_lookup.py +156 -74
- megadetector/taxonomy_mapping/taxonomy_csv_checker.py +14 -14
- megadetector/taxonomy_mapping/taxonomy_graph.py +10 -10
- megadetector/taxonomy_mapping/validate_lila_category_mappings.py +13 -13
- megadetector/utils/ct_utils.py +1049 -211
- megadetector/utils/directory_listing.py +21 -77
- megadetector/utils/gpu_test.py +22 -22
- megadetector/utils/md_tests.py +632 -529
- megadetector/utils/path_utils.py +1520 -431
- megadetector/utils/process_utils.py +41 -41
- megadetector/utils/split_locations_into_train_val.py +62 -62
- megadetector/utils/string_utils.py +148 -27
- megadetector/utils/url_utils.py +489 -176
- megadetector/utils/wi_utils.py +2658 -2526
- megadetector/utils/write_html_image_list.py +137 -137
- megadetector/visualization/plot_utils.py +34 -30
- megadetector/visualization/render_images_with_thumbnails.py +39 -74
- megadetector/visualization/visualization_utils.py +487 -435
- megadetector/visualization/visualize_db.py +232 -198
- megadetector/visualization/visualize_detector_output.py +82 -76
- {megadetector-5.0.28.dist-info → megadetector-10.0.0.dist-info}/METADATA +5 -2
- megadetector-10.0.0.dist-info/RECORD +139 -0
- {megadetector-5.0.28.dist-info → megadetector-10.0.0.dist-info}/WHEEL +1 -1
- megadetector/api/batch_processing/api_core/__init__.py +0 -0
- megadetector/api/batch_processing/api_core/batch_service/__init__.py +0 -0
- megadetector/api/batch_processing/api_core/batch_service/score.py +0 -439
- megadetector/api/batch_processing/api_core/server.py +0 -294
- megadetector/api/batch_processing/api_core/server_api_config.py +0 -97
- megadetector/api/batch_processing/api_core/server_app_config.py +0 -55
- megadetector/api/batch_processing/api_core/server_batch_job_manager.py +0 -220
- megadetector/api/batch_processing/api_core/server_job_status_table.py +0 -149
- megadetector/api/batch_processing/api_core/server_orchestration.py +0 -360
- megadetector/api/batch_processing/api_core/server_utils.py +0 -88
- megadetector/api/batch_processing/api_core_support/__init__.py +0 -0
- megadetector/api/batch_processing/api_core_support/aggregate_results_manually.py +0 -46
- megadetector/api/batch_processing/api_support/__init__.py +0 -0
- megadetector/api/batch_processing/api_support/summarize_daily_activity.py +0 -152
- megadetector/api/batch_processing/data_preparation/__init__.py +0 -0
- megadetector/api/synchronous/__init__.py +0 -0
- megadetector/api/synchronous/api_core/animal_detection_api/__init__.py +0 -0
- megadetector/api/synchronous/api_core/animal_detection_api/api_backend.py +0 -151
- megadetector/api/synchronous/api_core/animal_detection_api/api_frontend.py +0 -263
- megadetector/api/synchronous/api_core/animal_detection_api/config.py +0 -35
- megadetector/api/synchronous/api_core/tests/__init__.py +0 -0
- megadetector/api/synchronous/api_core/tests/load_test.py +0 -110
- megadetector/data_management/importers/add_nacti_sizes.py +0 -52
- megadetector/data_management/importers/add_timestamps_to_icct.py +0 -79
- megadetector/data_management/importers/animl_results_to_md_results.py +0 -158
- megadetector/data_management/importers/auckland_doc_test_to_json.py +0 -373
- megadetector/data_management/importers/auckland_doc_to_json.py +0 -201
- megadetector/data_management/importers/awc_to_json.py +0 -191
- megadetector/data_management/importers/bellevue_to_json.py +0 -272
- megadetector/data_management/importers/cacophony-thermal-importer.py +0 -793
- megadetector/data_management/importers/carrizo_shrubfree_2018.py +0 -269
- megadetector/data_management/importers/carrizo_trail_cam_2017.py +0 -289
- megadetector/data_management/importers/cct_field_adjustments.py +0 -58
- megadetector/data_management/importers/channel_islands_to_cct.py +0 -913
- megadetector/data_management/importers/eMammal/copy_and_unzip_emammal.py +0 -180
- megadetector/data_management/importers/eMammal/eMammal_helpers.py +0 -249
- megadetector/data_management/importers/eMammal/make_eMammal_json.py +0 -223
- megadetector/data_management/importers/ena24_to_json.py +0 -276
- megadetector/data_management/importers/filenames_to_json.py +0 -386
- megadetector/data_management/importers/helena_to_cct.py +0 -283
- megadetector/data_management/importers/idaho-camera-traps.py +0 -1407
- megadetector/data_management/importers/idfg_iwildcam_lila_prep.py +0 -294
- megadetector/data_management/importers/import_desert_lion_conservation_camera_traps.py +0 -387
- megadetector/data_management/importers/jb_csv_to_json.py +0 -150
- megadetector/data_management/importers/mcgill_to_json.py +0 -250
- megadetector/data_management/importers/missouri_to_json.py +0 -490
- megadetector/data_management/importers/nacti_fieldname_adjustments.py +0 -79
- megadetector/data_management/importers/noaa_seals_2019.py +0 -181
- megadetector/data_management/importers/osu-small-animals-to-json.py +0 -364
- megadetector/data_management/importers/pc_to_json.py +0 -365
- megadetector/data_management/importers/plot_wni_giraffes.py +0 -123
- megadetector/data_management/importers/prepare_zsl_imerit.py +0 -131
- megadetector/data_management/importers/raic_csv_to_md_results.py +0 -416
- megadetector/data_management/importers/rspb_to_json.py +0 -356
- megadetector/data_management/importers/save_the_elephants_survey_A.py +0 -320
- megadetector/data_management/importers/save_the_elephants_survey_B.py +0 -329
- megadetector/data_management/importers/snapshot_safari_importer.py +0 -758
- megadetector/data_management/importers/snapshot_serengeti_lila.py +0 -1067
- megadetector/data_management/importers/snapshotserengeti/make_full_SS_json.py +0 -150
- megadetector/data_management/importers/snapshotserengeti/make_per_season_SS_json.py +0 -153
- megadetector/data_management/importers/sulross_get_exif.py +0 -65
- megadetector/data_management/importers/timelapse_csv_set_to_json.py +0 -490
- megadetector/data_management/importers/ubc_to_json.py +0 -399
- megadetector/data_management/importers/umn_to_json.py +0 -507
- megadetector/data_management/importers/wellington_to_json.py +0 -263
- megadetector/data_management/importers/wi_to_json.py +0 -442
- megadetector/data_management/importers/zamba_results_to_md_results.py +0 -180
- megadetector/data_management/lila/add_locations_to_island_camera_traps.py +0 -101
- megadetector/data_management/lila/add_locations_to_nacti.py +0 -151
- megadetector/utils/azure_utils.py +0 -178
- megadetector/utils/sas_blob_utils.py +0 -509
- megadetector-5.0.28.dist-info/RECORD +0 -209
- /megadetector/{api/batch_processing/__init__.py → __init__.py} +0 -0
- {megadetector-5.0.28.dist-info → megadetector-10.0.0.dist-info}/licenses/LICENSE +0 -0
- {megadetector-5.0.28.dist-info → megadetector-10.0.0.dist-info}/top_level.txt +0 -0
megadetector/postprocessing/add_max_conf.py:

@@ -15,43 +15,51 @@ to add it back to an existing .json file.

import os
import json
+import sys
+import argparse
+
from megadetector.utils import ct_utils


#%% Main function

def add_max_conf(input_file,output_file):
-
+    """
+    Add maximum confidence values to [input_file] and write the results to [output_file].
+
+    Args:
+        input_file (str): MD-formatted .json file to which we should add maxconf values
+        output_file (str): output .json file
+    """
+
    assert os.path.isfile(input_file), "Can't find input file {}".format(input_file)
-
+
    with open(input_file,'r') as f:
        d = json.load(f)
-
+
    for im in d['images']:
-
+
        max_conf = ct_utils.get_max_conf(im)
-
+
        if 'max_detection_conf' in im:
            assert abs(max_conf - im['max_detection_conf']) < 0.00001
        else:
            im['max_detection_conf'] = max_conf
-
+
    with open(output_file,'w') as f:
        json.dump(d,f,indent=1)
-
+

#%% Driver

-
+def main(): # noqa

-def main():
-
    parser = argparse.ArgumentParser()
    parser.add_argument('input_file',type=str,
                        help='Input .json file')
    parser.add_argument('output_file',type=str,
                        help='Output .json file')
-
+
    if len(sys.argv[1:]) == 0:
        parser.print_help()
        parser.exit()
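
The docstring added in this hunk is the full description of the function's behavior; below is a minimal usage sketch, assuming the megadetector package is installed and that an MD-formatted results file is available (the file names are hypothetical).

# Hypothetical example (file names are placeholders): add a per-image
# 'max_detection_conf' value to an existing MegaDetector results file.
from megadetector.postprocessing.add_max_conf import add_max_conf

add_max_conf('md_results.json', 'md_results_with_max_conf.json')

Per the hunk above, images that already carry a 'max_detection_conf' value are left unchanged; the function only asserts that the stored value matches the recomputed maximum.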
megadetector/postprocessing/categorize_detections_by_size.py:

@@ -21,145 +21,145 @@ class SizeCategorizationOptions:
    """
    Options used to parameterize categorize_detections_by_size().
    """
-
+
    def __init__(self):
-
+
        #: Thresholds to use for separation, as a fraction of the image size.
        #:
        #: Should be sorted from smallest to largest.
        self.size_thresholds = [0.95]
-
+
        #: List of category numbers to use in separation; uses all categories if None
        self.categories_to_separate = None
-
+
        #: Dimension to use for thresholding; can be "size", "width", or "height"
        self.measurement = 'size'
-
+
        #: Categories to assign to thresholded ranges; should have the same length as
        #: "size_thresholds".
        self.size_category_names = ['large_detection']
-
-
+
+
#%% Main functions
-
+
def categorize_detections_by_size(input_file,output_file=None,options=None):
    """
    Given a MegaDetector .json file, creates a separate category for bounding boxes
    above one or more size thresholds, optionally writing results to [output_file].
-
+
    Args:
        input_file (str): file to process
        output_file (str, optional): optional output file
-        options (SizeCategorizationOptions): categorization parameters
-
+        options (SizeCategorizationOptions, optional): categorization parameters
+
    Returns:
-        dict: data loaded from [input_file], with the new size-based categories.
+        dict: data loaded from [input_file], with the new size-based categories.
        Identical to what's written to [output_file], if [output_file] is not None.
    """
    if options is None:
        options = SizeCategorizationOptions()
-
+
    if options.categories_to_separate is not None:
        options.categories_to_separate = \
            [str(c) for c in options.categories_to_separate]
-
+
    assert len(options.size_thresholds) == len(options.size_category_names), \
        'Options struct should have the same number of category names and size thresholds'

    # Sort size thresholds and names from largest to smallest
-    options.size_category_names = [x for _,x in sorted(zip(options.size_thresholds,
+    options.size_category_names = [x for _,x in sorted(zip(options.size_thresholds, # noqa
                                                            options.size_category_names),reverse=True)]
    options.size_thresholds = sorted(options.size_thresholds,reverse=True)
-
+
    with open(input_file) as f:
        data = json.load(f)
-
+
    detection_categories = data['detection_categories']
    category_keys = list(detection_categories.keys())
    category_keys = [int(k) for k in category_keys]
    max_key = max(category_keys)
-
+
    threshold_to_category_id = {}
    for i_threshold,threshold in enumerate(options.size_thresholds):
-
+
        category_id = str(max_key+1)
        max_key += 1
        detection_categories[category_id] = options.size_category_names[i_threshold]
        threshold_to_category_id[i_threshold] = category_id
-
+
        print('Creating category for {} with ID {}'.format(
            options.size_category_names[i_threshold],category_id))
-
+
    images = data['images']
-
+
    print('Loaded {} images'.format(len(images)))
-
+
    # For each image...
    #
    # im = images[0]
-
+
    category_id_to_count = defaultdict(int)
-
+
    for im in tqdm(images):
-
+
        if im['detections'] is None:
            assert im['failure'] is not None and len(im['failure']) > 0
            continue
-
+
        # d = im['detections'][0]
        for d in im['detections']:
-
+
            # Are there really any detections here?
            if (d is None) or ('bbox' not in d) or (d['bbox'] is None):
                continue
-
+
            # Is this a category we're supposed to process?
            if (options.categories_to_separate is not None) and \
                (d['category'] not in options.categories_to_separate):
                continue
-
+
            # https://github.com/agentmorris/MegaDetector/tree/main/megadetector/api/batch_processing#detector-outputs
            w = d['bbox'][2]
            h = d['bbox'][3]
            detection_size = w*h
-
+
            metric = None
-
+
            if options.measurement == 'size':
                metric = detection_size
            elif options.measurement == 'width':
                metric = w
            else:
                assert options.measurement == 'height', 'Unrecognized measurement metric'
-                metric = h
+                metric = h
            assert metric is not None
-
+
            for i_threshold,threshold in enumerate(options.size_thresholds):
-
+
                if metric >= threshold:
-
+
                    category_id = threshold_to_category_id[i_threshold]
-
+
                    category_id_to_count[category_id] += 1
-                    d['category'] = category_id
-
+                    d['category'] = category_id
+
                    break
-
+
            # ...for each threshold
        # ...for each detection
-
+
    # ...for each image
-
+
    for i_threshold in range(0,len(options.size_thresholds)):
        category_name = options.size_category_names[i_threshold]
        category_id = threshold_to_category_id[i_threshold]
        category_count = category_id_to_count[category_id]
        print('Found {} detections in category {}'.format(category_count,category_name))
-
+
    if output_file is not None:
        with open(output_file,'w') as f:
            json.dump(data,f,indent=1)
-
+
    return data
-
+
# ...def categorize_detections_by_size()
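
The SizeCategorizationOptions fields at the top of this hunk drive the thresholding logic; here is a minimal usage sketch, assuming the megadetector package is installed, with hypothetical file names and thresholds chosen purely for illustration.

# Hypothetical example (file names and thresholds are placeholders): label boxes
# covering at least half of the image area as 'large_detection' and boxes covering
# at least 90% as 'giant_detection', considering only detection category "1"
# (animal, in standard MegaDetector output).
from megadetector.postprocessing.categorize_detections_by_size import (
    SizeCategorizationOptions, categorize_detections_by_size)

options = SizeCategorizationOptions()
options.measurement = 'size'             # threshold on area (w*h); the default
options.size_thresholds = [0.5, 0.9]     # fractions of image size, smallest to largest
options.size_category_names = ['large_detection', 'giant_detection']
options.categories_to_separate = [1]     # converted to strings internally

data = categorize_detections_by_size('md_results.json',
                                     'md_results_by_size.json',
                                     options)

Because the function sorts thresholds and their category names together from largest to smallest and breaks on the first match, each qualifying detection is assigned the category of the largest threshold it exceeds.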