megadetector 5.0.9__py3-none-any.whl → 5.0.11__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {megadetector-5.0.9.dist-info → megadetector-5.0.11.dist-info}/LICENSE +0 -0
- {megadetector-5.0.9.dist-info → megadetector-5.0.11.dist-info}/METADATA +12 -11
- megadetector-5.0.11.dist-info/RECORD +5 -0
- megadetector-5.0.11.dist-info/top_level.txt +1 -0
- api/__init__.py +0 -0
- api/batch_processing/__init__.py +0 -0
- api/batch_processing/api_core/__init__.py +0 -0
- api/batch_processing/api_core/batch_service/__init__.py +0 -0
- api/batch_processing/api_core/batch_service/score.py +0 -439
- api/batch_processing/api_core/server.py +0 -294
- api/batch_processing/api_core/server_api_config.py +0 -98
- api/batch_processing/api_core/server_app_config.py +0 -55
- api/batch_processing/api_core/server_batch_job_manager.py +0 -220
- api/batch_processing/api_core/server_job_status_table.py +0 -152
- api/batch_processing/api_core/server_orchestration.py +0 -360
- api/batch_processing/api_core/server_utils.py +0 -92
- api/batch_processing/api_core_support/__init__.py +0 -0
- api/batch_processing/api_core_support/aggregate_results_manually.py +0 -46
- api/batch_processing/api_support/__init__.py +0 -0
- api/batch_processing/api_support/summarize_daily_activity.py +0 -152
- api/batch_processing/data_preparation/__init__.py +0 -0
- api/batch_processing/data_preparation/manage_local_batch.py +0 -2391
- api/batch_processing/data_preparation/manage_video_batch.py +0 -327
- api/batch_processing/integration/digiKam/setup.py +0 -6
- api/batch_processing/integration/digiKam/xmp_integration.py +0 -465
- api/batch_processing/integration/eMammal/test_scripts/config_template.py +0 -5
- api/batch_processing/integration/eMammal/test_scripts/push_annotations_to_emammal.py +0 -126
- api/batch_processing/integration/eMammal/test_scripts/select_images_for_testing.py +0 -55
- api/batch_processing/postprocessing/__init__.py +0 -0
- api/batch_processing/postprocessing/add_max_conf.py +0 -64
- api/batch_processing/postprocessing/categorize_detections_by_size.py +0 -163
- api/batch_processing/postprocessing/combine_api_outputs.py +0 -249
- api/batch_processing/postprocessing/compare_batch_results.py +0 -958
- api/batch_processing/postprocessing/convert_output_format.py +0 -397
- api/batch_processing/postprocessing/load_api_results.py +0 -195
- api/batch_processing/postprocessing/md_to_coco.py +0 -310
- api/batch_processing/postprocessing/md_to_labelme.py +0 -330
- api/batch_processing/postprocessing/merge_detections.py +0 -401
- api/batch_processing/postprocessing/postprocess_batch_results.py +0 -1904
- api/batch_processing/postprocessing/remap_detection_categories.py +0 -170
- api/batch_processing/postprocessing/render_detection_confusion_matrix.py +0 -661
- api/batch_processing/postprocessing/repeat_detection_elimination/find_repeat_detections.py +0 -211
- api/batch_processing/postprocessing/repeat_detection_elimination/remove_repeat_detections.py +0 -82
- api/batch_processing/postprocessing/repeat_detection_elimination/repeat_detections_core.py +0 -1631
- api/batch_processing/postprocessing/separate_detections_into_folders.py +0 -731
- api/batch_processing/postprocessing/subset_json_detector_output.py +0 -696
- api/batch_processing/postprocessing/top_folders_to_bottom.py +0 -223
- api/synchronous/__init__.py +0 -0
- api/synchronous/api_core/animal_detection_api/__init__.py +0 -0
- api/synchronous/api_core/animal_detection_api/api_backend.py +0 -152
- api/synchronous/api_core/animal_detection_api/api_frontend.py +0 -266
- api/synchronous/api_core/animal_detection_api/config.py +0 -35
- api/synchronous/api_core/animal_detection_api/data_management/annotations/annotation_constants.py +0 -47
- api/synchronous/api_core/animal_detection_api/detection/detector_training/copy_checkpoints.py +0 -43
- api/synchronous/api_core/animal_detection_api/detection/detector_training/model_main_tf2.py +0 -114
- api/synchronous/api_core/animal_detection_api/detection/process_video.py +0 -543
- api/synchronous/api_core/animal_detection_api/detection/pytorch_detector.py +0 -304
- api/synchronous/api_core/animal_detection_api/detection/run_detector.py +0 -627
- api/synchronous/api_core/animal_detection_api/detection/run_detector_batch.py +0 -1029
- api/synchronous/api_core/animal_detection_api/detection/run_inference_with_yolov5_val.py +0 -581
- api/synchronous/api_core/animal_detection_api/detection/run_tiled_inference.py +0 -754
- api/synchronous/api_core/animal_detection_api/detection/tf_detector.py +0 -165
- api/synchronous/api_core/animal_detection_api/detection/video_utils.py +0 -495
- api/synchronous/api_core/animal_detection_api/md_utils/azure_utils.py +0 -174
- api/synchronous/api_core/animal_detection_api/md_utils/ct_utils.py +0 -262
- api/synchronous/api_core/animal_detection_api/md_utils/directory_listing.py +0 -251
- api/synchronous/api_core/animal_detection_api/md_utils/matlab_porting_tools.py +0 -97
- api/synchronous/api_core/animal_detection_api/md_utils/path_utils.py +0 -416
- api/synchronous/api_core/animal_detection_api/md_utils/process_utils.py +0 -110
- api/synchronous/api_core/animal_detection_api/md_utils/sas_blob_utils.py +0 -509
- api/synchronous/api_core/animal_detection_api/md_utils/string_utils.py +0 -59
- api/synchronous/api_core/animal_detection_api/md_utils/url_utils.py +0 -144
- api/synchronous/api_core/animal_detection_api/md_utils/write_html_image_list.py +0 -226
- api/synchronous/api_core/animal_detection_api/md_visualization/visualization_utils.py +0 -841
- api/synchronous/api_core/tests/__init__.py +0 -0
- api/synchronous/api_core/tests/load_test.py +0 -110
- classification/__init__.py +0 -0
- classification/aggregate_classifier_probs.py +0 -108
- classification/analyze_failed_images.py +0 -227
- classification/cache_batchapi_outputs.py +0 -198
- classification/create_classification_dataset.py +0 -627
- classification/crop_detections.py +0 -516
- classification/csv_to_json.py +0 -226
- classification/detect_and_crop.py +0 -855
- classification/efficientnet/__init__.py +0 -9
- classification/efficientnet/model.py +0 -415
- classification/efficientnet/utils.py +0 -610
- classification/evaluate_model.py +0 -520
- classification/identify_mislabeled_candidates.py +0 -152
- classification/json_to_azcopy_list.py +0 -63
- classification/json_validator.py +0 -695
- classification/map_classification_categories.py +0 -276
- classification/merge_classification_detection_output.py +0 -506
- classification/prepare_classification_script.py +0 -194
- classification/prepare_classification_script_mc.py +0 -228
- classification/run_classifier.py +0 -286
- classification/save_mislabeled.py +0 -110
- classification/train_classifier.py +0 -825
- classification/train_classifier_tf.py +0 -724
- classification/train_utils.py +0 -322
- data_management/__init__.py +0 -0
- data_management/annotations/__init__.py +0 -0
- data_management/annotations/annotation_constants.py +0 -34
- data_management/camtrap_dp_to_coco.py +0 -238
- data_management/cct_json_utils.py +0 -395
- data_management/cct_to_md.py +0 -176
- data_management/cct_to_wi.py +0 -289
- data_management/coco_to_labelme.py +0 -272
- data_management/coco_to_yolo.py +0 -662
- data_management/databases/__init__.py +0 -0
- data_management/databases/add_width_and_height_to_db.py +0 -33
- data_management/databases/combine_coco_camera_traps_files.py +0 -206
- data_management/databases/integrity_check_json_db.py +0 -477
- data_management/databases/subset_json_db.py +0 -115
- data_management/generate_crops_from_cct.py +0 -149
- data_management/get_image_sizes.py +0 -188
- data_management/importers/add_nacti_sizes.py +0 -52
- data_management/importers/add_timestamps_to_icct.py +0 -79
- data_management/importers/animl_results_to_md_results.py +0 -158
- data_management/importers/auckland_doc_test_to_json.py +0 -372
- data_management/importers/auckland_doc_to_json.py +0 -200
- data_management/importers/awc_to_json.py +0 -189
- data_management/importers/bellevue_to_json.py +0 -273
- data_management/importers/cacophony-thermal-importer.py +0 -796
- data_management/importers/carrizo_shrubfree_2018.py +0 -268
- data_management/importers/carrizo_trail_cam_2017.py +0 -287
- data_management/importers/cct_field_adjustments.py +0 -57
- data_management/importers/channel_islands_to_cct.py +0 -913
- data_management/importers/eMammal/copy_and_unzip_emammal.py +0 -180
- data_management/importers/eMammal/eMammal_helpers.py +0 -249
- data_management/importers/eMammal/make_eMammal_json.py +0 -223
- data_management/importers/ena24_to_json.py +0 -275
- data_management/importers/filenames_to_json.py +0 -385
- data_management/importers/helena_to_cct.py +0 -282
- data_management/importers/idaho-camera-traps.py +0 -1407
- data_management/importers/idfg_iwildcam_lila_prep.py +0 -294
- data_management/importers/jb_csv_to_json.py +0 -150
- data_management/importers/mcgill_to_json.py +0 -250
- data_management/importers/missouri_to_json.py +0 -489
- data_management/importers/nacti_fieldname_adjustments.py +0 -79
- data_management/importers/noaa_seals_2019.py +0 -181
- data_management/importers/pc_to_json.py +0 -365
- data_management/importers/plot_wni_giraffes.py +0 -123
- data_management/importers/prepare-noaa-fish-data-for-lila.py +0 -359
- data_management/importers/prepare_zsl_imerit.py +0 -131
- data_management/importers/rspb_to_json.py +0 -356
- data_management/importers/save_the_elephants_survey_A.py +0 -320
- data_management/importers/save_the_elephants_survey_B.py +0 -332
- data_management/importers/snapshot_safari_importer.py +0 -758
- data_management/importers/snapshot_safari_importer_reprise.py +0 -665
- data_management/importers/snapshot_serengeti_lila.py +0 -1067
- data_management/importers/snapshotserengeti/make_full_SS_json.py +0 -150
- data_management/importers/snapshotserengeti/make_per_season_SS_json.py +0 -153
- data_management/importers/sulross_get_exif.py +0 -65
- data_management/importers/timelapse_csv_set_to_json.py +0 -490
- data_management/importers/ubc_to_json.py +0 -399
- data_management/importers/umn_to_json.py +0 -507
- data_management/importers/wellington_to_json.py +0 -263
- data_management/importers/wi_to_json.py +0 -441
- data_management/importers/zamba_results_to_md_results.py +0 -181
- data_management/labelme_to_coco.py +0 -548
- data_management/labelme_to_yolo.py +0 -272
- data_management/lila/__init__.py +0 -0
- data_management/lila/add_locations_to_island_camera_traps.py +0 -97
- data_management/lila/add_locations_to_nacti.py +0 -147
- data_management/lila/create_lila_blank_set.py +0 -557
- data_management/lila/create_lila_test_set.py +0 -151
- data_management/lila/create_links_to_md_results_files.py +0 -106
- data_management/lila/download_lila_subset.py +0 -177
- data_management/lila/generate_lila_per_image_labels.py +0 -515
- data_management/lila/get_lila_annotation_counts.py +0 -170
- data_management/lila/get_lila_image_counts.py +0 -111
- data_management/lila/lila_common.py +0 -300
- data_management/lila/test_lila_metadata_urls.py +0 -132
- data_management/ocr_tools.py +0 -874
- data_management/read_exif.py +0 -681
- data_management/remap_coco_categories.py +0 -84
- data_management/remove_exif.py +0 -66
- data_management/resize_coco_dataset.py +0 -189
- data_management/wi_download_csv_to_coco.py +0 -246
- data_management/yolo_output_to_md_output.py +0 -441
- data_management/yolo_to_coco.py +0 -676
- detection/__init__.py +0 -0
- detection/detector_training/__init__.py +0 -0
- detection/detector_training/model_main_tf2.py +0 -114
- detection/process_video.py +0 -703
- detection/pytorch_detector.py +0 -337
- detection/run_detector.py +0 -779
- detection/run_detector_batch.py +0 -1219
- detection/run_inference_with_yolov5_val.py +0 -917
- detection/run_tiled_inference.py +0 -935
- detection/tf_detector.py +0 -188
- detection/video_utils.py +0 -606
- docs/source/conf.py +0 -43
- md_utils/__init__.py +0 -0
- md_utils/azure_utils.py +0 -174
- md_utils/ct_utils.py +0 -612
- md_utils/directory_listing.py +0 -246
- md_utils/md_tests.py +0 -968
- md_utils/path_utils.py +0 -1044
- md_utils/process_utils.py +0 -157
- md_utils/sas_blob_utils.py +0 -509
- md_utils/split_locations_into_train_val.py +0 -228
- md_utils/string_utils.py +0 -92
- md_utils/url_utils.py +0 -323
- md_utils/write_html_image_list.py +0 -225
- md_visualization/__init__.py +0 -0
- md_visualization/plot_utils.py +0 -293
- md_visualization/render_images_with_thumbnails.py +0 -275
- md_visualization/visualization_utils.py +0 -1537
- md_visualization/visualize_db.py +0 -551
- md_visualization/visualize_detector_output.py +0 -406
- megadetector-5.0.9.dist-info/RECORD +0 -224
- megadetector-5.0.9.dist-info/top_level.txt +0 -8
- taxonomy_mapping/__init__.py +0 -0
- taxonomy_mapping/map_lila_taxonomy_to_wi_taxonomy.py +0 -491
- taxonomy_mapping/map_new_lila_datasets.py +0 -154
- taxonomy_mapping/prepare_lila_taxonomy_release.py +0 -142
- taxonomy_mapping/preview_lila_taxonomy.py +0 -591
- taxonomy_mapping/retrieve_sample_image.py +0 -71
- taxonomy_mapping/simple_image_download.py +0 -218
- taxonomy_mapping/species_lookup.py +0 -834
- taxonomy_mapping/taxonomy_csv_checker.py +0 -159
- taxonomy_mapping/taxonomy_graph.py +0 -346
- taxonomy_mapping/validate_lila_category_mappings.py +0 -83
- {megadetector-5.0.9.dist-info → megadetector-5.0.11.dist-info}/WHEEL +0 -0
@@ -1,754 +0,0 @@
########
#
# run_tiled_inference.py
#
# Run inference on a folder, first splitting each image up into tiles of size
# MxN (typically the native inference size of your detector), writing those
# tiles out to a temporary folder, then de-duplicating the results before merging
# them back into a set of detections that make sense on the original images.
#
# This approach will likely fail to detect very large animals, so if you expect both large
# and small animals (in terms of pixel size), this script is best used in
# conjunction with a traditional inference pass that looks at whole images.
#
# Currently requires temporary storage at least as large as the input data, generally
# a lot more than that (depending on the overlap between adjacent tiles). This is
# inefficient, but easy to debug.
#
# Programmatic invocation supports using YOLOv5's inference scripts (and test-time
# augmentation); the command-line interface only supports standard inference right now.
#
########
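As a quick aside on the grid geometry described in that header: with the default 1280x1280 tiles and 0.5 overlap, tile origins are spaced half a tile apart, and the final row/column backs up so the last tile ends exactly at the image edge. A minimal editorial sketch of that arithmetic, not part of the removed file (the 4000x3000 image size is hypothetical, and the compact form assumes the same back-up rule as get_patch_boundaries below):

# A 4000x3000 image with 1280x1280 tiles and 0.5 overlap (stride 640) yields
# column origins 0, 640, 1280, 1920, 2560, 2720 and row origins 0, 640, 1280, 1720,
# i.e. a 6x4 grid of 24 tiles.
image_w, image_h, tile, stride = 4000, 3000, 1280, 640
xs = list(range(0, image_w - tile, stride)) + [image_w - tile]
ys = list(range(0, image_h - tile, stride)) + [image_h - tile]
print(len(xs), len(ys))  # 6 4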
#%% Imports and constants

import os
import json

from tqdm import tqdm

from detection.run_inference_with_yolov5_val import YoloInferenceOptions,run_inference_with_yolo_val
from detection.run_detector_batch import load_and_run_detector_batch,write_results_to_file

import torch
from torchvision import ops

from md_utils import path_utils
from md_visualization import visualization_utils as vis_utils

default_patch_overlap = 0.5
patch_jpeg_quality = 95

# This isn't NMS in the usual sense of redundant model predictions; this is being
# used to de-duplicate predictions from overlapping patches.
nms_iou_threshold = 0.45

default_tile_size = [1280,1280]

default_n_patch_extraction_workers = 1
parallelization_uses_threads = False


#%% Support functions

def get_patch_boundaries(image_size,patch_size,patch_stride=None):
    """
    Get a list of patch starting coordinates (x,y) given an image size (w,h)
    and a stride (x,y). Stride defaults to half the patch size.

    patch_stride can also be a single float, in which case that is interpreted
    as the stride relative to the patch size (0.1 == 10% stride).

    Patch size is guaranteed, stride may deviate to make sure all pixels are covered.
    I.e., we move by regular strides until the current patch walks off the right/bottom,
    at which point it backs up to one patch from the end. So if your image is 15
    pixels wide and you have a stride of 10 pixels, you will get starting positions
    of 0 (from 0 to 9) and 5 (from 5 to 14).
    """

    if patch_stride is None:
        patch_stride = (round(patch_size[0]*(1.0-default_patch_overlap)),
                        round(patch_size[1]*(1.0-default_patch_overlap)))
    elif isinstance(patch_stride,float):
        patch_stride = (round(patch_size[0]*(patch_stride)),
                        round(patch_size[1]*(patch_stride)))

    image_width = image_size[0]
    image_height = image_size[1]

    assert patch_size[0] <= image_size[0], 'Patch width {} is larger than image width {}'.format(
        patch_size[0],image_size[0])
    assert patch_size[1] <= image_size[1], 'Patch height {} is larger than image height {}'.format(
        patch_size[1],image_size[1])

    def add_patch_row(patch_start_positions,y_start):
        """
        Add one row to our list of patch start positions, i.e.
        loop over all columns.
        """

        x_start = 0; x_end = x_start + patch_size[0] - 1

        while(True):

            patch_start_positions.append([x_start,y_start])

            # If this patch put us right at the end of the last column, we're done
            if x_end == image_width - 1:
                break

            # Move one patch to the right
            x_start += patch_stride[0]
            x_end = x_start + patch_size[0] - 1

            # If this patch flows over the edge, add one more patch to cover
            # the pixels on the end, then we're done.
            if x_end > (image_width - 1):
                overshoot = (x_end - image_width) + 1
                x_start -= overshoot
                x_end = x_start + patch_size[0] - 1
                patch_start_positions.append([x_start,y_start])
                break

        # ...for each column

        return patch_start_positions

    patch_start_positions = []

    y_start = 0; y_end = y_start + patch_size[1] - 1

    while(True):

        patch_start_positions = add_patch_row(patch_start_positions,y_start)

        # If this patch put us right at the bottom of the last row, we're done
        if y_end == image_height - 1:
            break

        # Move one patch down
        y_start += patch_stride[1]
        y_end = y_start + patch_size[1] - 1

        # If this patch flows over the bottom, add one more patch to cover
        # the pixels at the bottom, then we're done
        if y_end > (image_height - 1):
            overshoot = (y_end - image_height) + 1
            y_start -= overshoot
            y_end = y_start + patch_size[1] - 1
            patch_start_positions = add_patch_row(patch_start_positions,y_start)
            break

    # ...for each row

    for p in patch_start_positions:
        assert p[0] >= 0 and p[1] >= 0 and p[0] <= image_width and p[1] <= image_height, \
            'Patch generation error (illegal patch {})'.format(p)

    # The last patch should always end at the bottom-right of the image
    assert patch_start_positions[-1][0]+patch_size[0] == image_width, \
        'Patch generation error (last patch does not end on the right)'
    assert patch_start_positions[-1][1]+patch_size[1] == image_height, \
        'Patch generation error (last patch does not end at the bottom)'

    # All patches should be unique
    patch_start_positions_tuples = [tuple(x) for x in patch_start_positions]
    assert len(patch_start_positions_tuples) == len(set(patch_start_positions_tuples)), \
        'Patch generation error (duplicate start position)'

    return patch_start_positions

# ...get_patch_boundaries()
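A usage example matching the docstring's 15-pixel illustration (editorial, traced against the implementation above, not part of the removed file):

# 15x15 image, 10x10 patches, explicit 10-pixel stride: the second column/row
# backs up so the final patch ends at pixel 14.
starts = get_patch_boundaries([15,15], [10,10], patch_stride=(10,10))
print(starts)  # [[0, 0], [5, 0], [0, 5], [5, 5]]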
def patch_info_to_patch_name(image_name,patch_x_min,patch_y_min):

    patch_name = image_name + '_' + \
        str(patch_x_min).zfill(4) + '_' + str(patch_y_min).zfill(4)
    return patch_name


def extract_patch_from_image(im,patch_xy,patch_size,
                             patch_image_fn=None,patch_folder=None,image_name=None,overwrite=True):
    """
    Extracts a patch from the provided image, writing the patch out to patch_image_fn.
    [im] can be a string or a PIL image.

    patch_xy is a length-2 tuple specifying the upper-left corner of the patch.

    image_name and patch_folder are only required if patch_image_fn is None.

    Returns a dictionary with fields xmin,xmax,ymin,ymax,patch_fn.
    """

    if isinstance(im,str):
        pil_im = vis_utils.open_image(im)
    else:
        pil_im = im

    patch_x_min = patch_xy[0]
    patch_y_min = patch_xy[1]
    patch_x_max = patch_x_min + patch_size[0] - 1
    patch_y_max = patch_y_min + patch_size[1] - 1

    # PIL represents coordinates in a way that is very hard for me to get my head
    # around, such that even though the "right" and "bottom" arguments to the crop()
    # function are inclusive... well, they're not really.
    #
    # https://pillow.readthedocs.io/en/stable/handbook/concepts.html#coordinate-system
    #
    # So we add 1 to the max values.
    patch_im = pil_im.crop((patch_x_min,patch_y_min,patch_x_max+1,patch_y_max+1))
    assert patch_im.size[0] == patch_size[0]
    assert patch_im.size[1] == patch_size[1]

    if patch_image_fn is None:
        assert patch_folder is not None,\
            "If you don't supply a patch filename to extract_patch_from_image, you need to supply a folder name"
        patch_name = patch_info_to_patch_name(image_name,patch_x_min,patch_y_min)
        patch_image_fn = os.path.join(patch_folder,patch_name + '.jpg')

    if os.path.isfile(patch_image_fn) and (not overwrite):
        pass
    else:
        patch_im.save(patch_image_fn,quality=patch_jpeg_quality)

    patch_info = {}
    patch_info['xmin'] = patch_x_min
    patch_info['xmax'] = patch_x_max
    patch_info['ymin'] = patch_y_min
    patch_info['ymax'] = patch_y_max
    patch_info['patch_fn'] = patch_image_fn

    return patch_info
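Patch names embed the zero-padded tile origin, so files extracted from one image sort predictably; a small editorial example (the image name is hypothetical):

print(patch_info_to_patch_name('cam01_image001', 640, 0))
# cam01_image001_0640_0000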
def in_place_nms(md_results, iou_thres=0.45, verbose=True):
    """
    Run torchvision.ops.nms in-place on MD-formatted detection results
    """

    n_detections_before = 0
    n_detections_after = 0

    # i_image = 18; im = md_results['images'][i_image]
    for i_image,im in tqdm(enumerate(md_results['images']),total=len(md_results['images'])):

        if len(im['detections']) == 0:
            continue

        boxes = []
        scores = []

        n_detections_before += len(im['detections'])

        # det = im['detections'][0]
        for det in im['detections']:

            # Using x1/x2 notation rather than x0/x1 notation to be consistent
            # with the Torch documentation.
            x1 = det['bbox'][0]
            y1 = det['bbox'][1]
            x2 = det['bbox'][0] + det['bbox'][2]
            y2 = det['bbox'][1] + det['bbox'][3]
            box = [x1,y1,x2,y2]
            boxes.append(box)
            scores.append(det['conf'])

        # ...for each detection

        t_boxes = torch.tensor(boxes)
        t_scores = torch.tensor(scores)

        box_indices = ops.nms(t_boxes,t_scores,iou_thres).tolist()

        post_nms_detections = [im['detections'][x] for x in box_indices]

        assert len(post_nms_detections) <= len(im['detections'])

        im['detections'] = post_nms_detections

        n_detections_after += len(im['detections'])

    # ...for each image

    if verbose:
        print('NMS removed {} of {} detections'.format(
            n_detections_before-n_detections_after,
            n_detections_before))

# ...in_place_nms()
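A self-contained editorial illustration of that de-duplication step, with two hand-made overlapping boxes in MD format ([xmin, ymin, width, height] plus a confidence); their IoU is about 0.82, so the lower-confidence box is suppressed at the default 0.45 threshold:

results = {'images': [{'file': 'demo.jpg', 'detections': [
    {'category': '1', 'conf': 0.9, 'bbox': [0.10, 0.10, 0.40, 0.40]},
    {'category': '1', 'conf': 0.6, 'bbox': [0.12, 0.12, 0.40, 0.40]},
]}]}
in_place_nms(results, iou_thres=0.45)  # prints 'NMS removed 1 of 2 detections'
print(len(results['images'][0]['detections']))  # 1; the conf=0.9 box survives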
def _extract_tiles_for_image(fn_relative,image_folder,tiling_folder,patch_size,patch_stride,overwrite):
    """
    Extract tiles for a single image.

    Not really a standalone function; isolated from the main function to simplify
    multiprocessing.
    """

    fn_abs = os.path.join(image_folder,fn_relative)

    image_name = path_utils.clean_filename(fn_relative,char_limit=None,force_lower=True)

    # Open the image
    im = vis_utils.open_image(fn_abs)
    image_size = [im.width,im.height]

    # Generate patch boundaries (a list of [x,y] starting points)
    patch_boundaries = get_patch_boundaries(image_size,patch_size,patch_stride)

    # Extract patches
    #
    # patch_xy = patch_boundaries[0]
    patches = []

    for patch_xy in patch_boundaries:

        patch_info = extract_patch_from_image(im,patch_xy,patch_size,
                                              patch_folder=tiling_folder,
                                              image_name=image_name,
                                              overwrite=overwrite)
        patch_info['source_fn'] = fn_relative
        patches.append(patch_info)

    image_patch_info = {}
    image_patch_info['patches'] = patches
    image_patch_info['image_fn'] = fn_relative

    return image_patch_info


#%% Main function

def run_tiled_inference(model_file, image_folder, tiling_folder, output_file,
                        tile_size_x=1280, tile_size_y=1280, tile_overlap=0.5,
                        checkpoint_path=None, checkpoint_frequency=-1, remove_tiles=False,
                        yolo_inference_options=None,
                        n_patch_extraction_workers=default_n_patch_extraction_workers,
                        overwrite_tiles=True):
    """
    Run inference using [model_file] on the images in [image_folder], first splitting each image up
    into tiles of size [tile_size_x] x [tile_size_y], writing those tiles to [tiling_folder],
    then de-duplicating the results before merging them back into a set of detections that make
    sense on the original images and writing those results to [output_file].

    [tiling_folder] can be any folder, but this function reserves the right to do whatever it wants
    within that folder, including deleting everything, so it's best if it's a new folder.
    Conceptually this folder is temporary; it's just helpful in this case to not actually
    use the system temp folder, because the tile cache may be very large.

    tile_overlap is the fraction of overlap between tiles.

    Optionally removes the temporary tiles.

    If yolo_inference_options is supplied, it should be an instance of YoloInferenceOptions; in
    this case the model will be run with run_inference_with_yolov5_val. This is typically used to
    run the model with test-time augmentation.
    """

    ##%% Validate arguments

    assert tile_overlap < 1 and tile_overlap >= 0, \
        'Illegal tile overlap value {}'.format(tile_overlap)

    patch_size = [tile_size_x,tile_size_y]
    patch_stride = (round(patch_size[0]*(1.0-tile_overlap)),
                    round(patch_size[1]*(1.0-tile_overlap)))

    os.makedirs(tiling_folder,exist_ok=True)


    ##%% List files

    image_files_relative = path_utils.find_images(image_folder, recursive=True, return_relative_paths=True)
    assert len(image_files_relative) > 0, 'No images found in folder {}'.format(image_folder)


    ##%% Generate tiles

    all_image_patch_info = None

    print('Extracting patches from {} images'.format(len(image_files_relative)))

    n_workers = n_patch_extraction_workers

    if n_workers <= 1:

        all_image_patch_info = []

        # fn_relative = image_files_relative[0]
        for fn_relative in tqdm(image_files_relative):
            image_patch_info = \
                _extract_tiles_for_image(fn_relative,image_folder,tiling_folder,patch_size,patch_stride,
                                         overwrite=overwrite_tiles)
            all_image_patch_info.append(image_patch_info)

    else:

        from multiprocessing.pool import ThreadPool
        from multiprocessing.pool import Pool
        from functools import partial

        if n_workers > len(image_files_relative):

            print('Pool of {} requested, but only {} images available, reducing pool to {}'.\
                  format(n_workers,len(image_files_relative),len(image_files_relative)))
            n_workers = len(image_files_relative)

        if parallelization_uses_threads:
            pool = ThreadPool(n_workers); poolstring = 'threads'
        else:
            pool = Pool(n_workers); poolstring = 'processes'

        print('Starting patch extraction pool with {} {}'.format(n_workers,poolstring))

        all_image_patch_info = list(tqdm(pool.imap(
            partial(_extract_tiles_for_image,
                    image_folder=image_folder,
                    tiling_folder=tiling_folder,
                    patch_size=patch_size,
                    patch_stride=patch_stride,
                    overwrite=overwrite_tiles),
            image_files_relative),total=len(image_files_relative)))

    # ...for each image

    # Write tile information to file; this is just a debugging convenience
    folder_name = path_utils.clean_filename(image_folder,force_lower=True)
    if folder_name.startswith('_'):
        folder_name = folder_name[1:]

    tile_cache_file = os.path.join(tiling_folder,folder_name + '_patch_info.json')
    with open(tile_cache_file,'w') as f:
        json.dump(all_image_patch_info,f,indent=1)


    ##%% Run inference on tiles

    if yolo_inference_options is not None:

        patch_level_output_file = os.path.join(tiling_folder,folder_name + '_patch_level_results.json')

        if yolo_inference_options.model_filename is None:
            yolo_inference_options.model_filename = model_file
        else:
            assert yolo_inference_options.model_filename == model_file, \
                'Model file mismatch between yolo inference options ({}) and model file parameter ({})'.format(
                    yolo_inference_options.model_filename,model_file)

        yolo_inference_options.input_folder = tiling_folder
        yolo_inference_options.output_file = patch_level_output_file

        run_inference_with_yolo_val(yolo_inference_options)
        with open(patch_level_output_file,'r') as f:
            patch_level_results = json.load(f)

    else:

        patch_file_names = []
        for im in all_image_patch_info:
            for patch in im['patches']:
                patch_file_names.append(patch['patch_fn'])

        inference_results = load_and_run_detector_batch(model_file,
                                                        patch_file_names,
                                                        checkpoint_path=checkpoint_path,
                                                        checkpoint_frequency=checkpoint_frequency,
                                                        quiet=True)

        patch_level_output_file = os.path.join(tiling_folder,folder_name + '_patch_level_results.json')

        patch_level_results = write_results_to_file(inference_results,
                                                    patch_level_output_file,
                                                    relative_path_base=tiling_folder,
                                                    detector_file=model_file)


    ##%% Map patch-level detections back to the original images

    # Map relative paths for patches to detections
    patch_fn_relative_to_results = {}
    for im in tqdm(patch_level_results['images']):
        patch_fn_relative_to_results[im['file']] = im

    image_level_results = {}
    image_level_results['info'] = patch_level_results['info']
    image_level_results['detection_categories'] = patch_level_results['detection_categories']
    image_level_results['images'] = []

    image_fn_relative_to_patch_info = { x['image_fn']:x for x in all_image_patch_info }

    # i_image = 0; image_fn_relative = image_files_relative[i_image]
    for i_image,image_fn_relative in tqdm(enumerate(image_files_relative),total=len(image_files_relative)):

        image_fn_abs = os.path.join(image_folder,image_fn_relative)
        assert os.path.isfile(image_fn_abs)

        output_im = {}
        output_im['file'] = image_fn_relative
        output_im['detections'] = []

        pil_im = vis_utils.open_image(image_fn_abs)
        image_w = pil_im.size[0]
        image_h = pil_im.size[1]

        image_patch_info = image_fn_relative_to_patch_info[image_fn_relative]
        assert image_patch_info['patches'][0]['source_fn'] == image_fn_relative

        # Patches for this image
        patch_fn_abs_to_patch_info_this_image = {}

        for patch_info in image_patch_info['patches']:
            patch_fn_abs_to_patch_info_this_image[patch_info['patch_fn']] = patch_info

        # For each patch
        #
        # i_patch = 0; patch_fn_abs = list(patch_fn_abs_to_patch_info_this_image.keys())[i_patch]
        for i_patch,patch_fn_abs in enumerate(patch_fn_abs_to_patch_info_this_image.keys()):

            patch_fn_relative = os.path.relpath(patch_fn_abs,tiling_folder)
            patch_results = patch_fn_relative_to_results[patch_fn_relative]
            patch_info = patch_fn_abs_to_patch_info_this_image[patch_fn_abs]

            # patch_results['file'] is a relative path, and a subset of patch_info['patch_fn']
            assert patch_results['file'] in patch_info['patch_fn']

            patch_w = (patch_info['xmax'] - patch_info['xmin']) + 1
            patch_h = (patch_info['ymax'] - patch_info['ymin']) + 1
            assert patch_w == patch_size[0]
            assert patch_h == patch_size[1]

            # det = patch_results['detections'][0]
            for det in patch_results['detections']:

                bbox_patch_relative = det['bbox']
                xmin_patch_relative = bbox_patch_relative[0]
                ymin_patch_relative = bbox_patch_relative[1]
                w_patch_relative = bbox_patch_relative[2]
                h_patch_relative = bbox_patch_relative[3]

                # Convert from patch-relative normalized values to image-relative absolute values
                w_pixels = w_patch_relative * patch_w
                h_pixels = h_patch_relative * patch_h
                xmin_patch_pixels = xmin_patch_relative * patch_w
                ymin_patch_pixels = ymin_patch_relative * patch_h
                xmin_image_pixels = patch_info['xmin'] + xmin_patch_pixels
                ymin_image_pixels = patch_info['ymin'] + ymin_patch_pixels

                # ...and now to image-relative normalized values
                w_image_normalized = w_pixels / image_w
                h_image_normalized = h_pixels / image_h
                xmin_image_normalized = xmin_image_pixels / image_w
                ymin_image_normalized = ymin_image_pixels / image_h

                bbox_image_normalized = [xmin_image_normalized,
                                         ymin_image_normalized,
                                         w_image_normalized,
                                         h_image_normalized]

                output_det = {}
                output_det['bbox'] = bbox_image_normalized
                output_det['conf'] = det['conf']
                output_det['category'] = det['category']

                output_im['detections'].append(output_det)

            # ...for each detection

        # ...for each patch

        image_level_results['images'].append(output_im)

    # ...for each image

    image_level_results_file_pre_nms = \
        os.path.join(tiling_folder,folder_name + '_image_level_results_pre_nms.json')
    with open(image_level_results_file_pre_nms,'w') as f:
        json.dump(image_level_results,f,indent=1)


    ##%% Run NMS

    in_place_nms(image_level_results,iou_thres=nms_iou_threshold)


    ##%% Write output file

    print('Saving image-level results (after NMS) to {}'.format(output_file))

    with open(output_file,'w') as f:
        json.dump(image_level_results,f,indent=1)


    ##%% Possibly remove tiles

    if remove_tiles:

        patch_file_names = []
        for im in all_image_patch_info:
            for patch in im['patches']:
                patch_file_names.append(patch['patch_fn'])

        for patch_fn_abs in patch_file_names:
            os.remove(patch_fn_abs)


    ##%% Return

    return image_level_results
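The box-remapping arithmetic in the middle of run_tiled_inference deserves a worked example (editorial, not part of the removed file; all sizes hypothetical): a detection with patch-relative normalized bbox [0.25, 0.5, 0.5, 0.25] in a 1280x1280 tile whose origin is (640, 0) in a 2560x1920 image maps as follows:

patch_w = patch_h = 1280
tile_xmin, tile_ymin = 640, 0
image_w, image_h = 2560, 1920
x, y, w, h = 0.25, 0.5, 0.5, 0.25  # patch-relative, normalized
# normalized-in-patch -> pixels-in-patch -> pixels-in-image -> normalized-in-image
x_img = (tile_xmin + x*patch_w) / image_w   # (640 + 320) / 2560 = 0.375
y_img = (tile_ymin + y*patch_h) / image_h   # (0 + 640) / 1920 = 0.333...
w_img = (w*patch_w) / image_w               # 640 / 2560 = 0.25
h_img = (h*patch_h) / image_h               # 320 / 1920 = 0.166...
print([x_img, y_img, w_img, h_img])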
#%% Interactive driver

if False:

    pass

    #%% Run tiled inference (in Python)

    model_file = os.path.expanduser('~/models/camera_traps/megadetector/md_v5.0.0/md_v5a.0.0.pt')
    image_folder = os.path.expanduser('~/data/KRU-test')
    tiling_folder = os.path.expanduser('~/tmp/tiling-test')
    output_file = os.path.expanduser('~/tmp/KRU-test-tiled.json')

    tile_size_x = 3000
    tile_size_y = 3000
    tile_overlap = 0.5
    checkpoint_path = None
    checkpoint_frequency = -1
    remove_tiles = False

    use_yolo_inference = False

    if not use_yolo_inference:

        yolo_inference_options = None

    else:

        yolo_inference_options = YoloInferenceOptions()
        yolo_inference_options.yolo_working_folder = os.path.expanduser('~/git/yolov5')

    run_tiled_inference(model_file, image_folder, tiling_folder, output_file,
                        tile_size_x=tile_size_x, tile_size_y=tile_size_y,
                        tile_overlap=tile_overlap,
                        checkpoint_path=checkpoint_path,
                        checkpoint_frequency=checkpoint_frequency,
                        remove_tiles=remove_tiles,
                        yolo_inference_options=yolo_inference_options)


    #%% Run tiled inference (generate a command)

    import os

    model_file = os.path.expanduser('~/models/camera_traps/megadetector/md_v5.0.0/md_v5a.0.0.pt')
    image_folder = os.path.expanduser('~/data/KRU-test')
    tiling_folder = os.path.expanduser('~/tmp/tiling-test')
    output_file = os.path.expanduser('~/tmp/KRU-test-tiled.json')
    tile_size = [5152,3968]
    tile_overlap = 0.8

    cmd = f'python run_tiled_inference.py {model_file} {image_folder} {tiling_folder} {output_file} ' + \
          f'--tile_overlap {tile_overlap} --no_remove_tiles --tile_size_x {tile_size[0]} --tile_size_y {tile_size[1]}'

    print(cmd)
    import clipboard; clipboard.copy(cmd)


    #%% Preview tiled inference

    from api.batch_processing.postprocessing.postprocess_batch_results import (
        PostProcessingOptions, process_batch_results)

    options = PostProcessingOptions()
    options.image_base_dir = image_folder
    options.include_almost_detections = True
    options.num_images_to_sample = None
    options.confidence_threshold = 0.2
    options.almost_detection_confidence_threshold = options.confidence_threshold - 0.05
    options.ground_truth_json_file = None
    options.separate_detections_by_category = True
    # options.sample_seed = 0

    options.parallelize_rendering = True
    options.parallelize_rendering_n_cores = 10
    options.parallelize_rendering_with_threads = False

    preview_base = os.path.join(tiling_folder,'preview')
    os.makedirs(preview_base, exist_ok=True)

    print('Processing post-RDE to {}'.format(preview_base))

    options.api_output_file = output_file
    options.output_dir = preview_base
    ppresults = process_batch_results(options)
    html_output_file = ppresults.output_html_file

    path_utils.open_file(html_output_file)


#%% Command-line driver

import sys,argparse

def main():

    parser = argparse.ArgumentParser(
        description='Chop a folder of images up into tiles, run MD on the tiles, and stitch the results together')
    parser.add_argument(
        'model_file',
        help='Path to detector model file (.pb or .pt)')
    parser.add_argument(
        'image_folder',
        help='Folder containing images for inference (always recursive)')
    parser.add_argument(
        'tiling_folder',
        help='Temporary folder where tiles and intermediate results will be stored')
    parser.add_argument(
        'output_file',
        help='Path to output JSON results file, should end with a .json extension')
    parser.add_argument(
        '--no_remove_tiles',
        action='store_true',
        help='Tiles are removed by default; this option suppresses tile deletion')
    parser.add_argument(
        '--tile_size_x',
        type=int,
        default=default_tile_size[0],
        help=('Tile width (defaults to {})'.format(default_tile_size[0])))
    parser.add_argument(
        '--tile_size_y',
        type=int,
        default=default_tile_size[1],
        help=('Tile height (defaults to {})'.format(default_tile_size[1])))
    parser.add_argument(
        '--tile_overlap',
        type=float,
        default=default_patch_overlap,
        help=('Overlap between tiles [0,1] (defaults to {})'.format(default_patch_overlap)))

    if len(sys.argv[1:]) == 0:
        parser.print_help()
        parser.exit()

    args = parser.parse_args()

    assert os.path.exists(args.model_file), \
        'detector file {} does not exist'.format(args.model_file)

    if os.path.exists(args.output_file):
        print('Warning: output_file {} already exists and will be overwritten'.format(
            args.output_file))

    remove_tiles = (not args.no_remove_tiles)

    run_tiled_inference(args.model_file, args.image_folder, args.tiling_folder, args.output_file,
                        tile_size_x=args.tile_size_x, tile_size_y=args.tile_size_y,
                        tile_overlap=args.tile_overlap,
                        remove_tiles=remove_tiles)

if __name__ == '__main__':
    main()