megadetector-5.0.12-py3-none-any.whl → megadetector-5.0.14-py3-none-any.whl
- megadetector/api/batch_processing/api_core/server.py +1 -1
- megadetector/api/batch_processing/api_core/server_api_config.py +0 -1
- megadetector/api/batch_processing/api_core/server_job_status_table.py +0 -3
- megadetector/api/batch_processing/api_core/server_utils.py +0 -4
- megadetector/api/batch_processing/integration/eMammal/test_scripts/push_annotations_to_emammal.py +0 -1
- megadetector/api/synchronous/api_core/animal_detection_api/api_frontend.py +0 -3
- megadetector/classification/efficientnet/utils.py +0 -3
- megadetector/data_management/camtrap_dp_to_coco.py +0 -2
- megadetector/data_management/cct_json_utils.py +15 -6
- megadetector/data_management/coco_to_labelme.py +12 -1
- megadetector/data_management/databases/integrity_check_json_db.py +43 -27
- megadetector/data_management/importers/cacophony-thermal-importer.py +1 -4
- megadetector/data_management/ocr_tools.py +0 -4
- megadetector/data_management/read_exif.py +178 -44
- megadetector/data_management/rename_images.py +187 -0
- megadetector/data_management/wi_download_csv_to_coco.py +3 -2
- megadetector/data_management/yolo_output_to_md_output.py +7 -2
- megadetector/detection/process_video.py +548 -244
- megadetector/detection/pytorch_detector.py +33 -14
- megadetector/detection/run_detector.py +17 -5
- megadetector/detection/run_detector_batch.py +179 -65
- megadetector/detection/run_inference_with_yolov5_val.py +527 -357
- megadetector/detection/tf_detector.py +14 -3
- megadetector/detection/video_utils.py +284 -61
- megadetector/postprocessing/categorize_detections_by_size.py +16 -14
- megadetector/postprocessing/classification_postprocessing.py +716 -0
- megadetector/postprocessing/compare_batch_results.py +101 -93
- megadetector/postprocessing/convert_output_format.py +12 -5
- megadetector/postprocessing/merge_detections.py +18 -7
- megadetector/postprocessing/postprocess_batch_results.py +133 -127
- megadetector/postprocessing/repeat_detection_elimination/repeat_detections_core.py +236 -232
- megadetector/postprocessing/subset_json_detector_output.py +66 -62
- megadetector/taxonomy_mapping/preview_lila_taxonomy.py +0 -2
- megadetector/utils/ct_utils.py +5 -4
- megadetector/utils/md_tests.py +380 -128
- megadetector/utils/path_utils.py +39 -6
- megadetector/utils/process_utils.py +13 -4
- megadetector/visualization/visualization_utils.py +7 -2
- megadetector/visualization/visualize_db.py +79 -77
- megadetector/visualization/visualize_detector_output.py +0 -1
- {megadetector-5.0.12.dist-info → megadetector-5.0.14.dist-info}/LICENSE +0 -0
- {megadetector-5.0.12.dist-info → megadetector-5.0.14.dist-info}/METADATA +2 -2
- {megadetector-5.0.12.dist-info → megadetector-5.0.14.dist-info}/RECORD +45 -43
- {megadetector-5.0.12.dist-info → megadetector-5.0.14.dist-info}/top_level.txt +0 -0
- {megadetector-5.0.12.dist-info → megadetector-5.0.14.dist-info}/WHEEL +0 -0
--- a/megadetector/detection/pytorch_detector.py
+++ b/megadetector/detection/pytorch_detector.py
@@ -8,11 +8,9 @@ Module to run MegaDetector v5, a PyTorch YOLOv5 animal detection model.
 
 #%% Imports and constants
 
-import sys
 import torch
 import numpy as np
 import traceback
-import builtins
 
 from megadetector.detection.run_detector import CONF_DIGITS, COORD_DIGITS, FAILURE_INFER
 from megadetector.utils import ct_utils
@@ -130,7 +128,20 @@ class PTDetector:
                     self.device = 'mps'
             except AttributeError:
                 pass
-        self.model = PTDetector._load_model(model_path, self.device)
+        try:
+            self.model = PTDetector._load_model(model_path, self.device)
+        except Exception as e:
+            # In a very esoteric scenario where an old version of YOLOv5 is used to run
+            # newer models, we run into an issue because the "Model" class became
+            # "DetectionModel". New YOLOv5 code handles this case by just setting them
+            # to be the same, so doing that via monkey-patch doesn't seem *that* rude.
+            if "Can't get attribute 'DetectionModel'" in str(e):
+                print('Forward-compatibility issue detected, patching')
+                from models import yolo
+                yolo.DetectionModel = yolo.Model
+                self.model = PTDetector._load_model(model_path, self.device)
+            else:
+                raise
         if (self.device != 'cpu'):
             print('Sending model to GPU')
             self.model.to(self.device)
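The try/except added here works around a pickle-time rename in YOLOv5: checkpoints saved by newer YOLOv5 code reference models.yolo.DetectionModel, which older checkouts only define as Model, so unpickling fails with an AttributeError naming the missing class. A minimal standalone sketch of the same aliasing trick (assuming a YOLOv5 checkout on sys.path; the checkpoint filename is a placeholder):

    import torch

    def load_checkpoint_with_compat(checkpoint_path, device='cpu'):
        # torch.load unpickles the checkpoint, so every class name that
        # existed when it was saved must be importable now
        try:
            return torch.load(checkpoint_path, map_location=device)
        except Exception as e:
            if "Can't get attribute 'DetectionModel'" in str(e):
                from models import yolo  # YOLOv5's module, not megadetector's
                yolo.DetectionModel = yolo.Model  # alias the renamed class
                return torch.load(checkpoint_path, map_location=device)
            raise

    ckpt = load_checkpoint_with_compat('md_v5a.0.0.pt')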
@@ -169,9 +180,12 @@ class PTDetector:
 
         return model
 
-    def generate_detections_one_image(self, img_original, image_id='unknown',
-                                      detection_threshold=0.00001, image_size=None,
-                                      skip_image_resizing=False):
+    def generate_detections_one_image(self, img_original,
+                                      image_id='unknown',
+                                      detection_threshold=0.00001,
+                                      image_size=None,
+                                      skip_image_resizing=False,
+                                      augment=False):
         """
         Applies the detector to an image.
 
@@ -181,11 +195,11 @@ class PTDetector:
                 of the output object
             detection_threshold (float, optional): only detections above this confidence threshold
                 will be included in the return value
-            image_size (tuple, optional): image size to use for inference, only mess with this
-                if (a) you're using a model other than MegaDetector or (b) you know what you're
-                getting into
-            skip_image_resizing (bool, optional): whether to skip internal image resizing (and rely
-                on external resizing)
+            image_size (tuple, optional): image size to use for inference, only mess with this if
+                (a) you're using a model other than MegaDetector or (b) you know what you're getting into
+            skip_image_resizing (bool, optional): whether to skip internal image resizing (and rely on
+                external resizing)
+            augment (bool, optional): enable (implementation-specific) image augmentation
 
         Returns:
             dict: a dictionary with the following fields:
@@ -201,6 +215,9 @@
         detections = []
         max_conf = 0.0
 
+        if detection_threshold is None:
+            detection_threshold = 0
+
         try:
 
             img_original = np.asarray(img_original)
@@ -228,8 +245,10 @@
             if skip_image_resizing:
                 img = img_original
             else:
-                letterbox_result = letterbox(img_original, new_shape=target_size,
-                                             stride=PTDetector.STRIDE, auto=True)
+                letterbox_result = letterbox(img_original,
+                                             new_shape=target_size,
+                                             stride=PTDetector.STRIDE,
+                                             auto=True)
                 img = letterbox_result[0]
 
             # HWC to CHW; PIL Image is RGB already
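For reference, letterboxing scales an image so its long side matches the requested inference size, then pads so both dimensions are multiples of the model stride (the auto=True behavior above). A rough sketch of the shape arithmetic only, assuming MegaDetector v5's nominal 1280-pixel input and a stride of 64; this is not YOLOv5's actual letterbox() implementation:

    def letterboxed_shape(h, w, new_size=1280, stride=64):
        # Scale the long side to new_size, preserving aspect ratio
        r = new_size / max(h, w)
        new_h, new_w = round(h * r), round(w * r)
        # Pad each dimension up to the next multiple of the stride
        pad_h = (stride - new_h % stride) % stride
        pad_w = (stride - new_w % stride) % stride
        return new_h + pad_h, new_w + pad_w

    print(letterboxed_shape(1536, 2048))  # -> (960, 1280)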
@@ -244,7 +263,7 @@
             if len(img.shape) == 3:
                 img = torch.unsqueeze(img, 0)
 
-            pred = self.model(img)[0]
+            pred = self.model(img,augment=augment)[0]
 
             # NMS
             if self.device == 'mps':
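Taken together, the pytorch_detector.py changes mean the per-image entry point accepts an augment flag end to end. A usage sketch (the 'MDV5A' model shorthand and image path are illustrative):

    from megadetector.detection.run_detector import load_detector
    from megadetector.visualization import visualization_utils as vis_utils

    detector = load_detector('MDV5A')
    image = vis_utils.load_image('test.jpg')
    result = detector.generate_detections_one_image(
        image,
        image_id='test.jpg',
        detection_threshold=0.2,
        augment=False)  # new in 5.0.14, per the diff above
    print(result['detections'])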
--- a/megadetector/detection/run_detector.py
+++ b/megadetector/detection/run_detector.py
@@ -394,7 +394,8 @@ def load_and_run_detector(model_file,
                           box_thickness=DEFAULT_BOX_THICKNESS,
                           box_expansion=DEFAULT_BOX_EXPANSION,
                           image_size=None,
-                          label_font_size=DEFAULT_LABEL_FONT_SIZE
+                          label_font_size=DEFAULT_LABEL_FONT_SIZE,
+                          augment=False
                           ):
     r"""
     Loads and runs a detector on target images, and visualizes the results.
@@ -415,6 +416,7 @@ def load_and_run_detector(model_file,
             doing
         label_font_size (float, optional): font size to use for displaying class names
             and confidence values in the rendered images
+        augment (bool, optional): enable (implementation-specific) image augmentation
     """
 
     if len(image_file_names) == 0:
@@ -506,9 +508,12 @@ def load_and_run_detector(model_file,
         try:
             start_time = time.time()
 
-            result = detector.generate_detections_one_image(image, im_file,
-                detection_threshold=DEFAULT_OUTPUT_CONFIDENCE_THRESHOLD,
-                image_size=image_size)
+            result = detector.generate_detections_one_image(
+                image,
+                im_file,
+                detection_threshold=DEFAULT_OUTPUT_CONFIDENCE_THRESHOLD,
+                image_size=image_size,
+                augment=augment)
             detection_results.append(result)
 
             elapsed = time.time() - start_time
@@ -679,6 +684,12 @@ def main():
             help=('If set, produces separate output images for each crop, '
                   'rather than adding bounding boxes to the original image'))
 
+    parser.add_argument(
+        '--augment',
+        default=False,
+        action='store_true',
+        help=('Enable image augmentation'))
+
     parser.add_argument(
         '--box_thickness',
         type=int,
@@ -755,7 +766,8 @@ def main():
         box_expansion=args.box_expansion,
         crop_images=args.crop,
         image_size=args.image_size,
-        label_font_size=args.label_font_size)
+        label_font_size=args.label_font_size,
+        augment=args.augment)
 
 if __name__ == '__main__':
     main()
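With the argparse wiring above, augmentation can now be toggled from the command line. An illustrative invocation (model shorthand, file name, and threshold are placeholders):

    python -m megadetector.detection.run_detector MDV5A \
        --image_file test.jpg --threshold 0.2 --augment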
--- a/megadetector/detection/run_detector_batch.py
+++ b/megadetector/detection/run_detector_batch.py
@@ -126,7 +126,15 @@ def _producer_func(q,image_files):
     print('Finished image loading'); sys.stdout.flush()
 
 
-def _consumer_func(q,return_queue,model_file,confidence_threshold,image_size=None):
+def _consumer_func(q,
+                   return_queue,
+                   model_file,
+                   confidence_threshold,
+                   image_size=None,
+                   include_image_size=False,
+                   include_image_timestamp=False,
+                   include_exif_data=False,
+                   augment=False):
     """
     Consumer function; only used when using the (optional) image queue.
 
@@ -168,16 +176,30 @@ def _consumer_func(q,return_queue,model_file,confidence_threshold,image_size=None):
                 results.append({'file': im_file,
                                 'failure': image})
             else:
-                results.append(process_image(im_file=im_file,detector=detector,
+                results.append(process_image(im_file=im_file,
+                                             detector=detector,
                                              confidence_threshold=confidence_threshold,
-                                             image=image,quiet=True,image_size=image_size))
+                                             image=image,
+                                             quiet=True,
+                                             image_size=image_size,
+                                             include_image_size=include_image_size,
+                                             include_image_timestamp=include_image_timestamp,
+                                             include_exif_data=include_exif_data,
+                                             augment=augment))
             if verbose:
                 print('Processed image {}'.format(im_file)); sys.stdout.flush()
             q.task_done()
 
 
-def run_detector_with_image_queue(image_files,model_file,confidence_threshold,
-                                  quiet=False,image_size=None):
+def run_detector_with_image_queue(image_files,
+                                  model_file,
+                                  confidence_threshold,
+                                  quiet=False,
+                                  image_size=None,
+                                  include_image_size=False,
+                                  include_image_timestamp=False,
+                                  include_exif_data=False,
+                                  augment=False):
     """
     Driver function for the (optional) multiprocessing-based image queue; only used
     when --use_image_queue is specified. Starts a reader process to read images from disk, but
@@ -218,15 +240,37 @@ def run_detector_with_image_queue(image_files,model_file,confidence_threshold,
 
     if run_separate_consumer_process:
         if use_threads_for_queue:
-            consumer = Thread(target=_consumer_func,args=(q,return_queue,model_file,
-                                                          confidence_threshold,image_size))
+            consumer = Thread(target=_consumer_func,args=(q,
+                                                          return_queue,
+                                                          model_file,
+                                                          confidence_threshold,
+                                                          image_size,
+                                                          include_image_size,
+                                                          include_image_timestamp,
+                                                          include_exif_data,
+                                                          augment))
         else:
-            consumer = Process(target=_consumer_func,args=(q,return_queue,model_file,
-                                                           confidence_threshold,image_size))
+            consumer = Process(target=_consumer_func,args=(q,
+                                                           return_queue,
+                                                           model_file,
+                                                           confidence_threshold,
+                                                           image_size,
+                                                           include_image_size,
+                                                           include_image_timestamp,
+                                                           include_exif_data,
+                                                           augment))
         consumer.daemon = True
         consumer.start()
     else:
-        _consumer_func(q,return_queue,model_file,confidence_threshold,image_size)
+        _consumer_func(q,
+                       return_queue,
+                       model_file,
+                       confidence_threshold,
+                       image_size,
+                       include_image_size,
+                       include_image_timestamp,
+                       include_exif_data,
+                       augment)
 
     producer.join()
     print('Producer finished')
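Note that _consumer_func is launched with a positional args tuple, so a new parameter like augment has to be appended in the same order at all three call sites. A generic sketch of the pattern with a stand-in worker (this is not megadetector code; the model path and threshold are placeholders):

    from multiprocessing import JoinableQueue, Process, Queue
    from threading import Thread

    def _worker(q, return_queue, model_file, confidence_threshold,
                image_size=None, include_image_size=False,
                include_image_timestamp=False, include_exif_data=False,
                augment=False):
        # Stand-in consumer: drain the queue until the None sentinel
        while True:
            item = q.get()
            q.task_done()
            if item is None:
                break
        return_queue.put('done')

    q = JoinableQueue()
    return_queue = Queue()
    launch_args = (q, return_queue, 'md_v5a.0.0.pt', 0.005,
                   None, False, False, False, False)
    consumer = Thread(target=_worker, args=launch_args)  # or Process(...), same tuple
    consumer.daemon = True
    consumer.start()
    q.put(None)
    q.join()
    print(return_queue.get())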
@@ -262,15 +306,20 @@ def _chunks_by_number_of_chunks(ls, n):
 
 #%% Image processing functions
 
-def process_images(im_files, detector, confidence_threshold, use_image_queue=False,
-                   quiet=False, image_size=None, checkpoint_queue=None,
-                   include_image_size=False, include_image_timestamp=False,
-                   include_exif_data=False):
+def process_images(im_files,
+                   detector,
+                   confidence_threshold,
+                   use_image_queue=False,
+                   quiet=False,
+                   image_size=None,
+                   checkpoint_queue=None,
+                   include_image_size=False,
+                   include_image_timestamp=False,
+                   include_exif_data=False,
+                   augment=False):
     """
-    Runs a detector (typically MegaDetector) over a list of image files.
-    ...
-    standard inference path (which instead loops over process_image()).
-
+    Runs a detector (typically MegaDetector) over a list of image files on a single thread.
+
     Args:
         im_files (list: paths to image files
         detector (str or detector object): loaded model or str; if this is a string, it can be a
@@ -284,7 +333,8 @@ def process_images(im_files, detector, confidence_threshold, use_image_queue=False,
         checkpoint_queue (Queue, optional): internal parameter used to pass image queues around
         include_image_size (bool, optional): should we include image size in the output for each image?
         include_image_timestamp (bool, optional): should we include image timestamps in the output for each image?
-        include_exif_data (bool, optional): should we include EXIF data in the output for each image?
+        include_exif_data (bool, optional): should we include EXIF data in the output for each image?
+        augment (bool, optional): enable image augmentation
 
     Returns:
         list: list of dicts, in which each dict represents detections on one image,
@@ -292,25 +342,37 @@ def process_images(im_files, detector, confidence_threshold, use_image_queue=False,
     """
 
     if isinstance(detector, str):
+
         start_time = time.time()
         detector = load_detector(detector)
         elapsed = time.time() - start_time
         print('Loaded model (batch level) in {}'.format(humanfriendly.format_timespan(elapsed)))
 
     if use_image_queue:
-        run_detector_with_image_queue(im_files, detector, confidence_threshold,
-                                      quiet=quiet, image_size=image_size,
+
+        run_detector_with_image_queue(im_files,
+                                      detector,
+                                      confidence_threshold,
+                                      quiet=quiet,
+                                      image_size=image_size,
                                       include_image_size=include_image_size,
                                       include_image_timestamp=include_image_timestamp,
-                                      include_exif_data=include_exif_data)
+                                      include_exif_data=include_exif_data,
+                                      augment=augment)
+
     else:
+
         results = []
         for im_file in im_files:
-            result = process_image(im_file, detector, confidence_threshold,
-                                   quiet=quiet, image_size=image_size,
-                                   include_image_size=include_image_size,
-                                   include_image_timestamp=include_image_timestamp,
-                                   include_exif_data=include_exif_data)
+            result = process_image(im_file,
+                                   detector,
+                                   confidence_threshold,
+                                   quiet=quiet,
+                                   image_size=image_size,
+                                   include_image_size=include_image_size,
+                                   include_image_timestamp=include_image_timestamp,
+                                   include_exif_data=include_exif_data,
+                                   augment=augment)
 
             if checkpoint_queue is not None:
                 checkpoint_queue.put(result)
@@ -321,10 +383,16 @@ def process_images(im_files, detector, confidence_threshold, use_image_queue=False,
 # ...def process_images(...)
 
 
-def process_image(im_file, detector, confidence_threshold, image=None,
-                  quiet=False, image_size=None, include_image_size=False,
-                  include_image_timestamp=False, include_exif_data=False,
-                  skip_image_resizing=False):
+def process_image(im_file, detector,
+                  confidence_threshold,
+                  image=None,
+                  quiet=False,
+                  image_size=None,
+                  include_image_size=False,
+                  include_image_timestamp=False,
+                  include_exif_data=False,
+                  skip_image_resizing=False,
+                  augment=False):
     """
     Runs a detector (typically MegaDetector) on a single image file.
 
@@ -343,6 +411,7 @@ def process_image(im_file, detector, confidence_threshold, image=None,
         include_image_timestamp (bool, optional): should we include image timestamps in the output for each image?
         include_exif_data (bool, optional): should we include EXIF data in the output for each image?
         skip_image_resizing (bool, optional): whether to skip internal image resizing and rely on external resizing
+        augment (bool, optional): enable image augmentation
 
     Returns:
         dict: dict representing detections on one image,
@@ -367,8 +436,12 @@ def process_image(im_file, detector, confidence_threshold, image=None,
 
     try:
         result = detector.generate_detections_one_image(
-            image, im_file, detection_threshold=confidence_threshold,
-            image_size=image_size, skip_image_resizing=skip_image_resizing)
+            image,
+            im_file,
+            detection_threshold=confidence_threshold,
+            image_size=image_size,
+            skip_image_resizing=skip_image_resizing,
+            augment=augment)
     except Exception as e:
         if not quiet:
             print('Image {} cannot be processed. Exception: {}'.format(im_file, e))
@@ -423,12 +496,21 @@ def _load_custom_class_mapping(class_mapping_filename):
 
 #%% Main function
 
-def load_and_run_detector_batch(model_file, image_file_names, checkpoint_path=None,
+def load_and_run_detector_batch(model_file,
+                                image_file_names,
+                                checkpoint_path=None,
                                 confidence_threshold=run_detector.DEFAULT_OUTPUT_CONFIDENCE_THRESHOLD,
-                                checkpoint_frequency=-1, results=None, n_cores=1,
-                                use_image_queue=False, quiet=False, image_size=None,
-                                class_mapping_filename=None, include_image_size=False,
-                                include_image_timestamp=False, include_exif_data=False):
+                                checkpoint_frequency=-1,
+                                results=None,
+                                n_cores=1,
+                                use_image_queue=False,
+                                quiet=False,
+                                image_size=None,
+                                class_mapping_filename=None,
+                                include_image_size=False,
+                                include_image_timestamp=False,
+                                include_exif_data=False,
+                                augment=False):
     """
     Load a model file and run it on a list of images.
 
@@ -455,7 +537,8 @@ def load_and_run_detector_batch(model_file, image_file_names, checkpoint_path=None,
             file or YOLOv5 dataset.yaml file
         include_image_size (bool, optional): should we include image size in the output for each image?
         include_image_timestamp (bool, optional): should we include image timestamps in the output for each image?
-        include_exif_data (bool, optional): should we include EXIF data in the output for each image?
+        include_exif_data (bool, optional): should we include EXIF data in the output for each image?
+        augment (bool, optional): enable image augmentation
 
     Returns:
         results: list of dicts; each dict represents detections on one image
@@ -539,9 +622,15 @@ def load_and_run_detector_batch(model_file, image_file_names, checkpoint_path=None,
         assert len(results) == 0, \
             'Using an image queue with results loaded from a checkpoint is not currently supported'
         assert n_cores <= 1
-        results = run_detector_with_image_queue(image_file_names, model_file,
-                                                confidence_threshold, quiet,
-                                                image_size=image_size)
+        results = run_detector_with_image_queue(image_file_names,
+                                                model_file,
+                                                confidence_threshold,
+                                                quiet,
+                                                image_size=image_size,
+                                                include_image_size=include_image_size,
+                                                include_image_timestamp=include_image_timestamp,
+                                                include_exif_data=include_exif_data,
+                                                augment=augment)
 
     elif n_cores <= 1:
 
@@ -565,11 +654,15 @@ def load_and_run_detector_batch(model_file, image_file_names, checkpoint_path=None,
 
             count += 1
 
-            result = process_image(im_file, detector, confidence_threshold,
-                                   quiet=quiet, image_size=image_size,
-                                   include_image_size=include_image_size,
+            result = process_image(im_file,
+                                   detector,
+                                   confidence_threshold,
+                                   quiet=quiet,
+                                   image_size=image_size,
+                                   include_image_size=include_image_size,
                                    include_image_timestamp=include_image_timestamp,
-                                   include_exif_data=include_exif_data)
+                                   include_exif_data=include_exif_data,
+                                   augment=augment)
             results.append(result)
 
             # Write a checkpoint if necessary
@@ -616,14 +709,18 @@ def load_and_run_detector_batch(model_file, image_file_names, checkpoint_path=None,
                 checkpoint_queue, results), daemon=True)
             checkpoint_thread.start()
 
-            pool.map(partial(process_images, detector=detector,
-                             confidence_threshold=confidence_threshold,
-                             use_image_queue=False, quiet=quiet,
-                             image_size=image_size,
-                             checkpoint_queue=checkpoint_queue,
-                             include_image_size=include_image_size,
-                             include_image_timestamp=include_image_timestamp,
-                             include_exif_data=include_exif_data), image_batches)
+            pool.map(partial(process_images,
+                             detector=detector,
+                             confidence_threshold=confidence_threshold,
+                             use_image_queue=False,
+                             quiet=quiet,
+                             image_size=image_size,
+                             checkpoint_queue=checkpoint_queue,
+                             include_image_size=include_image_size,
+                             include_image_timestamp=include_image_timestamp,
+                             include_exif_data=include_exif_data,
+                             augment=augment),
+                     image_batches)
 
             checkpoint_queue.put(None)
 
@@ -631,12 +728,18 @@ def load_and_run_detector_batch(model_file, image_file_names, checkpoint_path=None,
 
         # Multprocessing is enabled, but checkpointing is not
 
-            new_results = pool.map(partial(process_images, detector=detector,
-                                           confidence_threshold=confidence_threshold,
-                                           use_image_queue=False, quiet=quiet, checkpoint_queue=None,
-                                           image_size=image_size, include_image_size=include_image_size,
-                                           include_image_timestamp=include_image_timestamp,
-                                           include_exif_data=include_exif_data), image_batches)
+            new_results = pool.map(partial(process_images,
+                                           detector=detector,
+                                           confidence_threshold=confidence_threshold,
+                                           use_image_queue=False,
+                                           quiet=quiet,
+                                           checkpoint_queue=None,
+                                           image_size=image_size,
+                                           include_image_size=include_image_size,
+                                           include_image_timestamp=include_image_timestamp,
+                                           include_exif_data=include_exif_data,
+                                           augment=augment),
+                                   image_batches)
 
         new_results = list(itertools.chain.from_iterable(new_results))
 
@@ -724,9 +827,14 @@ def get_image_datetime(image):
         return None
 
 
-def write_results_to_file(results, output_file, relative_path_base=None, detector_file=None,
-                          info=None, include_max_conf=False, custom_metadata=None,
-                          force_forward_slashes=True):
+def write_results_to_file(results,
+                          output_file,
+                          relative_path_base=None,
+                          detector_file=None,
+                          info=None,
+                          include_max_conf=False,
+                          custom_metadata=None,
+                          force_forward_slashes=True):
     """
     Writes list of detection results to JSON output file. Format matches:
 
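The updated batch entry points compose as below; a sketch using the signatures shown in this diff (directory, output file, and the 'MDV5A' shorthand are illustrative):

    from megadetector.detection.run_detector_batch import (
        load_and_run_detector_batch, write_results_to_file)
    from megadetector.utils import path_utils

    image_file_names = path_utils.find_images('/data/camera_traps', recursive=True)
    results = load_and_run_detector_batch('MDV5A',
                                          image_file_names,
                                          confidence_threshold=0.005,
                                          quiet=True,
                                          augment=False)
    write_results_to_file(results,
                          'detections.json',
                          relative_path_base='/data/camera_traps',
                          detector_file='MDV5A')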
@@ -951,7 +1059,12 @@ def main():
         '--image_size',
         type=int,
         default=None,
-        help=('Force image resizing to a
+        help=('Force image resizing to a specific integer size on the long axis (not recommended to change this)'))
+    parser.add_argument(
+        '--augment',
+        action='store_true',
+        help='Enable image augmentation'
+    )
     parser.add_argument(
         '--use_image_queue',
         action='store_true',
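And the equivalent command-line run using the new flag (illustrative paths; --recursive and --output_relative_filenames are existing run_detector_batch options):

    python -m megadetector.detection.run_detector_batch MDV5A \
        /data/camera_traps detections.json \
        --recursive --output_relative_filenames --augment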
@@ -1196,7 +1309,8 @@ def main():
         class_mapping_filename=args.class_mapping_filename,
         include_image_size=args.include_image_size,
         include_image_timestamp=args.include_image_timestamp,
-        include_exif_data=args.include_exif_data)
+        include_exif_data=args.include_exif_data,
+        augment=args.augment)
 
     elapsed = time.time() - start_time
     images_per_second = len(results) / elapsed