megadetector-5.0.13-py3-none-any.whl → megadetector-5.0.15-py3-none-any.whl
This diff reflects the changes between two publicly released versions of the megadetector package, as they appear in the public package registry; it is provided for informational purposes only.
- megadetector/data_management/read_exif.py +11 -5
- megadetector/detection/process_video.py +230 -70
- megadetector/detection/pytorch_detector.py +16 -11
- megadetector/detection/run_detector.py +17 -5
- megadetector/detection/run_detector_batch.py +186 -67
- megadetector/detection/tf_detector.py +11 -3
- megadetector/detection/video_utils.py +177 -43
- megadetector/postprocessing/convert_output_format.py +12 -5
- megadetector/utils/md_tests.py +279 -108
- megadetector/utils/path_utils.py +38 -6
- megadetector/utils/process_utils.py +8 -2
- megadetector/visualization/visualization_utils.py +7 -2
- megadetector/visualization/visualize_detector_output.py +0 -1
- {megadetector-5.0.13.dist-info → megadetector-5.0.15.dist-info}/METADATA +1 -1
- {megadetector-5.0.13.dist-info → megadetector-5.0.15.dist-info}/RECORD +18 -18
- {megadetector-5.0.13.dist-info → megadetector-5.0.15.dist-info}/LICENSE +0 -0
- {megadetector-5.0.13.dist-info → megadetector-5.0.15.dist-info}/WHEEL +0 -0
- {megadetector-5.0.13.dist-info → megadetector-5.0.15.dist-info}/top_level.txt +0 -0
megadetector/detection/run_detector.py

@@ -394,7 +394,8 @@ def load_and_run_detector(model_file,
                           box_thickness=DEFAULT_BOX_THICKNESS,
                           box_expansion=DEFAULT_BOX_EXPANSION,
                           image_size=None,
-                          label_font_size=DEFAULT_LABEL_FONT_SIZE
+                          label_font_size=DEFAULT_LABEL_FONT_SIZE,
+                          augment=False
                           ):
     r"""
     Loads and runs a detector on target images, and visualizes the results.

@@ -415,6 +416,7 @@ def load_and_run_detector(model_file,
             doing
         label_font_size (float, optional): font size to use for displaying class names
             and confidence values in the rendered images
+        augment (bool, optional): enable (implementation-specific) image augmentation
     """

     if len(image_file_names) == 0:

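For reference, a minimal sketch of calling the updated function with the new flag (model path and file names are hypothetical; other parameters keep their defaults):

    from megadetector.detection.run_detector import load_and_run_detector

    # Hypothetical paths; augment now flows through to the detector
    load_and_run_detector(model_file='md_v5a.0.0.pt',
                          image_file_names=['image1.jpg', 'image2.jpg'],
                          output_dir='rendered_images',
                          augment=True)
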
@@ -506,9 +508,12 @@ def load_and_run_detector(model_file,
         try:
             start_time = time.time()

-            result = detector.generate_detections_one_image(
-
-
+            result = detector.generate_detections_one_image(
+                image,
+                im_file,
+                detection_threshold=DEFAULT_OUTPUT_CONFIDENCE_THRESHOLD,
+                image_size=image_size,
+                augment=augment)
             detection_results.append(result)

             elapsed = time.time() - start_time

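The call now passes its arguments by keyword; a sketch of invoking a loaded detector directly the same way (file names hypothetical, using the package's own image-loading utility):

    from megadetector.detection.run_detector import \
        load_detector, DEFAULT_OUTPUT_CONFIDENCE_THRESHOLD
    from megadetector.visualization import visualization_utils as vis_utils

    detector = load_detector('md_v5a.0.0.pt')
    image = vis_utils.load_image('image1.jpg')
    result = detector.generate_detections_one_image(
        image,
        'image1.jpg',
        detection_threshold=DEFAULT_OUTPUT_CONFIDENCE_THRESHOLD,
        image_size=None,
        augment=False)
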
@@ -679,6 +684,12 @@ def main():
         help=('If set, produces separate output images for each crop, '
               'rather than adding bounding boxes to the original image'))

+    parser.add_argument(
+        '--augment',
+        default=False,
+        action='store_true',
+        help=('Enable image augmentation'))
+
     parser.add_argument(
         '--box_thickness',
         type=int,

@@ -755,7 +766,8 @@ def main():
                           box_expansion=args.box_expansion,
                           crop_images=args.crop,
                           image_size=args.image_size,
-                          label_font_size=args.label_font_size
+                          label_font_size=args.label_font_size,
+                          augment=args.augment)

 if __name__ == '__main__':
     main()

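On the command line, the new flag would look something like this (paths hypothetical; the positional model argument and image options follow the script's existing interface):

    python -m megadetector.detection.run_detector md_v5a.0.0.pt \
        --image_dir camera_trap_images --augment
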
megadetector/detection/run_detector_batch.py

@@ -126,7 +126,15 @@ def _producer_func(q,image_files):
     print('Finished image loading'); sys.stdout.flush()


-def _consumer_func(q,
+def _consumer_func(q,
+                   return_queue,
+                   model_file,
+                   confidence_threshold,
+                   image_size=None,
+                   include_image_size=False,
+                   include_image_timestamp=False,
+                   include_exif_data=False,
+                   augment=False):
     """
     Consumer function; only used when using the (optional) image queue.

@@ -168,16 +176,30 @@ def _consumer_func(q,return_queue,model_file,confidence_threshold,image_size=Non
             results.append({'file': im_file,
                             'failure': image})
         else:
-            results.append(process_image(im_file=im_file,
+            results.append(process_image(im_file=im_file,
+                                         detector=detector,
                                          confidence_threshold=confidence_threshold,
-                                         image=image,
+                                         image=image,
+                                         quiet=True,
+                                         image_size=image_size,
+                                         include_image_size=include_image_size,
+                                         include_image_timestamp=include_image_timestamp,
+                                         include_exif_data=include_exif_data,
+                                         augment=augment))
         if verbose:
             print('Processed image {}'.format(im_file)); sys.stdout.flush()
         q.task_done()


-def run_detector_with_image_queue(image_files,
-
+def run_detector_with_image_queue(image_files,
+                                  model_file,
+                                  confidence_threshold,
+                                  quiet=False,
+                                  image_size=None,
+                                  include_image_size=False,
+                                  include_image_timestamp=False,
+                                  include_exif_data=False,
+                                  augment=False):
     """
     Driver function for the (optional) multiprocessing-based image queue; only used
     when --use_image_queue is specified. Starts a reader process to read images from disk, but

@@ -218,15 +240,37 @@ def run_detector_with_image_queue(image_files,model_file,confidence_threshold,

     if run_separate_consumer_process:
         if use_threads_for_queue:
-            consumer = Thread(target=_consumer_func,args=(q,
-
+            consumer = Thread(target=_consumer_func,args=(q,
+                                                          return_queue,
+                                                          model_file,
+                                                          confidence_threshold,
+                                                          image_size,
+                                                          include_image_size,
+                                                          include_image_timestamp,
+                                                          include_exif_data,
+                                                          augment))
         else:
-            consumer = Process(target=_consumer_func,args=(q,
-
+            consumer = Process(target=_consumer_func,args=(q,
+                                                           return_queue,
+                                                           model_file,
+                                                           confidence_threshold,
+                                                           image_size,
+                                                           include_image_size,
+                                                           include_image_timestamp,
+                                                           include_exif_data,
+                                                           augment))
         consumer.daemon = True
         consumer.start()
     else:
-        _consumer_func(q,
+        _consumer_func(q,
+                       return_queue,
+                       model_file,
+                       confidence_threshold,
+                       image_size,
+                       include_image_size,
+                       include_image_timestamp,
+                       include_exif_data,
+                       augment)

     producer.join()
     print('Producer finished')

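The three call sites above repeat the same argument tuple by hand. An equivalent, more compact formulation of the same dispatch (not what the package does, just a sketch using the function's own parameters) would be:

    from threading import Thread
    from multiprocessing import Process

    # Same argument tuple for all three branches
    worker_args = (q, return_queue, model_file, confidence_threshold,
                   image_size, include_image_size, include_image_timestamp,
                   include_exif_data, augment)
    if run_separate_consumer_process:
        # Thread and Process share a constructor signature, so the
        # branch collapses to a class selection
        worker_cls = Thread if use_threads_for_queue else Process
        consumer = worker_cls(target=_consumer_func, args=worker_args)
        consumer.daemon = True
        consumer.start()
    else:
        _consumer_func(*worker_args)
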
@@ -262,15 +306,20 @@ def _chunks_by_number_of_chunks(ls, n):

 #%% Image processing functions

-def process_images(im_files,
-
-
-
+def process_images(im_files,
+                   detector,
+                   confidence_threshold,
+                   use_image_queue=False,
+                   quiet=False,
+                   image_size=None,
+                   checkpoint_queue=None,
+                   include_image_size=False,
+                   include_image_timestamp=False,
+                   include_exif_data=False,
+                   augment=False):
     """
-    Runs a detector (typically MegaDetector) over a list of image files.
-
-    standard inference path (which instead loops over process_image()).
-
+    Runs a detector (typically MegaDetector) over a list of image files on a single thread.
+
     Args:
         im_files (list: paths to image files
         detector (str or detector object): loaded model or str; if this is a string, it can be a

@@ -284,7 +333,8 @@ def process_images(im_files, detector, confidence_threshold, use_image_queue=Fal
         checkpoint_queue (Queue, optional): internal parameter used to pass image queues around
         include_image_size (bool, optional): should we include image size in the output for each image?
         include_image_timestamp (bool, optional): should we include image timestamps in the output for each image?
-        include_exif_data (bool, optional): should we include EXIF data in the output for each image?
+        include_exif_data (bool, optional): should we include EXIF data in the output for each image?
+        augment (bool, optional): enable image augmentation

     Returns:
         list: list of dicts, in which each dict represents detections on one image,

@@ -292,25 +342,37 @@ def process_images(im_files, detector, confidence_threshold, use_image_queue=Fal
     """

     if isinstance(detector, str):
+
         start_time = time.time()
         detector = load_detector(detector)
         elapsed = time.time() - start_time
         print('Loaded model (batch level) in {}'.format(humanfriendly.format_timespan(elapsed)))

     if use_image_queue:
-
-
+
+        run_detector_with_image_queue(im_files,
+                                      detector,
+                                      confidence_threshold,
+                                      quiet=quiet,
+                                      image_size=image_size,
                                       include_image_size=include_image_size,
                                       include_image_timestamp=include_image_timestamp,
-                                      include_exif_data=include_exif_data
+                                      include_exif_data=include_exif_data,
+                                      augment=augment)
+
     else:
+
         results = []
         for im_file in im_files:
-            result = process_image(im_file,
-
-
-
-
+            result = process_image(im_file,
+                                   detector,
+                                   confidence_threshold,
+                                   quiet=quiet,
+                                   image_size=image_size,
+                                   include_image_size=include_image_size,
+                                   include_image_timestamp=include_image_timestamp,
+                                   include_exif_data=include_exif_data,
+                                   augment=augment)

             if checkpoint_queue is not None:
                 checkpoint_queue.put(result)

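A sketch of the single-threaded path through process_images() per the updated signature (file names hypothetical; detector may be a model path or an already-loaded model):

    from megadetector.detection.run_detector_batch import process_images

    results = process_images(['image1.jpg', 'image2.jpg'],
                             detector='md_v5a.0.0.pt',
                             confidence_threshold=0.005,
                             quiet=True,
                             augment=False)
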
@@ -321,10 +383,16 @@ def process_images(im_files, detector, confidence_threshold, use_image_queue=Fal
 # ...def process_images(...)


-def process_image(im_file, detector,
-
-
-
+def process_image(im_file, detector,
+                  confidence_threshold,
+                  image=None,
+                  quiet=False,
+                  image_size=None,
+                  include_image_size=False,
+                  include_image_timestamp=False,
+                  include_exif_data=False,
+                  skip_image_resizing=False,
+                  augment=False):
     """
     Runs a detector (typically MegaDetector) on a single image file.

@@ -343,6 +411,7 @@ def process_image(im_file, detector, confidence_threshold, image=None,
         include_image_timestamp (bool, optional): should we include image timestamps in the output for each image?
         include_exif_data (bool, optional): should we include EXIF data in the output for each image?
         skip_image_resizing (bool, optional): whether to skip internal image resizing and rely on external resizing
+        augment (bool, optional): enable image augmentation

     Returns:
         dict: dict representing detections on one image,

@@ -367,8 +436,12 @@ def process_image(im_file, detector, confidence_threshold, image=None,

     try:
         result = detector.generate_detections_one_image(
-
-
+            image,
+            im_file,
+            detection_threshold=confidence_threshold,
+            image_size=image_size,
+            skip_image_resizing=skip_image_resizing,
+            augment=augment)
     except Exception as e:
         if not quiet:
             print('Image {} cannot be processed. Exception: {}'.format(im_file, e))

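A sketch of calling process_image() directly with the two pass-through options; per the surrounding try/except, a failed image appears to come back as a dict with a 'failure' field rather than raising (file names hypothetical):

    from megadetector.detection.run_detector import load_detector
    from megadetector.detection.run_detector_batch import process_image

    detector = load_detector('md_v5a.0.0.pt')
    result = process_image('image1.jpg',
                           detector,
                           0.005,
                           skip_image_resizing=False,
                           augment=True)
    if 'failure' in result:
        print('Could not process image: {}'.format(result['failure']))
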
@@ -423,12 +496,21 @@ def _load_custom_class_mapping(class_mapping_filename):

 #%% Main function

-def load_and_run_detector_batch(model_file,
+def load_and_run_detector_batch(model_file,
+                                image_file_names,
+                                checkpoint_path=None,
                                 confidence_threshold=run_detector.DEFAULT_OUTPUT_CONFIDENCE_THRESHOLD,
-                                checkpoint_frequency=-1,
-
-
-
+                                checkpoint_frequency=-1,
+                                results=None,
+                                n_cores=1,
+                                use_image_queue=False,
+                                quiet=False,
+                                image_size=None,
+                                class_mapping_filename=None,
+                                include_image_size=False,
+                                include_image_timestamp=False,
+                                include_exif_data=False,
+                                augment=False):
     """
     Load a model file and run it on a list of images.

@@ -455,7 +537,8 @@ def load_and_run_detector_batch(model_file, image_file_names, checkpoint_path=No
             file or YOLOv5 dataset.yaml file
         include_image_size (bool, optional): should we include image size in the output for each image?
         include_image_timestamp (bool, optional): should we include image timestamps in the output for each image?
-        include_exif_data (bool, optional): should we include EXIF data in the output for each image?
+        include_exif_data (bool, optional): should we include EXIF data in the output for each image?
+        augment (bool, optional): enable image augmentation

     Returns:
         results: list of dicts; each dict represents detections on one image

@@ -539,9 +622,15 @@ def load_and_run_detector_batch(model_file, image_file_names, checkpoint_path=No
         assert len(results) == 0, \
             'Using an image queue with results loaded from a checkpoint is not currently supported'
         assert n_cores <= 1
-        results = run_detector_with_image_queue(image_file_names,
-
-
+        results = run_detector_with_image_queue(image_file_names,
+                                                model_file,
+                                                confidence_threshold,
+                                                quiet,
+                                                image_size=image_size,
+                                                include_image_size=include_image_size,
+                                                include_image_timestamp=include_image_timestamp,
+                                                include_exif_data=include_exif_data,
+                                                augment=augment)

     elif n_cores <= 1:

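End to end, the batch entry point with the new flag looks roughly like this (paths hypothetical; signatures follow this diff):

    from megadetector.detection.run_detector_batch import \
        load_and_run_detector_batch, write_results_to_file

    results = load_and_run_detector_batch('md_v5a.0.0.pt',
                                          ['image1.jpg', 'image2.jpg'],
                                          augment=True)
    write_results_to_file(results, 'detections.json',
                          detector_file='md_v5a.0.0.pt')
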
@@ -565,11 +654,15 @@ def load_and_run_detector_batch(model_file, image_file_names, checkpoint_path=No

             count += 1

-            result = process_image(im_file,
-
-
+            result = process_image(im_file,
+                                   detector,
+                                   confidence_threshold,
+                                   quiet=quiet,
+                                   image_size=image_size,
+                                   include_image_size=include_image_size,
                                    include_image_timestamp=include_image_timestamp,
-                                   include_exif_data=include_exif_data
+                                   include_exif_data=include_exif_data,
+                                   augment=augment)
             results.append(result)

             # Write a checkpoint if necessary

@@ -616,14 +709,18 @@ def load_and_run_detector_batch(model_file, image_file_names, checkpoint_path=No
                                          checkpoint_queue, results), daemon=True)
             checkpoint_thread.start()

-            pool.map(partial(process_images,
-
-
-
-
-
-
-
+            pool.map(partial(process_images,
+                             detector=detector,
+                             confidence_threshold=confidence_threshold,
+                             use_image_queue=False,
+                             quiet=quiet,
+                             image_size=image_size,
+                             checkpoint_queue=checkpoint_queue,
+                             include_image_size=include_image_size,
+                             include_image_timestamp=include_image_timestamp,
+                             include_exif_data=include_exif_data,
+                             augment=augment),
+                     image_batches)

             checkpoint_queue.put(None)

@@ -631,12 +728,18 @@ def load_and_run_detector_batch(model_file, image_file_names, checkpoint_path=No

             # Multprocessing is enabled, but checkpointing is not

-            new_results = pool.map(partial(process_images,
-
-
-
-
-
+            new_results = pool.map(partial(process_images,
+                                           detector=detector,
+                                           confidence_threshold=confidence_threshold,
+                                           use_image_queue=False,
+                                           quiet=quiet,
+                                           checkpoint_queue=None,
+                                           image_size=image_size,
+                                           include_image_size=include_image_size,
+                                           include_image_timestamp=include_image_timestamp,
+                                           include_exif_data=include_exif_data,
+                                           augment=augment),
+                                   image_batches)

             new_results = list(itertools.chain.from_iterable(new_results))

@@ -645,8 +748,13 @@ def load_and_run_detector_batch(model_file, image_file_names, checkpoint_path=No
             results += new_results

         # ...if checkpointing is/isn't enabled
-
-
+
+        try:
+            pool.close()
+        except Exception as e:
+            print('Warning: error closing multiprocessing pool:\n{}'.format(str(e)))
+
+    # ...if we're running (1) with image queue, (2) on one core, or (3) on multiple cores

     # 'results' may have been modified in place, but we also return it for
     # backwards-compatibility.

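Explicitly closing the pool releases the worker processes once all batches are mapped, and the try/except guards against shutdown races. An alternative pattern (not what the package does) is a context manager, which terminates the pool on exit; worker_fn below stands in for the partial(process_images, ...) shown above:

    from multiprocessing.pool import ThreadPool

    with ThreadPool(n_cores) as pool:
        new_results = pool.map(worker_fn, image_batches)
    # The pool is terminated automatically when the block exits
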
@@ -724,9 +832,14 @@ def get_image_datetime(image):
         return None


-def write_results_to_file(results,
-
-
+def write_results_to_file(results,
+                          output_file,
+                          relative_path_base=None,
+                          detector_file=None,
+                          info=None,
+                          include_max_conf=False,
+                          custom_metadata=None,
+                          force_forward_slashes=True):
     """
     Writes list of detection results to JSON output file. Format matches:

@@ -951,7 +1064,12 @@ def main():
         '--image_size',
         type=int,
         default=None,
-        help=('Force image resizing to a
+        help=('Force image resizing to a specific integer size on the long axis (not recommended to change this)'))
+    parser.add_argument(
+        '--augment',
+        action='store_true',
+        help='Enable image augmentation'
+    )
     parser.add_argument(
         '--use_image_queue',
         action='store_true',

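From the command line, the new argument slots in alongside the existing ones, along the lines of (paths hypothetical):

    python -m megadetector.detection.run_detector_batch md_v5a.0.0.pt \
        camera_trap_images detections.json --recursive --augment
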
@@ -1196,7 +1314,8 @@ def main():
         class_mapping_filename=args.class_mapping_filename,
         include_image_size=args.include_image_size,
         include_image_timestamp=args.include_image_timestamp,
-        include_exif_data=args.include_exif_data
+        include_exif_data=args.include_exif_data,
+        augment=args.augment)

     elapsed = time.time() - start_time
     images_per_second = len(results) / elapsed

megadetector/detection/tf_detector.py

@@ -125,8 +125,13 @@ class TFDetector:
         return box_tensor_out, score_tensor_out, class_tensor_out


-    def generate_detections_one_image(self,
-
+    def generate_detections_one_image(self,
+                                      image,
+                                      image_id,
+                                      detection_threshold,
+                                      image_size=None,
+                                      skip_image_resizing=False,
+                                      augment=False):
         """
         Runs the detector on an image.

@@ -139,7 +144,9 @@ class TFDetector:
                 if (a) you're using a model other than MegaDetector or (b) you know what you're
                 doing
             skip_image_resizing (bool, optional): whether to skip internal image resizing (and rely on external
-                resizing)
+                resizing). Not currently supported, but included here for compatibility with PTDetector.
+            augment (bool, optional): enable image augmentation. Not currently supported, but included
+                here for compatibility with PTDetector.

         Returns:
             dict: a dictionary with the following fields:

@@ -151,6 +158,7 @@ class TFDetector:

         assert image_size is None, 'Image sizing not supported for TF detectors'
         assert not skip_image_resizing, 'Image sizing not supported for TF detectors'
+        assert not augment, 'Image augmentation is not supported for TF detectors'

         if detection_threshold is None:
             detection_threshold = 0