megadetector 5.0.24-py3-none-any.whl → 5.0.25-py3-none-any.whl

This version of megadetector might be problematic.

@@ -517,6 +517,52 @@ def invert_dictionary(d):
     return {v: k for k, v in d.items()}
 
 
+def round_floats_in_nested_dict(obj, decimal_places=5):
+    """
+    Recursively rounds all floating point values in a nested structure to the
+    specified number of decimal places. Handles dictionaries, lists, tuples,
+    sets, and other iterables. Modifies mutable objects in place.
+
+    Args:
+        obj: The object to process (can be a dict, list, set, tuple, or primitive value)
+        decimal_places: Number of decimal places to round to (default: 5)
+
+    Returns:
+        The processed object (useful for recursive calls)
+    """
+    if isinstance(obj, dict):
+        for key in obj:
+            obj[key] = round_floats_in_nested_dict(obj[key], decimal_places)
+        return obj
+
+    elif isinstance(obj, list):
+        for i in range(len(obj)):
+            obj[i] = round_floats_in_nested_dict(obj[i], decimal_places)
+        return obj
+
+    elif isinstance(obj, tuple):
+        # Tuples are immutable, so we create a new one
+        return tuple(round_floats_in_nested_dict(item, decimal_places) for item in obj)
+
+    elif isinstance(obj, set):
+        # Sets are mutable but we can't modify elements in-place
+        # Convert to list, process, and convert back to set
+        return set(round_floats_in_nested_dict(list(obj), decimal_places))
+
+    elif hasattr(obj, '__iter__') and not isinstance(obj, (str, bytes, bytearray)):
+        # Handle other iterable types - convert to list, process, and convert back
+        return type(obj)(round_floats_in_nested_dict(item, decimal_places) for item in obj)
+
+    elif isinstance(obj, float):
+        return round(obj, decimal_places)
+
+    else:
+        # For other types (int, str, bool, None, etc.), return as is
+        return obj
+
+# ...def round_floats_in_nested_dict(...)
+
+
 def image_file_to_camera_folder(image_fn):
     r"""
     Removes common overflow folders (e.g. RECNX101, RECNX102) from paths, i.e. turn:
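For reference, a minimal sketch of how the new helper behaves. The import path and the nested structure below are illustrative assumptions; the diff does not name the file this lands in.

from megadetector.utils.ct_utils import round_floats_in_nested_dict  # module path assumed

# Hypothetical nested structure, loosely shaped like MD detection output
d = {'file': 'img_0001.jpg',
     'detections': [{'category': '1',
                     'conf': 0.87231445312,
                     'bbox': [0.1234567, 0.2345678, 0.3456789, 0.4567891]}]}

rounded = round_floats_in_nested_dict(d)

# Dicts and lists are modified in place, so the same object comes back
assert rounded is d
assert d['detections'][0]['conf'] == 0.87231
assert d['detections'][0]['bbox'][0] == 0.12346

# Tuples and sets are rebuilt rather than modified in place
assert round_floats_in_nested_dict((1.000004, 2.0)) == (1.0, 2.0)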
@@ -780,7 +826,7 @@ def dict_to_kvp_list(d,
     if len(d) == 0:
         return ''
 
-    s = ''
+    s = None
     for k in d.keys():
         assert isinstance(k,str), 'Input {} is not a str <--> str dict'.format(str(d))
         v = d[k]
@@ -800,6 +846,9 @@ def dict_to_kvp_list(d,
             s += item_separator
         s += k + kv_separator + v
 
+    if s is None:
+        s = ''
+
     return s
 
 
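The s = '' → s = None change above, together with the "if s is None" fallback, reads as a sentinel refactor; the unchanged middle of dict_to_kvp_list (including how item_separator is applied) is not shown in this diff. Below is a standalone sketch of the pattern, using hypothetical separator defaults ('=' and ';') rather than the package's actual ones.

def kvp_list_sketch(d, kv_separator='=', item_separator=';'):
    # Hypothetical re-implementation for illustration, not the package's code:
    # None means "nothing emitted yet", which is distinguishable from a
    # legitimately empty first item; fall back to '' before returning.
    s = None
    for k, v in d.items():
        if s is None:
            s = ''
        else:
            s += item_separator
        s += k + kv_separator + v
    if s is None:
        s = ''
    return s

assert kvp_list_sketch({}) == ''
assert kvp_list_sketch({'a': '1', 'b': '2'}) == 'a=1;b=2'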
@@ -856,3 +905,25 @@ def __module_test__():
     L = [{'a':5},{'a':0},{'a':10}]
     k = 'a'
     sort_list_of_dicts_by_key(L, k, reverse=True)
+
+
+    ##%% Test float rounding
+
+    # Example with mixed collection types
+    data = {
+        "name": "Project X",
+        "values": [1.23456789, 2.3456789],
+        "tuple_values": (3.45678901, 4.56789012),
+        "set_values": {5.67890123, 6.78901234},
+        "metrics": {
+            "score": 98.7654321,
+            "components": [5.6789012, 6.7890123]
+        }
+    }
+
+    result = round_floats_in_nested_dict(data)
+    assert result['values'][0] == 1.23457
+    assert result['tuple_values'][0] == 3.45679
+    assert min(list(result['set_values'])) == 5.6789
+
+
@@ -17,9 +17,6 @@ import sys
 import argparse
 import re
 
-import azure.common
-from azure.storage.blob import BlobServiceClient, ContentSettings
-
 from megadetector.utils.path_utils import is_image_file
 
 
@@ -139,6 +136,8 @@ def traverse_and_create_index(dir, sas_url=None, overwrite_files=False,
     # If we want to set the content type in blob storage using a SAS URL
     if sas_url:
 
+        from azure.storage.blob import BlobServiceClient, ContentSettings
+
         # Example: sas_url = 'https://accname.blob.core.windows.net/bname/path/to/folder?st=...&se=...&sp=...&...'
         if '?' in sas_url:
             # 'https://accname.blob.core.windows.net/bname/path/to/folder' and 'st=...&se=...&sp=...&...'
@@ -196,6 +195,7 @@ def traverse_and_create_index(dir, sas_url=None, overwrite_files=False,
 
     # Set content type in blob storage
     if sas_url:
+        import azure.common
         if container_folder:
             output_blob_path = container_folder + '/' + output_file[len(dir) + 1:]
         else:
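The two hunks above move the azure imports from module load time into the branch that actually uses a SAS URL, so the module can be imported without the Azure SDK installed. A generic sketch of the same lazy-import pattern (the function name here is a placeholder, not part of the package):

def set_blob_content_types(sas_url=None):
    # Placeholder function illustrating the deferred-import pattern
    if sas_url is None:
        # Nothing Azure-related is touched on this path
        return

    # Imported only when a SAS URL is supplied; a missing optional dependency
    # surfaces here as a clear ImportError rather than at module import time
    try:
        from azure.storage.blob import BlobServiceClient, ContentSettings  # noqa: F401
    except ImportError as e:
        raise ImportError('azure-storage-blob is required when a SAS URL is provided') from e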
@@ -7,6 +7,17 @@ for TF or PyTorch
 
 """
 
+# Minimize TF printouts
+import os
+os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'
+
+try:
+    import logging
+    logging.getLogger('tensorflow').setLevel(logging.ERROR)
+except Exception:
+    pass
+
+
 #%% Torch/TF test functions
 
 def torch_test():
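TF_CPP_MIN_LOG_LEVEL only takes effect if it is set before TensorFlow is first imported in the process, which is presumably why this block sits at the top of the module ('0' shows all messages, '1' filters INFO, '2' also filters WARNING, '3' also filters ERROR). A minimal standalone sketch of the same ordering, assuming TensorFlow is installed:

import os
import logging

# Must be set before the first 'import tensorflow' anywhere in the process;
# this controls TensorFlow's C++-side logging
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'

import tensorflow as tf  # noqa: E402

# The Python-side logger is configured separately
logging.getLogger('tensorflow').setLevel(logging.ERROR)

print(tf.__version__)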
@@ -45,7 +56,7 @@ def torch_test():
                 pass
             print('{}: {}'.format(device_id,device_name))
     else:
-        print('No CUDA GPUs reported by PyTorch')
+        print('No GPUs reported by PyTorch')
 
     try:
         if torch.backends.mps.is_built and torch.backends.mps.is_available():
@@ -72,8 +83,15 @@ def tf_test():
 
     from tensorflow.python.platform import build_info as build
     print(f"TF version: {tf.__version__}")
-    print(f"CUDA build version reported by TensorFlow: {build.build_info['cuda_version']}")
-    print(f"CuDNN build version reported by TensorFlow: {build.build_info['cudnn_version']}")
+
+    if 'cuda_version' not in build.build_info:
+        print('TF does not appear to be built with CUDA')
+    else:
+        print(f"CUDA build version reported by TensorFlow: {build.build_info['cuda_version']}")
+    if 'cudnn_version' not in build.build_info:
+        print('TF does not appear to be built with CuDNN')
+    else:
+        print(f"CuDNN build version reported by TensorFlow: {build.build_info['cudnn_version']}")
 
     try:
         from tensorflow.python.compiler.tensorrt import trt_convert as trt
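build_info is a plain mapping, so the same guard can also be written with .get(); a brief sketch, again assuming a TensorFlow installation is present:

from tensorflow.python.platform import build_info as build

cuda_version = build.build_info.get('cuda_version', None)
if cuda_version is None:
    print('TF does not appear to be built with CUDA')
else:
    print('CUDA build version reported by TensorFlow: {}'.format(cuda_version))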
@@ -131,6 +131,15 @@ class MDTestOptions:
         #: this is a range from 0-100.
         self.python_test_depth = 100
 
+        #: Currently should be 'all' or 'utils-only'
+        self.test_mode = 'all'
+
+        #: Number of cores to use for multi-CPU inference tests
+        self.n_cores_for_multiprocessing_tests = 2
+
+        #: Number of cores to use for multi-CPU video tests
+        self.n_cores_for_video_tests = 2
+
     # ...def __init__()
 
 # ...class MDTestOptions()
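A sketch of how the new options might be driven from calling code. The module path is an assumption (the string 'md_tests' appears under megadetector.utils elsewhere in this diff), and run_tests(options) matches the entry point modified later in this diff:

from megadetector.utils.md_tests import MDTestOptions, run_tests  # path assumed

options = MDTestOptions()
options.test_mode = 'utils-only'               # skip torch-dependent tests
options.n_cores_for_multiprocessing_tests = 2  # multi-CPU inference tests
options.n_cores_for_video_tests = 2            # multi-CPU video tests

run_tests(options)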
@@ -302,18 +311,24 @@ def download_test_data(options=None):
 
 def is_gpu_available(verbose=True):
     """
-    Checks whether a GPU (including M1/M2 MPS) is available, according to PyTorch.
+    Checks whether a GPU (including M1/M2 MPS) is available, according to PyTorch. Returns
+    false if PT fails to import.
 
     Args:
-         verbose (bool, optional): enable additional debug console output
+        verbose (bool, optional): enable additional debug console output
 
     Returns:
-         bool: whether a GPU is available
+        bool: whether a GPU is available
     """
 
     # Import torch inside this function, so we have a chance to set CUDA_VISIBLE_DEVICES
     # before checking GPU availability.
-    import torch
+    try:
+        import torch
+    except Exception:
+        print('Warning: could not import torch')
+        return False
+
     gpu_available = torch.cuda.is_available()
 
     if gpu_available:
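The comment in this hunk notes that torch is imported inside the function so that CUDA_VISIBLE_DEVICES can be set first; a sketch of that calling pattern (import path assumed, as above):

import os

# Hide CUDA devices before torch is imported inside is_gpu_available()
os.environ['CUDA_VISIBLE_DEVICES'] = ''

from megadetector.utils.md_tests import is_gpu_available  # path assumed

# On a CUDA machine this should now report False; with the new try/except it
# also returns False (rather than raising) if torch is not installed
print(is_gpu_available(verbose=True))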
@@ -705,6 +720,47 @@ def execute_and_print(cmd,print_output=True,catch_exceptions=False,echo_command=
 
 #%% Python tests
 
+def test_package_imports(package_name,exceptions=None,verbose=True):
+    """
+    Imports all modules in [package_name]
+
+    Args:
+        package_name (str): the package name to test
+        exceptions (list, optional): exclude any modules that contain any of these strings
+        verbose (bool, optional): enable additional debug output
+    """
+    import importlib
+    import pkgutil
+
+    package = importlib.import_module(package_name)
+    package_path = package.__path__
+    imported_modules = []
+
+    if exceptions is None:
+        exceptions = []
+
+    for _, modname, _ in pkgutil.walk_packages(package_path, package_name + '.'):
+
+        skip_module = False
+        for s in exceptions:
+            if s in modname:
+                skip_module = True
+                break
+        if skip_module:
+            continue
+
+        if verbose:
+            print('Testing import: {}'.format(modname))
+
+        try:
+            # Attempt to import each module
+            _ = importlib.import_module(modname)
+            imported_modules.append(modname)
+        except ImportError as e:
+            print(f"Failed to import module {modname}: {e}")
+            raise
+
+#%%
 def run_python_tests(options):
     """
     Runs Python-based (as opposed to CLI-based) package tests.
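For clarity, test_package_imports walks a package with pkgutil.walk_packages and imports every submodule, re-raising on the first ImportError; the 'exceptions' entries are substring filters on module names. Usage mirrors the calls added to run_python_tests() later in this diff:

test_package_imports('megadetector.visualization')
test_package_imports('megadetector.utils',
                     exceptions=['azure_utils', 'sas_blob_utils', 'md_tests'])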
@@ -716,11 +772,6 @@ def run_python_tests(options):
     print('\n*** Starting module tests ***\n')
 
 
-    ## Make sure our tests are doing what we think they're doing
-
-    from megadetector.detection import pytorch_detector
-    pytorch_detector.require_non_default_compatibility_mode = True
-
     ## Prepare data
 
     download_test_data(options)
@@ -734,12 +785,34 @@ def run_python_tests(options):
     ct_utils_test()
 
 
+    ## Import tests
+
+    print('\n** Running package import tests **\n')
+    test_package_imports('megadetector.visualization')
+    test_package_imports('megadetector.postprocessing')
+    test_package_imports('megadetector.postprocessing.repeat_detection_elimination')
+    test_package_imports('megadetector.utils',exceptions=['azure_utils','sas_blob_utils','md_tests'])
+    test_package_imports('megadetector.data_management',exceptions=['lila','ocr_tools'])
+
+
+    ## Return early if we're not running torch-related tests
+
+    if options.test_mode == 'utils-only':
+        return
+
+
+    ## Make sure our tests are doing what we think they're doing
+
+    from megadetector.detection import pytorch_detector
+    pytorch_detector.require_non_default_compatibility_mode = True
+
+
     ## Run inference on an image
 
     print('\n** Running MD on a single image (module) **\n')
 
     from megadetector.detection import run_detector
-    from megadetector.visualization import visualization_utils as vis_utils
+    from megadetector.visualization import visualization_utils as vis_utils # noqa
     image_fn = os.path.join(options.scratch_dir,options.test_images[0])
     model = run_detector.load_detector(options.default_model,
                                        detector_options=copy(options.detector_options))
@@ -755,7 +828,7 @@ def run_python_tests(options):
     print('\n** Running MD on a folder of images (module) **\n')
 
     from megadetector.detection.run_detector_batch import load_and_run_detector_batch,write_results_to_file
-    from megadetector.utils import path_utils
+    from megadetector.utils import path_utils # noqa
 
     image_folder = os.path.join(options.scratch_dir,'md-test-images')
     assert os.path.isdir(image_folder), 'Test image folder {} is not available'.format(image_folder)
@@ -773,7 +846,7 @@ def run_python_tests(options):
     ## Verify results
 
     # Verify format correctness
-    from megadetector.postprocessing.validate_batch_results import validate_batch_results
+    from megadetector.postprocessing.validate_batch_results import validate_batch_results #noqa
     validate_batch_results(inference_output_file)
 
     # Verify value correctness
@@ -955,7 +1028,7 @@ def run_python_tests(options):
     # video_options.rendering_confidence_threshold = None
     # video_options.json_confidence_threshold = 0.005
     video_options.frame_sample = 10
-    video_options.n_cores = 5
+    video_options.n_cores = options.n_cores_for_video_tests
     # video_options.debug_max_frames = -1
     # video_options.class_mapping_filename = None
     video_options.detector_options = copy(options.detector_options)
@@ -996,7 +1069,7 @@ def run_python_tests(options):
     # video_options.rendering_confidence_threshold = None
     # video_options.json_confidence_threshold = 0.005
     video_options.frame_sample = 10
-    video_options.n_cores = 5
+    video_options.n_cores = options.n_cores_for_video_tests
 
     # Force frame extraction to disk, since that's how we generated our expected results file
     video_options.force_on_disk_frame_extraction = True
@@ -1092,6 +1165,17 @@ def run_cli_tests(options):
     from megadetector.utils.path_utils import insert_before_extension
 
 
+    ## Utility tests
+
+    # TODO: move postprocessing tests up to this point, using pre-generated .json results files
+
+
+    ## Return early if we're not running torch-related tests
+
+    if options.test_mode == 'utils-only':
+        return
+
+
     ## Run inference on an image
 
     print('\n** Running MD on a single image (CLI) **\n')
@@ -1218,7 +1302,7 @@ def run_cli_tests(options):
 
     print('\n** Running MD on a folder (multiple CPUs) (CLI) **\n')
 
-    cpu_string = ' --ncores 4'
+    cpu_string = ' --ncores {}'.format(options.n_cores_for_multiprocessing_tests)
     cmd = base_cmd + cpu_string
     inference_output_file_cpu_multicore = insert_before_extension(inference_output_file,'multicore')
     cmd = cmd.replace(inference_output_file,inference_output_file_cpu_multicore)
@@ -1385,7 +1469,9 @@ def run_cli_tests(options):
     cmd += ' --frame_folder "{}" --frame_rendering_folder "{}" --output_json_file "{}" --output_video_file "{}"'.format(
         frame_folder,frame_rendering_folder,video_inference_output_file,output_video_file)
     cmd += ' --fourcc {}'.format(options.video_fourcc)
-    cmd += ' --force_extracted_frame_folder_deletion --force_rendered_frame_folder_deletion --n_cores 5 --frame_sample 3'
+    cmd += ' --force_extracted_frame_folder_deletion --force_rendered_frame_folder_deletion'
+    cmd += ' --n_cores {}'.format(options.n_cores_for_video_tests)
+    cmd += ' --frame_sample 4'
     cmd += ' --verbose'
     cmd += ' --detector_options {}'.format(dict_to_kvp_list(options.detector_options))
 
@@ -1446,40 +1532,39 @@ def run_download_tests(options):
         options (MDTestOptions): see MDTestOptions for details
     """
 
-    if not options.skip_download_tests:
+    if options.skip_download_tests or options.test_mode == 'utils-only':
+        return
 
-        from megadetector.detection.run_detector import known_models, \
-                                                        try_download_known_detector, \
-                                                        get_detector_version_from_model_file, \
-                                                        model_string_to_model_version
+    from megadetector.detection.run_detector import known_models, \
+                                                    try_download_known_detector, \
+                                                    get_detector_version_from_model_file, \
+                                                    model_string_to_model_version
 
-        # Make sure we can download models based on canonical version numbers,
-        # e.g. "v5a.0.0"
-        for model_name in known_models:
-            url = known_models[model_name]['url']
-            if 'localhost' in url:
-                continue
-            print('Testing download for known model {}'.format(model_name))
-            fn = try_download_known_detector(model_name,
-                                             force_download=False,
-                                             verbose=False)
-            version_string = get_detector_version_from_model_file(fn, verbose=False)
-            assert version_string == model_name
-
-        # Make sure we can download models based on short names, e.g. "MDV5A"
-        for model_name in model_string_to_model_version:
-            model_version = model_string_to_model_version[model_name]
-            assert model_version in known_models
-            url = known_models[model_version]['url']
-            if 'localhost' in url:
-                continue
-            print('Testing download for model short name {}'.format(model_name))
-            fn = try_download_known_detector(model_name,
-                                             force_download=False,
-                                             verbose=False)
-            assert fn != model_name
-
-    # ...if we need to test model downloads
+    # Make sure we can download models based on canonical version numbers,
+    # e.g. "v5a.0.0"
+    for model_name in known_models:
+        url = known_models[model_name]['url']
+        if 'localhost' in url:
+            continue
+        print('Testing download for known model {}'.format(model_name))
+        fn = try_download_known_detector(model_name,
+                                         force_download=False,
+                                         verbose=False)
+        version_string = get_detector_version_from_model_file(fn, verbose=False)
+        assert version_string == model_name
+
+    # Make sure we can download models based on short names, e.g. "MDV5A"
+    for model_name in model_string_to_model_version:
+        model_version = model_string_to_model_version[model_name]
+        assert model_version in known_models
+        url = known_models[model_version]['url']
+        if 'localhost' in url:
+            continue
+        print('Testing download for model short name {}'.format(model_name))
+        fn = try_download_known_detector(model_name,
+                                         force_download=False,
+                                         verbose=False)
+        assert fn != model_name
 
 # ...def run_download_tests()
 
@@ -1497,7 +1582,7 @@ def run_tests(options):
     # Prepare data folder
     download_test_data(options)
 
-    # Run download tests if necessary
+    # Run model download tests if necessary
    run_download_tests(options)
 
     if options.disable_gpu:
@@ -1740,6 +1825,13 @@ def main():
         help='PYTHONPATH to set for CLI tests; if None, inherits from the parent process'
     )
 
+    parser.add_argument(
+        '--test_mode',
+        type=str,
+        default='utils-only',
+        help='Test mode: "all" or "utils-only"'
+    )
+
     parser.add_argument(
         '--python_test_depth',
         type=int,
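A hypothetical invocation of the new flag (module path assumed; note that this argparse default is 'utils-only', while the MDTestOptions attribute added earlier in this diff defaults to 'all'):

import subprocess
import sys

# Equivalent to "python -m megadetector.utils.md_tests --test_mode utils-only"
subprocess.run([sys.executable, '-m', 'megadetector.utils.md_tests',
                '--test_mode', 'utils-only'], check=True)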
@@ -547,8 +547,42 @@ def wsl_path_to_windows_path(filename):
     if result.returncode != 0:
         print('Could not convert path {} from WSL to Windows'.format(filename))
         return None
+
     return result.stdout.strip()
 
+
+def windows_path_to_wsl_path(filename):
+    r"""
+    Converts a Windows path to a WSL path, or returns None if that's not possible. E.g.
+    converts:
+
+        e:\a\b\c
+
+    ...to:
+
+        /mnt/e/a/b/c
+
+    Args:
+        filename (str): filename to convert
+
+    Returns:
+        str: WSL equivalent to the Windows path [filename], or [filename] if the current
+        environment is neither Windows nor WSL.
+    """
+
+    if (not environment_is_wsl()) and (os.name != 'nt'):
+        return filename
+
+    if environment_is_wsl():
+        result = subprocess.run(['wslpath', '-u', filename], text=True, capture_output=True)
+    else:
+        result = subprocess.run(['wsl', 'wslpath', '-u', filename], text=True, capture_output=True)
+    if result.returncode != 0:
+        print('Could not convert path {} from Windows to WSL'.format(filename))
+        return None
+
+    return result.stdout.strip()
+
 
 def open_file(filename, attempt_to_open_in_wsl_host=False, browser_name=None):
     """