megadetector 5.0.27__py3-none-any.whl → 5.0.29__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of megadetector has been flagged as potentially problematic.
Files changed (176)
  1. megadetector/api/batch_processing/api_core/batch_service/score.py +4 -5
  2. megadetector/api/batch_processing/api_core_support/aggregate_results_manually.py +1 -1
  3. megadetector/api/batch_processing/api_support/summarize_daily_activity.py +1 -1
  4. megadetector/api/batch_processing/integration/digiKam/xmp_integration.py +2 -2
  5. megadetector/api/batch_processing/integration/eMammal/test_scripts/push_annotations_to_emammal.py +1 -1
  6. megadetector/api/batch_processing/integration/eMammal/test_scripts/select_images_for_testing.py +1 -1
  7. megadetector/api/synchronous/api_core/tests/load_test.py +2 -3
  8. megadetector/classification/aggregate_classifier_probs.py +3 -3
  9. megadetector/classification/analyze_failed_images.py +5 -5
  10. megadetector/classification/cache_batchapi_outputs.py +5 -5
  11. megadetector/classification/create_classification_dataset.py +11 -12
  12. megadetector/classification/crop_detections.py +10 -10
  13. megadetector/classification/csv_to_json.py +8 -8
  14. megadetector/classification/detect_and_crop.py +13 -15
  15. megadetector/classification/evaluate_model.py +7 -7
  16. megadetector/classification/identify_mislabeled_candidates.py +6 -6
  17. megadetector/classification/json_to_azcopy_list.py +1 -1
  18. megadetector/classification/json_validator.py +29 -32
  19. megadetector/classification/map_classification_categories.py +9 -9
  20. megadetector/classification/merge_classification_detection_output.py +12 -9
  21. megadetector/classification/prepare_classification_script.py +19 -19
  22. megadetector/classification/prepare_classification_script_mc.py +23 -23
  23. megadetector/classification/run_classifier.py +4 -4
  24. megadetector/classification/save_mislabeled.py +6 -6
  25. megadetector/classification/train_classifier.py +1 -1
  26. megadetector/classification/train_classifier_tf.py +9 -9
  27. megadetector/classification/train_utils.py +10 -10
  28. megadetector/data_management/annotations/annotation_constants.py +1 -1
  29. megadetector/data_management/camtrap_dp_to_coco.py +45 -45
  30. megadetector/data_management/cct_json_utils.py +101 -101
  31. megadetector/data_management/cct_to_md.py +49 -49
  32. megadetector/data_management/cct_to_wi.py +33 -33
  33. megadetector/data_management/coco_to_labelme.py +75 -75
  34. megadetector/data_management/coco_to_yolo.py +189 -189
  35. megadetector/data_management/databases/add_width_and_height_to_db.py +3 -2
  36. megadetector/data_management/databases/combine_coco_camera_traps_files.py +38 -38
  37. megadetector/data_management/databases/integrity_check_json_db.py +202 -188
  38. megadetector/data_management/databases/subset_json_db.py +33 -33
  39. megadetector/data_management/generate_crops_from_cct.py +38 -38
  40. megadetector/data_management/get_image_sizes.py +54 -49
  41. megadetector/data_management/labelme_to_coco.py +130 -124
  42. megadetector/data_management/labelme_to_yolo.py +78 -72
  43. megadetector/data_management/lila/create_lila_blank_set.py +81 -83
  44. megadetector/data_management/lila/create_lila_test_set.py +32 -31
  45. megadetector/data_management/lila/create_links_to_md_results_files.py +18 -18
  46. megadetector/data_management/lila/download_lila_subset.py +21 -24
  47. megadetector/data_management/lila/generate_lila_per_image_labels.py +91 -91
  48. megadetector/data_management/lila/get_lila_annotation_counts.py +30 -30
  49. megadetector/data_management/lila/get_lila_image_counts.py +22 -22
  50. megadetector/data_management/lila/lila_common.py +70 -70
  51. megadetector/data_management/lila/test_lila_metadata_urls.py +13 -14
  52. megadetector/data_management/mewc_to_md.py +339 -340
  53. megadetector/data_management/ocr_tools.py +258 -252
  54. megadetector/data_management/read_exif.py +232 -223
  55. megadetector/data_management/remap_coco_categories.py +26 -26
  56. megadetector/data_management/remove_exif.py +31 -20
  57. megadetector/data_management/rename_images.py +187 -187
  58. megadetector/data_management/resize_coco_dataset.py +41 -41
  59. megadetector/data_management/speciesnet_to_md.py +41 -41
  60. megadetector/data_management/wi_download_csv_to_coco.py +55 -55
  61. megadetector/data_management/yolo_output_to_md_output.py +117 -120
  62. megadetector/data_management/yolo_to_coco.py +195 -188
  63. megadetector/detection/change_detection.py +831 -0
  64. megadetector/detection/process_video.py +341 -338
  65. megadetector/detection/pytorch_detector.py +308 -266
  66. megadetector/detection/run_detector.py +186 -166
  67. megadetector/detection/run_detector_batch.py +366 -364
  68. megadetector/detection/run_inference_with_yolov5_val.py +328 -325
  69. megadetector/detection/run_tiled_inference.py +312 -253
  70. megadetector/detection/tf_detector.py +24 -24
  71. megadetector/detection/video_utils.py +291 -283
  72. megadetector/postprocessing/add_max_conf.py +15 -11
  73. megadetector/postprocessing/categorize_detections_by_size.py +44 -44
  74. megadetector/postprocessing/classification_postprocessing.py +808 -311
  75. megadetector/postprocessing/combine_batch_outputs.py +20 -21
  76. megadetector/postprocessing/compare_batch_results.py +528 -517
  77. megadetector/postprocessing/convert_output_format.py +97 -97
  78. megadetector/postprocessing/create_crop_folder.py +220 -147
  79. megadetector/postprocessing/detector_calibration.py +173 -168
  80. megadetector/postprocessing/generate_csv_report.py +508 -0
  81. megadetector/postprocessing/load_api_results.py +25 -22
  82. megadetector/postprocessing/md_to_coco.py +129 -98
  83. megadetector/postprocessing/md_to_labelme.py +89 -83
  84. megadetector/postprocessing/md_to_wi.py +40 -40
  85. megadetector/postprocessing/merge_detections.py +87 -114
  86. megadetector/postprocessing/postprocess_batch_results.py +319 -302
  87. megadetector/postprocessing/remap_detection_categories.py +36 -36
  88. megadetector/postprocessing/render_detection_confusion_matrix.py +205 -199
  89. megadetector/postprocessing/repeat_detection_elimination/find_repeat_detections.py +57 -57
  90. megadetector/postprocessing/repeat_detection_elimination/remove_repeat_detections.py +27 -28
  91. megadetector/postprocessing/repeat_detection_elimination/repeat_detections_core.py +702 -677
  92. megadetector/postprocessing/separate_detections_into_folders.py +226 -211
  93. megadetector/postprocessing/subset_json_detector_output.py +265 -262
  94. megadetector/postprocessing/top_folders_to_bottom.py +45 -45
  95. megadetector/postprocessing/validate_batch_results.py +70 -70
  96. megadetector/taxonomy_mapping/map_lila_taxonomy_to_wi_taxonomy.py +52 -52
  97. megadetector/taxonomy_mapping/map_new_lila_datasets.py +15 -15
  98. megadetector/taxonomy_mapping/prepare_lila_taxonomy_release.py +14 -14
  99. megadetector/taxonomy_mapping/preview_lila_taxonomy.py +66 -69
  100. megadetector/taxonomy_mapping/retrieve_sample_image.py +16 -16
  101. megadetector/taxonomy_mapping/simple_image_download.py +8 -8
  102. megadetector/taxonomy_mapping/species_lookup.py +33 -33
  103. megadetector/taxonomy_mapping/taxonomy_csv_checker.py +14 -14
  104. megadetector/taxonomy_mapping/taxonomy_graph.py +11 -11
  105. megadetector/taxonomy_mapping/validate_lila_category_mappings.py +13 -13
  106. megadetector/utils/azure_utils.py +22 -22
  107. megadetector/utils/ct_utils.py +1019 -200
  108. megadetector/utils/directory_listing.py +21 -77
  109. megadetector/utils/gpu_test.py +22 -22
  110. megadetector/utils/md_tests.py +541 -518
  111. megadetector/utils/path_utils.py +1511 -406
  112. megadetector/utils/process_utils.py +41 -41
  113. megadetector/utils/sas_blob_utils.py +53 -49
  114. megadetector/utils/split_locations_into_train_val.py +73 -60
  115. megadetector/utils/string_utils.py +147 -26
  116. megadetector/utils/url_utils.py +463 -173
  117. megadetector/utils/wi_utils.py +2629 -2868
  118. megadetector/utils/write_html_image_list.py +137 -137
  119. megadetector/visualization/plot_utils.py +21 -21
  120. megadetector/visualization/render_images_with_thumbnails.py +37 -73
  121. megadetector/visualization/visualization_utils.py +424 -404
  122. megadetector/visualization/visualize_db.py +197 -190
  123. megadetector/visualization/visualize_detector_output.py +126 -98
  124. {megadetector-5.0.27.dist-info → megadetector-5.0.29.dist-info}/METADATA +6 -3
  125. megadetector-5.0.29.dist-info/RECORD +163 -0
  126. {megadetector-5.0.27.dist-info → megadetector-5.0.29.dist-info}/WHEEL +1 -1
  127. megadetector/data_management/importers/add_nacti_sizes.py +0 -52
  128. megadetector/data_management/importers/add_timestamps_to_icct.py +0 -79
  129. megadetector/data_management/importers/animl_results_to_md_results.py +0 -158
  130. megadetector/data_management/importers/auckland_doc_test_to_json.py +0 -373
  131. megadetector/data_management/importers/auckland_doc_to_json.py +0 -201
  132. megadetector/data_management/importers/awc_to_json.py +0 -191
  133. megadetector/data_management/importers/bellevue_to_json.py +0 -272
  134. megadetector/data_management/importers/cacophony-thermal-importer.py +0 -793
  135. megadetector/data_management/importers/carrizo_shrubfree_2018.py +0 -269
  136. megadetector/data_management/importers/carrizo_trail_cam_2017.py +0 -289
  137. megadetector/data_management/importers/cct_field_adjustments.py +0 -58
  138. megadetector/data_management/importers/channel_islands_to_cct.py +0 -913
  139. megadetector/data_management/importers/eMammal/copy_and_unzip_emammal.py +0 -180
  140. megadetector/data_management/importers/eMammal/eMammal_helpers.py +0 -249
  141. megadetector/data_management/importers/eMammal/make_eMammal_json.py +0 -223
  142. megadetector/data_management/importers/ena24_to_json.py +0 -276
  143. megadetector/data_management/importers/filenames_to_json.py +0 -386
  144. megadetector/data_management/importers/helena_to_cct.py +0 -283
  145. megadetector/data_management/importers/idaho-camera-traps.py +0 -1407
  146. megadetector/data_management/importers/idfg_iwildcam_lila_prep.py +0 -294
  147. megadetector/data_management/importers/import_desert_lion_conservation_camera_traps.py +0 -387
  148. megadetector/data_management/importers/jb_csv_to_json.py +0 -150
  149. megadetector/data_management/importers/mcgill_to_json.py +0 -250
  150. megadetector/data_management/importers/missouri_to_json.py +0 -490
  151. megadetector/data_management/importers/nacti_fieldname_adjustments.py +0 -79
  152. megadetector/data_management/importers/noaa_seals_2019.py +0 -181
  153. megadetector/data_management/importers/osu-small-animals-to-json.py +0 -364
  154. megadetector/data_management/importers/pc_to_json.py +0 -365
  155. megadetector/data_management/importers/plot_wni_giraffes.py +0 -123
  156. megadetector/data_management/importers/prepare_zsl_imerit.py +0 -131
  157. megadetector/data_management/importers/raic_csv_to_md_results.py +0 -416
  158. megadetector/data_management/importers/rspb_to_json.py +0 -356
  159. megadetector/data_management/importers/save_the_elephants_survey_A.py +0 -320
  160. megadetector/data_management/importers/save_the_elephants_survey_B.py +0 -329
  161. megadetector/data_management/importers/snapshot_safari_importer.py +0 -758
  162. megadetector/data_management/importers/snapshot_serengeti_lila.py +0 -1067
  163. megadetector/data_management/importers/snapshotserengeti/make_full_SS_json.py +0 -150
  164. megadetector/data_management/importers/snapshotserengeti/make_per_season_SS_json.py +0 -153
  165. megadetector/data_management/importers/sulross_get_exif.py +0 -65
  166. megadetector/data_management/importers/timelapse_csv_set_to_json.py +0 -490
  167. megadetector/data_management/importers/ubc_to_json.py +0 -399
  168. megadetector/data_management/importers/umn_to_json.py +0 -507
  169. megadetector/data_management/importers/wellington_to_json.py +0 -263
  170. megadetector/data_management/importers/wi_to_json.py +0 -442
  171. megadetector/data_management/importers/zamba_results_to_md_results.py +0 -180
  172. megadetector/data_management/lila/add_locations_to_island_camera_traps.py +0 -101
  173. megadetector/data_management/lila/add_locations_to_nacti.py +0 -151
  174. megadetector-5.0.27.dist-info/RECORD +0 -208
  175. {megadetector-5.0.27.dist-info → megadetector-5.0.29.dist-info}/licenses/LICENSE +0 -0
  176. {megadetector-5.0.27.dist-info → megadetector-5.0.29.dist-info}/top_level.txt +0 -0
megadetector/data_management/importers/awc_to_json.py
@@ -1,191 +0,0 @@
- """
-
- awc_to_json.py
-
- Convert a particular .csv file from Australian Wildlife Conservancy to CCT format.
-
- """
-
- #%% Constants and environment
-
- import pandas as pd
- import uuid
- import json
- import time
- import humanfriendly
- import os
- import PIL
-
- from tqdm import tqdm
-
- from megadetector.visualization import visualize_db
- from megadetector.utils import path_utils
-
- input_metadata_file = r"D:\wildlife_data\awc\awc_imageinfo.csv"
- output_file = r"D:\wildlife_data\awc\awc_imageinfo.json"
- image_base = r"D:\wildlife_data\awc"
- preview_base = r"D:\wildlife_data\awc\label_preview"
-
- filename_replacements = {'D:\\Wet Tropics':'WetTropics'}
- category_mappings = {'none':'empty'}
-
-
- #%% Read source data
-
- input_metadata = pd.read_csv(input_metadata_file)
-
- print('Read {} columns and {} rows from metadata file'.format(len(input_metadata.columns),
-     len(input_metadata)))
-
-
- #%% Main loop over labels
-
- startTime = time.time()
-
- relativePathToImage = {}
-
- images = []
- annotations = []
- categoryIDToCategories = {}
- missingFiles = []
-
- duplicateImageIDs = set()
-
- # Force the empty category to be ID 0
- emptyCat = {}
- emptyCat['name'] = 'empty'
- emptyCat['id'] = 0
- categoryIDToCategories[0] = emptyCat
-
- # iRow = 0; row = input_metadata.iloc[iRow]
- for iRow,row in tqdm(input_metadata.iterrows(),total=len(input_metadata)):
-
-     # ImageID,FileName,FilePath,SpeciesID,CommonName
-     imageID = str(row['ImageID'])
-     fn = row['FileName']
-     for k in filename_replacements:
-         dirName = row['FilePath'].replace(k,filename_replacements[k])
-     relativePath = os.path.join(dirName,fn)
-
-     # This makes an assumption of one annotation per image, which happens to be
-     # true in this data set.
-     if relativePath in relativePathToImage:
-
-         im = relativePathToImage[relativePath]
-         assert im['id'] == imageID
-         duplicateImageIDs.add(imageID)
-
-     else:
-         im = {}
-         im['id'] = imageID
-         im['file_name'] = relativePath
-         im['seq_id'] = '-1'
-         images.append(im)
-         relativePathToImage[relativePath] = im
-
-         fullPath = os.path.join(image_base,relativePath)
-
-         if not os.path.isfile(fullPath):
-
-             missingFiles.append(fullPath)
-
-         else:
-             # Retrieve image width and height
-             pilImage = PIL.Image.open(fullPath)
-             width, height = pilImage.size
-             im['width'] = width
-             im['height'] = height
-
-     categoryName = row['CommonName'].lower()
-     if categoryName in category_mappings:
-         categoryName = category_mappings[categoryName]
-
-     categoryID = row['SpeciesID']
-     assert isinstance(categoryID,int)
-
-     if categoryID not in categoryIDToCategories:
-         category = {}
-         category['name'] = categoryName
-         category['id'] = categoryID
-         categoryIDToCategories[categoryID] = category
-     else:
-         assert categoryIDToCategories[categoryID]['name'] == categoryName
-
-     # Create an annotation
-     ann = {}
-
-     # The Internet tells me this guarantees uniqueness to a reasonable extent, even
-     # beyond the sheer improbability of collisions.
-     ann['id'] = str(uuid.uuid1())
-     ann['image_id'] = im['id']
-     ann['category_id'] = categoryID
-
-     annotations.append(ann)
-
- categories = list(categoryIDToCategories.values())
-
- elapsed = time.time() - startTime
- print('Finished verifying file loop in {}, {} images, {} missing images, {} repeat labels'.format(
-     humanfriendly.format_timespan(elapsed), len(images), len(missingFiles), len(duplicateImageIDs)))
-
-
- #%% Check for images that aren't included in the metadata file
-
- # Enumerate all images
- # list(relativePathToImage.keys())[0]
-
- imageFullPaths = path_utils.find_images(image_base,bRecursive=True)
- unmatchedFiles = []
-
- for iImage,imagePath in enumerate(imageFullPaths):
-
-     fn = os.path.relpath(imagePath,image_base)
-     if fn not in relativePathToImage:
-         unmatchedFiles.append(fn)
-
- print('Finished checking {} images to make sure they\'re in the metadata, found {} mismatches'.format(
-     len(imageFullPaths),len(unmatchedFiles)))
-
-
- #%% Create info struct
-
- info = {}
- info['year'] = 2019
- info['version'] = 1
- info['description'] = 'COCO style database'
- info['secondary_contributor'] = 'Converted to COCO .json by Dan Morris'
- info['contributor'] = ''
-
-
- #%% Write output
-
- json_data = {}
- json_data['images'] = images
- json_data['annotations'] = annotations
- json_data['categories'] = categories
- json_data['info'] = info
- json.dump(json_data, open(output_file,'w'), indent=4)
-
- print('Finished writing .json file with {} images, {} annotations, and {} categories'.format(
-     len(images),len(annotations),len(categories)))
-
-
- #%% Validate the database's integrity
-
- from megadetector.data_management.databases import integrity_check_json_db
-
- options = integrity_check_json_db.IntegrityCheckOptions()
- sortedCategories,data = integrity_check_json_db.integrity_check_json_db(output_file, options)
-
-
- #%% Render a bunch of images to make sure the labels got carried along correctly
-
- bbox_db_path = output_file
- output_dir = preview_base
-
- options = visualize_db.BboxDbVizOptions()
- options.num_to_visualize = 1000
- options.sort_by_filename = False
-
- htmlOutputFile = visualize_db.visualize_db(bbox_db_path,output_dir,image_base,options)
-
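The removed awc_to_json.py above follows the CSV-to-COCO-Camera-Traps pattern shared by most of the deleted importers: read a metadata table, build images, annotations, and categories lists, then dump them to a single .json file. The minimal sketch below distills that pattern; the column names (ImageID, FileName, FilePath, SpeciesID, CommonName) come from the deleted script, while the sample rows and the output filename are hypothetical.

import json
import uuid

# Hypothetical sample rows; the real script read these columns from
# awc_imageinfo.csv with pandas (ImageID, FileName, FilePath, SpeciesID, CommonName).
rows = [
    {'ImageID': 1001, 'FileName': 'IMG_0001.JPG', 'FilePath': 'WetTropics/site01',
     'SpeciesID': 3, 'CommonName': 'Dingo'},
    {'ImageID': 1002, 'FileName': 'IMG_0002.JPG', 'FilePath': 'WetTropics/site01',
     'SpeciesID': 0, 'CommonName': 'none'},
]

category_mappings = {'none': 'empty'}

images = []
annotations = []
relative_path_to_image = {}
category_id_to_category = {0: {'id': 0, 'name': 'empty'}}  # force 'empty' to be category 0

for row in rows:

    relative_path = row['FilePath'] + '/' + row['FileName']

    # One image record per unique path; one annotation per row
    im = relative_path_to_image.get(relative_path)
    if im is None:
        im = {'id': str(row['ImageID']), 'file_name': relative_path, 'seq_id': '-1'}
        images.append(im)
        relative_path_to_image[relative_path] = im

    category_name = category_mappings.get(row['CommonName'].lower(), row['CommonName'].lower())
    category_id = row['SpeciesID']
    category_id_to_category.setdefault(category_id, {'id': category_id, 'name': category_name})

    annotations.append({'id': str(uuid.uuid1()),
                        'image_id': im['id'],
                        'category_id': category_id})

coco_data = {'images': images,
             'annotations': annotations,
             'categories': list(category_id_to_category.values()),
             'info': {'year': 2019, 'version': 1, 'description': 'COCO style database'}}

# Hypothetical output path
with open('awc_imageinfo_sketch.json', 'w') as f:
    json.dump(coco_data, f, indent=4)

The real script additionally verified that each file exists under image_base, read image sizes with PIL, and ran integrity_check_json_db and visualize_db on the result.
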
megadetector/data_management/importers/bellevue_to_json.py
@@ -1,272 +0,0 @@
- """
-
- bellevue_to_json.py
-
- "Bellevue Camera Traps" is the rather unremarkable camera trap data set
- used by one of the repo's maintainers for testing. It's organized as:
-
- approximate_date/[loose_camera_specifier/]/species
-
- E.g.:
-
- "2018.03.30\coyote\DSCF0091.JPG"
- "2018.07.18\oldcam\empty\DSCF0001.JPG"
-
- """
-
- #%% Constants and imports
-
- import json
- import os
- import uuid
- import datetime
-
- from PIL import Image
- from PIL.ExifTags import TAGS
- from tqdm import tqdm
-
- from megadetector.utils.path_utils import find_images
-
- # Filenames will be stored in the output .json relative to this base dir
- base_dir = r'C:\temp\camera_trap_images_no_people'
- output_base = r'c:\temp\previews'
- output_filename = os.path.join(base_dir,'bellevue_camera_traps.{}.json'.format(str(datetime.date.today())))
-
- class_mappings = {'transitional':'unlabeled','moving':'unlabeled','setup':'unlabeled','blurry':'unlabeled','transitional':'unlabeled','junk':'unlabeled','unknown':'unlabeled','blurry':'unlabeled'}
- class_mappings['dan'] = 'human'
- class_mappings['dan_and_dog'] = 'human,dog'
- class_mappings['dan and dog'] = 'human,dog'
- class_mappings['unknown'] = 'unknown animal'
- class_mappings['racoon'] = 'raccoon'
-
-
- info = {}
- info['year'] = 2020
- info['version'] = '2.0'
- info['description'] = 'Bellevue Camera Traps'
- info['contributor'] = 'Dan Morris'
- info['date_created'] = str(datetime.date.today())
-
- max_files = -1
-
- max_seconds_within_sequence = 10.0
-
- assert os.path.isdir(base_dir)
-
- #%% Exif functions
-
- def get_exif_tags(fn=None,im=None):
-
-     assert (fn is not None) or (im is not None)
-     ret = {}
-     if im is None:
-         im = Image.open(fn)
-     info = im._getexif()
-     for tag, value in info.items():
-         decoded = TAGS.get(tag, tag)
-         ret[decoded] = value
-
-     return ret
-
-
- #%% Enumerate files, create image/annotation/category info
-
- annotations = []
-
- category_name_to_category = {}
-
- # Force the empty category to be ID 0
- empty_category = {}
- empty_category['id'] = 0
- empty_category['name'] = 'empty'
- category_name_to_category['empty'] = empty_category
- next_category_id = 1
-
- # Keep track of unique camera folders
- camera_folders = set()
-
- # Each element will be a dictionary with fields:
- #
- # relative_path, width, height, datetime
- images = []
- non_image_files = []
-
- print('Enumerating files from {}'.format(base_dir))
-
- image_files = find_images(base_dir,recursive=True)
- print('Enumerated {} images'.format(len(image_files)))
-
- # fname = image_files[0]
- for fname in tqdm(image_files):
-
-     if max_files >= 0 and len(images) > max_files:
-         print('Warning: early break at {} files'.format(max_files))
-         break
-
-     full_path = fname
-     relative_path = os.path.relpath(full_path,base_dir)
-
-     try:
-         im = Image.open(full_path)
-         h = im.height
-         w = im.width
-         tags = get_exif_tags(None,im)
-         s = tags['DateTimeOriginal']
-         dt = datetime.datetime.strptime(s,'%Y:%m:%d %H:%M:%S')
-     except:
-         # Corrupt or not an image
-         print('Warning: could not read {}'.format(fname))
-         non_image_files.append(full_path)
-         continue
-
-     # Store file info
-     image_info = {}
-     image_info['file_name'] = relative_path
-     image_info['width'] = w
-     image_info['height'] = h
-     image_info['datetime'] = dt
-     image_info['location'] = 'unknown'
-     image_info['id'] = str(uuid.uuid4())
-
-     images.append(image_info)
-
-     # E.g. 2018.03.30/coyote/DSCF0091.JPG
-     relative_path = image_info['file_name'].replace('\\','/')
-     tokens = relative_path.split('/')
-     camera_path_tokens = tokens[0:-2]
-     camera_path = '/'.join(camera_path_tokens)
-     camera_folders.add(camera_path)
-     image_info['camera_path'] = camera_path
-
-     category_name = tokens[-2].lower()
-     if category_name in class_mappings:
-         category_name = class_mappings[category_name]
-
-     if category_name not in category_name_to_category:
-         category = {}
-         category['id'] = next_category_id
-         category['name'] = category_name
-         next_category_id = next_category_id + 1
-         category_name_to_category[category_name] = category
-     else:
-         category = category_name_to_category[category_name]
-
-     annotation = {}
-     annotation['sequence_level_annotation'] = False
-     annotation['id'] = str(uuid.uuid4())
-     annotation['category_id'] = category['id']
-     annotation['image_id'] = image_info['id']
-     annotations.append(annotation)
-
- # ...for each image file
-
- assert len(annotations) == len(images)
-
- categories = list(category_name_to_category.values())
-
-
- #%% Synthesize sequence information
-
- print('Found {} camera folders'.format(len(camera_folders)))
-
- camera_folders = list(camera_folders)
-
- all_sequences = set()
-
- # Sort images by time within each folder
- # camera_path = camera_folders[0]
- for i_camera,camera_path in enumerate(camera_folders):
-
-     images_this_camera = [im for im in images if im['camera_path'] == camera_path]
-     sorted_images_this_camera = sorted(images_this_camera, key = lambda im: im['datetime'])
-
-     current_sequence_id = None
-     next_sequence_index = 0
-     previous_datetime = None
-
-     # previous_datetime = sorted_images_this_camera[0]['datetime']
-     # im = sorted_images_this_camera[1]
-     for im in sorted_images_this_camera:
-
-         if previous_datetime is None:
-             delta = None
-         else:
-             delta = (im['datetime'] - previous_datetime).total_seconds()
-
-         # Start a new sequence if necessary
-         if delta is None or delta > max_seconds_within_sequence:
-             next_sequence_index = 0
-             current_sequence_id = str(uuid.uuid4())
-             all_sequences.add(current_sequence_id)
-
-         im['seq_id'] = current_sequence_id
-         im['seq_num_frames'] = None
-         im['frame_num'] = next_sequence_index
-         next_sequence_index = next_sequence_index + 1
-         previous_datetime = im['datetime']
-
-     # ...for each image in this camera
-
- # ...for each camera
-
- print('Created {} sequences from {} images'.format(len(all_sequences),len(images)))
-
- # Fill in seq_num_frames
- num_frames_per_sequence = {}
- for seq_id in all_sequences:
-     images_this_sequence = [im for im in images if im['seq_id'] == seq_id]
-     num_frames_per_sequence[seq_id] = len(images_this_sequence)
-     for im in images_this_sequence:
-         im['seq_num_frames'] = len(images_this_sequence)
-
-
- #%% A little cleanup
-
- for im in tqdm(images):
-     if 'camera_path' in im:
-         del im['camera_path']
-     if not isinstance(im['datetime'],str):
-         im['datetime'] = str(im['datetime'])
-
-
- #%% Write output .json
-
- data = {}
- data['info'] = info
- data['images'] = images
- data['annotations'] = annotations
- data['categories'] = categories
-
- json.dump(data, open(output_filename,'w'), indent=1)
-
- print('Finished writing json to {}'.format(output_filename))
-
-
- #%% Validate data
-
- from megadetector.data_management.databases import integrity_check_json_db
-
- options = integrity_check_json_db.IntegrityCheckOptions()
- options.baseDir = base_dir
- options.bCheckImageSizes = False
- options.bCheckImageExistence = True
- options.bFindUnusedImages = False
-
- sorted_categories = integrity_check_json_db.integrity_check_json_db(output_filename,options)
-
-
- #%% Label previews
-
- from megadetector.visualization import visualize_db
-
- viz_options = visualize_db.DbVizOptions()
- viz_options.num_to_visualize = None
- viz_options.parallelize_rendering_n_cores = 8
- viz_options.parallelize_rendering = True
- viz_options.trim_to_images_with_bboxes = False
- viz_options.add_search_links = True
- viz_options.sort_by_filename = False
- html_output_file,image_db = visualize_db.visualize_db(output_filename,
-     os.path.join(output_base,'preview'),
-     base_dir,viz_options)
- os.startfile(html_output_file)
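
The heart of the removed bellevue_to_json.py is the sequence-synthesis step: within each camera folder, images sorted by EXIF timestamp are assigned to a new sequence whenever the gap to the previous image exceeds max_seconds_within_sequence (10 seconds). A minimal sketch of that grouping step follows, assuming hypothetical timestamps and reusing the seq_id, frame_num, and seq_num_frames field names from the deleted script.

import datetime
import uuid

# Hypothetical images from a single camera folder; the deleted script
# populated 'datetime' from the EXIF DateTimeOriginal tag.
images = [
    {'file_name': 'DSCF0091.JPG', 'datetime': datetime.datetime(2018, 3, 30, 8, 0, 0)},
    {'file_name': 'DSCF0092.JPG', 'datetime': datetime.datetime(2018, 3, 30, 8, 0, 4)},
    {'file_name': 'DSCF0093.JPG', 'datetime': datetime.datetime(2018, 3, 30, 9, 15, 0)},
]

max_seconds_within_sequence = 10.0

current_sequence_id = None
frame_num = 0
previous_datetime = None

for im in sorted(images, key=lambda im: im['datetime']):

    delta = None if previous_datetime is None else \
        (im['datetime'] - previous_datetime).total_seconds()

    # Start a new sequence at the first image or after a long gap
    if delta is None or delta > max_seconds_within_sequence:
        current_sequence_id = str(uuid.uuid4())
        frame_num = 0

    im['seq_id'] = current_sequence_id
    im['frame_num'] = frame_num
    frame_num += 1
    previous_datetime = im['datetime']

# Fill in seq_num_frames per sequence, as the deleted script did in a second pass
for seq_id in {im['seq_id'] for im in images}:
    images_this_sequence = [im for im in images if im['seq_id'] == seq_id]
    for im in images_this_sequence:
        im['seq_num_frames'] = len(images_this_sequence)

# The first two images (4 s apart) share a sequence; the third starts a new one
assert images[0]['seq_id'] == images[1]['seq_id'] != images[2]['seq_id']

The deleted script applied the same logic per camera folder, then stringified the datetime values before writing the COCO-style .json.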