megadetector 5.0.10__py3-none-any.whl → 5.0.11__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of megadetector might be problematic. Click here for more details.

Files changed (226) hide show
  1. {megadetector-5.0.10.dist-info → megadetector-5.0.11.dist-info}/LICENSE +0 -0
  2. {megadetector-5.0.10.dist-info → megadetector-5.0.11.dist-info}/METADATA +12 -11
  3. megadetector-5.0.11.dist-info/RECORD +5 -0
  4. megadetector-5.0.11.dist-info/top_level.txt +1 -0
  5. api/__init__.py +0 -0
  6. api/batch_processing/__init__.py +0 -0
  7. api/batch_processing/api_core/__init__.py +0 -0
  8. api/batch_processing/api_core/batch_service/__init__.py +0 -0
  9. api/batch_processing/api_core/batch_service/score.py +0 -439
  10. api/batch_processing/api_core/server.py +0 -294
  11. api/batch_processing/api_core/server_api_config.py +0 -98
  12. api/batch_processing/api_core/server_app_config.py +0 -55
  13. api/batch_processing/api_core/server_batch_job_manager.py +0 -220
  14. api/batch_processing/api_core/server_job_status_table.py +0 -152
  15. api/batch_processing/api_core/server_orchestration.py +0 -360
  16. api/batch_processing/api_core/server_utils.py +0 -92
  17. api/batch_processing/api_core_support/__init__.py +0 -0
  18. api/batch_processing/api_core_support/aggregate_results_manually.py +0 -46
  19. api/batch_processing/api_support/__init__.py +0 -0
  20. api/batch_processing/api_support/summarize_daily_activity.py +0 -152
  21. api/batch_processing/data_preparation/__init__.py +0 -0
  22. api/batch_processing/data_preparation/manage_local_batch.py +0 -2391
  23. api/batch_processing/data_preparation/manage_video_batch.py +0 -327
  24. api/batch_processing/integration/digiKam/setup.py +0 -6
  25. api/batch_processing/integration/digiKam/xmp_integration.py +0 -465
  26. api/batch_processing/integration/eMammal/test_scripts/config_template.py +0 -5
  27. api/batch_processing/integration/eMammal/test_scripts/push_annotations_to_emammal.py +0 -126
  28. api/batch_processing/integration/eMammal/test_scripts/select_images_for_testing.py +0 -55
  29. api/batch_processing/postprocessing/__init__.py +0 -0
  30. api/batch_processing/postprocessing/add_max_conf.py +0 -64
  31. api/batch_processing/postprocessing/categorize_detections_by_size.py +0 -163
  32. api/batch_processing/postprocessing/combine_api_outputs.py +0 -249
  33. api/batch_processing/postprocessing/compare_batch_results.py +0 -958
  34. api/batch_processing/postprocessing/convert_output_format.py +0 -397
  35. api/batch_processing/postprocessing/load_api_results.py +0 -195
  36. api/batch_processing/postprocessing/md_to_coco.py +0 -310
  37. api/batch_processing/postprocessing/md_to_labelme.py +0 -330
  38. api/batch_processing/postprocessing/merge_detections.py +0 -401
  39. api/batch_processing/postprocessing/postprocess_batch_results.py +0 -1904
  40. api/batch_processing/postprocessing/remap_detection_categories.py +0 -170
  41. api/batch_processing/postprocessing/render_detection_confusion_matrix.py +0 -661
  42. api/batch_processing/postprocessing/repeat_detection_elimination/find_repeat_detections.py +0 -211
  43. api/batch_processing/postprocessing/repeat_detection_elimination/remove_repeat_detections.py +0 -82
  44. api/batch_processing/postprocessing/repeat_detection_elimination/repeat_detections_core.py +0 -1631
  45. api/batch_processing/postprocessing/separate_detections_into_folders.py +0 -731
  46. api/batch_processing/postprocessing/subset_json_detector_output.py +0 -696
  47. api/batch_processing/postprocessing/top_folders_to_bottom.py +0 -223
  48. api/synchronous/__init__.py +0 -0
  49. api/synchronous/api_core/animal_detection_api/__init__.py +0 -0
  50. api/synchronous/api_core/animal_detection_api/api_backend.py +0 -152
  51. api/synchronous/api_core/animal_detection_api/api_frontend.py +0 -266
  52. api/synchronous/api_core/animal_detection_api/config.py +0 -35
  53. api/synchronous/api_core/animal_detection_api/data_management/annotations/annotation_constants.py +0 -47
  54. api/synchronous/api_core/animal_detection_api/detection/detector_training/copy_checkpoints.py +0 -43
  55. api/synchronous/api_core/animal_detection_api/detection/detector_training/model_main_tf2.py +0 -114
  56. api/synchronous/api_core/animal_detection_api/detection/process_video.py +0 -543
  57. api/synchronous/api_core/animal_detection_api/detection/pytorch_detector.py +0 -304
  58. api/synchronous/api_core/animal_detection_api/detection/run_detector.py +0 -627
  59. api/synchronous/api_core/animal_detection_api/detection/run_detector_batch.py +0 -1029
  60. api/synchronous/api_core/animal_detection_api/detection/run_inference_with_yolov5_val.py +0 -581
  61. api/synchronous/api_core/animal_detection_api/detection/run_tiled_inference.py +0 -754
  62. api/synchronous/api_core/animal_detection_api/detection/tf_detector.py +0 -165
  63. api/synchronous/api_core/animal_detection_api/detection/video_utils.py +0 -495
  64. api/synchronous/api_core/animal_detection_api/md_utils/azure_utils.py +0 -174
  65. api/synchronous/api_core/animal_detection_api/md_utils/ct_utils.py +0 -262
  66. api/synchronous/api_core/animal_detection_api/md_utils/directory_listing.py +0 -251
  67. api/synchronous/api_core/animal_detection_api/md_utils/matlab_porting_tools.py +0 -97
  68. api/synchronous/api_core/animal_detection_api/md_utils/path_utils.py +0 -416
  69. api/synchronous/api_core/animal_detection_api/md_utils/process_utils.py +0 -110
  70. api/synchronous/api_core/animal_detection_api/md_utils/sas_blob_utils.py +0 -509
  71. api/synchronous/api_core/animal_detection_api/md_utils/string_utils.py +0 -59
  72. api/synchronous/api_core/animal_detection_api/md_utils/url_utils.py +0 -144
  73. api/synchronous/api_core/animal_detection_api/md_utils/write_html_image_list.py +0 -226
  74. api/synchronous/api_core/animal_detection_api/md_visualization/visualization_utils.py +0 -841
  75. api/synchronous/api_core/tests/__init__.py +0 -0
  76. api/synchronous/api_core/tests/load_test.py +0 -110
  77. classification/__init__.py +0 -0
  78. classification/aggregate_classifier_probs.py +0 -108
  79. classification/analyze_failed_images.py +0 -227
  80. classification/cache_batchapi_outputs.py +0 -198
  81. classification/create_classification_dataset.py +0 -627
  82. classification/crop_detections.py +0 -516
  83. classification/csv_to_json.py +0 -226
  84. classification/detect_and_crop.py +0 -855
  85. classification/efficientnet/__init__.py +0 -9
  86. classification/efficientnet/model.py +0 -415
  87. classification/efficientnet/utils.py +0 -610
  88. classification/evaluate_model.py +0 -520
  89. classification/identify_mislabeled_candidates.py +0 -152
  90. classification/json_to_azcopy_list.py +0 -63
  91. classification/json_validator.py +0 -695
  92. classification/map_classification_categories.py +0 -276
  93. classification/merge_classification_detection_output.py +0 -506
  94. classification/prepare_classification_script.py +0 -194
  95. classification/prepare_classification_script_mc.py +0 -228
  96. classification/run_classifier.py +0 -286
  97. classification/save_mislabeled.py +0 -110
  98. classification/train_classifier.py +0 -825
  99. classification/train_classifier_tf.py +0 -724
  100. classification/train_utils.py +0 -322
  101. data_management/__init__.py +0 -0
  102. data_management/annotations/__init__.py +0 -0
  103. data_management/annotations/annotation_constants.py +0 -34
  104. data_management/camtrap_dp_to_coco.py +0 -238
  105. data_management/cct_json_utils.py +0 -395
  106. data_management/cct_to_md.py +0 -176
  107. data_management/cct_to_wi.py +0 -289
  108. data_management/coco_to_labelme.py +0 -272
  109. data_management/coco_to_yolo.py +0 -662
  110. data_management/databases/__init__.py +0 -0
  111. data_management/databases/add_width_and_height_to_db.py +0 -33
  112. data_management/databases/combine_coco_camera_traps_files.py +0 -206
  113. data_management/databases/integrity_check_json_db.py +0 -477
  114. data_management/databases/subset_json_db.py +0 -115
  115. data_management/generate_crops_from_cct.py +0 -149
  116. data_management/get_image_sizes.py +0 -188
  117. data_management/importers/add_nacti_sizes.py +0 -52
  118. data_management/importers/add_timestamps_to_icct.py +0 -79
  119. data_management/importers/animl_results_to_md_results.py +0 -158
  120. data_management/importers/auckland_doc_test_to_json.py +0 -372
  121. data_management/importers/auckland_doc_to_json.py +0 -200
  122. data_management/importers/awc_to_json.py +0 -189
  123. data_management/importers/bellevue_to_json.py +0 -273
  124. data_management/importers/cacophony-thermal-importer.py +0 -796
  125. data_management/importers/carrizo_shrubfree_2018.py +0 -268
  126. data_management/importers/carrizo_trail_cam_2017.py +0 -287
  127. data_management/importers/cct_field_adjustments.py +0 -57
  128. data_management/importers/channel_islands_to_cct.py +0 -913
  129. data_management/importers/eMammal/copy_and_unzip_emammal.py +0 -180
  130. data_management/importers/eMammal/eMammal_helpers.py +0 -249
  131. data_management/importers/eMammal/make_eMammal_json.py +0 -223
  132. data_management/importers/ena24_to_json.py +0 -275
  133. data_management/importers/filenames_to_json.py +0 -385
  134. data_management/importers/helena_to_cct.py +0 -282
  135. data_management/importers/idaho-camera-traps.py +0 -1407
  136. data_management/importers/idfg_iwildcam_lila_prep.py +0 -294
  137. data_management/importers/jb_csv_to_json.py +0 -150
  138. data_management/importers/mcgill_to_json.py +0 -250
  139. data_management/importers/missouri_to_json.py +0 -489
  140. data_management/importers/nacti_fieldname_adjustments.py +0 -79
  141. data_management/importers/noaa_seals_2019.py +0 -181
  142. data_management/importers/pc_to_json.py +0 -365
  143. data_management/importers/plot_wni_giraffes.py +0 -123
  144. data_management/importers/prepare-noaa-fish-data-for-lila.py +0 -359
  145. data_management/importers/prepare_zsl_imerit.py +0 -131
  146. data_management/importers/rspb_to_json.py +0 -356
  147. data_management/importers/save_the_elephants_survey_A.py +0 -320
  148. data_management/importers/save_the_elephants_survey_B.py +0 -332
  149. data_management/importers/snapshot_safari_importer.py +0 -758
  150. data_management/importers/snapshot_safari_importer_reprise.py +0 -665
  151. data_management/importers/snapshot_serengeti_lila.py +0 -1067
  152. data_management/importers/snapshotserengeti/make_full_SS_json.py +0 -150
  153. data_management/importers/snapshotserengeti/make_per_season_SS_json.py +0 -153
  154. data_management/importers/sulross_get_exif.py +0 -65
  155. data_management/importers/timelapse_csv_set_to_json.py +0 -490
  156. data_management/importers/ubc_to_json.py +0 -399
  157. data_management/importers/umn_to_json.py +0 -507
  158. data_management/importers/wellington_to_json.py +0 -263
  159. data_management/importers/wi_to_json.py +0 -441
  160. data_management/importers/zamba_results_to_md_results.py +0 -181
  161. data_management/labelme_to_coco.py +0 -548
  162. data_management/labelme_to_yolo.py +0 -272
  163. data_management/lila/__init__.py +0 -0
  164. data_management/lila/add_locations_to_island_camera_traps.py +0 -97
  165. data_management/lila/add_locations_to_nacti.py +0 -147
  166. data_management/lila/create_lila_blank_set.py +0 -557
  167. data_management/lila/create_lila_test_set.py +0 -151
  168. data_management/lila/create_links_to_md_results_files.py +0 -106
  169. data_management/lila/download_lila_subset.py +0 -177
  170. data_management/lila/generate_lila_per_image_labels.py +0 -515
  171. data_management/lila/get_lila_annotation_counts.py +0 -170
  172. data_management/lila/get_lila_image_counts.py +0 -111
  173. data_management/lila/lila_common.py +0 -300
  174. data_management/lila/test_lila_metadata_urls.py +0 -132
  175. data_management/ocr_tools.py +0 -874
  176. data_management/read_exif.py +0 -681
  177. data_management/remap_coco_categories.py +0 -84
  178. data_management/remove_exif.py +0 -66
  179. data_management/resize_coco_dataset.py +0 -189
  180. data_management/wi_download_csv_to_coco.py +0 -246
  181. data_management/yolo_output_to_md_output.py +0 -441
  182. data_management/yolo_to_coco.py +0 -676
  183. detection/__init__.py +0 -0
  184. detection/detector_training/__init__.py +0 -0
  185. detection/detector_training/model_main_tf2.py +0 -114
  186. detection/process_video.py +0 -703
  187. detection/pytorch_detector.py +0 -337
  188. detection/run_detector.py +0 -779
  189. detection/run_detector_batch.py +0 -1219
  190. detection/run_inference_with_yolov5_val.py +0 -917
  191. detection/run_tiled_inference.py +0 -935
  192. detection/tf_detector.py +0 -188
  193. detection/video_utils.py +0 -606
  194. docs/source/conf.py +0 -43
  195. md_utils/__init__.py +0 -0
  196. md_utils/azure_utils.py +0 -174
  197. md_utils/ct_utils.py +0 -612
  198. md_utils/directory_listing.py +0 -246
  199. md_utils/md_tests.py +0 -968
  200. md_utils/path_utils.py +0 -1044
  201. md_utils/process_utils.py +0 -157
  202. md_utils/sas_blob_utils.py +0 -509
  203. md_utils/split_locations_into_train_val.py +0 -228
  204. md_utils/string_utils.py +0 -92
  205. md_utils/url_utils.py +0 -323
  206. md_utils/write_html_image_list.py +0 -225
  207. md_visualization/__init__.py +0 -0
  208. md_visualization/plot_utils.py +0 -293
  209. md_visualization/render_images_with_thumbnails.py +0 -275
  210. md_visualization/visualization_utils.py +0 -1537
  211. md_visualization/visualize_db.py +0 -551
  212. md_visualization/visualize_detector_output.py +0 -406
  213. megadetector-5.0.10.dist-info/RECORD +0 -224
  214. megadetector-5.0.10.dist-info/top_level.txt +0 -8
  215. taxonomy_mapping/__init__.py +0 -0
  216. taxonomy_mapping/map_lila_taxonomy_to_wi_taxonomy.py +0 -491
  217. taxonomy_mapping/map_new_lila_datasets.py +0 -154
  218. taxonomy_mapping/prepare_lila_taxonomy_release.py +0 -142
  219. taxonomy_mapping/preview_lila_taxonomy.py +0 -591
  220. taxonomy_mapping/retrieve_sample_image.py +0 -71
  221. taxonomy_mapping/simple_image_download.py +0 -218
  222. taxonomy_mapping/species_lookup.py +0 -834
  223. taxonomy_mapping/taxonomy_csv_checker.py +0 -159
  224. taxonomy_mapping/taxonomy_graph.py +0 -346
  225. taxonomy_mapping/validate_lila_category_mappings.py +0 -83
  226. {megadetector-5.0.10.dist-info → megadetector-5.0.11.dist-info}/WHEEL +0 -0
@@ -1,490 +0,0 @@
1
"""

timelapse_csv_set_to_json.py

Given a directory full of reasonably-consistent Timelapse-exported
.csvs, assemble a CCT .json.

Assumes that you have a list of all files in the directory tree, including
image and .csv files.

"""

#%% Constants and imports

import uuid
import json
import time
import re
import humanfriendly
import os
import PIL
import pandas as pd
import numpy as np
from tqdm import tqdm

from md_visualization import visualize_db
from data_management.databases import integrity_check_json_db
from md_utils import path_utils

# Text file with relative paths to all files (images and .csv files)
input_relative_file_list = ''
# CCT-formatted .json file we will write
output_file = ''
# Folder to which the HTML preview of the database is written
preview_base = ''
# Root folder that the paths in [input_relative_file_list] are relative to
file_base = ''
# Name of the folder (relative to file_base) containing all the site folders
top_level_image_folder = ''
# Written verbatim into the output .json's "info" struct
contributor_name = ''
# List of (old,new) substring pairs applied to .csv filenames before parsing
csv_filename_mappings = []
# List of (old,new) substring pairs applied to parsed site names
site_name_mappings = []
# .csv files whose names contain any of these tokens are skipped entirely
csv_ignore_tokens = []

# Columns we expect to find in every Timelapse-exported .csv
expected_columns = 'File,RelativePath,Folder,Date,Time,ImageQuality,DeleteFlag,CameraLocation,StartDate,TechnicianName,Empty,Service,Species,HumanActivity,Count,AdultFemale,AdultMale,AdultUnknown,Offspring,YOY,UNK,Collars,Tags,NaturalMarks,Reaction,Illegal,GoodPicture,SecondOpinion,Comments'.\
    split(',')
# Per-row columns copied (lower-cased) onto each CCT image record
im_fields_to_copy = ['TechnicianName','Service','HumanActivity','Count','AdultFemale','AdultMale',
                     'AdultUnknown','Offspring','YOY','UNK','Collars','Tags','NaturalMarks','Reaction',
                     'Illegal','GoodPicture','SecondOpinion','Comments']

# Column names excluded from the extra/missing-field warnings below
ignore_fields = []
# If not None, image filenames must match this regex to be included
required_image_regex = None

# Species-name substitutions applied before category creation
category_mappings = {'none':'empty'}

# Optional (slow) per-image validation steps
check_file_existence = False
retrieve_image_size = False
54
-
55
-
56
#%% Read file list, make a list of all image files and all .csv files

# Read the flat list of relative paths (one per line)
with open(input_relative_file_list) as f:
    all_files = f.readlines()
all_files = [x.strip() for x in all_files]

image_files = set()
csv_files = []
non_matching_files = []

for fn in all_files:

    fnl = fn.lower()

    if fnl.endswith('.csv'):

        csv_files.append(fn)

    elif (fnl.endswith('.jpg') or fnl.endswith('.png')):

        # Optionally restrict the image set to filenames matching a regex
        if required_image_regex is not None and not re.match(required_image_regex,fn):
            non_matching_files.append(fn)
        else:
            image_files.add(fn)

# Sanity-check extensions.  .png files were accepted into image_files above,
# so allow them here too; the original assert allowed only .jpg and would
# have fired on the first .png in the file list.
for fn in image_files:
    assert fn.lower().endswith(('.jpg','.png'))

print('Found {} image files and {} .csv files ({} non-matching files)'.format(
    len(image_files),len(csv_files),len(non_matching_files)))
86
-
87
-
88
#%% Verify column consistency, create a giant array with all rows from all .csv files

bad_csv_files = []
normalized_dataframes = []

# i_csv = 0; csv_filename = csv_files[0]
for i_csv,csv_filename in enumerate(csv_files):

    full_path = os.path.join(file_base,csv_filename)

    # Read this .csv, falling back to latin1 on encoding errors.  The original
    # code left [df] undefined (a NameError on the next line) when the first
    # read failed with anything other than an encoding error; such files are
    # now skipped like any other unreadable file.
    try:
        df = pd.read_csv(full_path)
    except Exception as e:
        if 'invalid start byte' not in str(e):
            print("Can't read file {}: {}".format(csv_filename,str(e)))
            bad_csv_files.append(csv_filename)
            continue
        try:
            print('Read error, reverting to fallback encoding')
            df = pd.read_csv(full_path,encoding='latin1')
        except Exception as e:
            print("Can't read file {}: {}".format(csv_filename,str(e)))
            bad_csv_files.append(csv_filename)
            continue

    # Warn about (but tolerate) column mismatches.  The original code joined
    # the field names into one comma-separated string *before* filtering by
    # ignore_fields, so the filter iterated over individual characters; we
    # filter first, then format for printing.
    if not (len(df.columns) == len(expected_columns) and (df.columns == expected_columns).all()):
        extra_fields = [x for x in (set(df.columns) - set(expected_columns))
                        if x not in ignore_fields]
        missing_fields = [x for x in (set(expected_columns) - set(df.columns))
                          if x not in ignore_fields]
        if not (len(missing_fields) == 0 and len(extra_fields) == 0):
            print('In file {}, extra fields {}, missing fields {}'.format(csv_filename,
                  ','.join(extra_fields),','.join(missing_fields)))

    # Keep only the expected columns, and remember which file each row came from
    normalized_df = df[expected_columns].copy()
    normalized_df['source_file'] = csv_filename
    normalized_dataframes.append(normalized_df)

print('Ignored {} of {} csv files'.format(len(bad_csv_files),len(csv_files)))
valid_csv_files = [x for x in csv_files if x not in bad_csv_files]

input_metadata = pd.concat(normalized_dataframes)

# All the expected columns, plus the 'source_file' column we added
assert len(input_metadata.columns) == 1 + len(expected_columns)

print('Concatenated all .csv files into a dataframe with {} rows'.format(len(input_metadata)))
128
-
129
-
130
#%% Prepare some data structures we'll need for mapping image rows in .csv files to actual image files

# Every distinct folder that contains at least one image
all_image_folders = {os.path.dirname(fn) for fn in image_files}

print('Enumerated {} unique image folders'.format(len(all_image_folders)))

# In this data set, a site folder looks like:
#
# Processed Images\\site_name

# Collect the top two path components of each image folder as the site folder
site_folders = set()
for image_folder in all_image_folders:
    path_tokens = path_utils.split_path(image_folder)
    site_folders.add(path_tokens[0] + '/' + path_tokens[1])
149
-
150
-
151
#%% Map .csv files to candidate camera folders

csv_filename_to_camera_folder = {}

# fn = valid_csv_files[0]
for fn_original in valid_csv_files:

    fn = fn_original
    if any(s in fn for s in csv_ignore_tokens):
        continue

    # Apply filename normalizations before parsing out site/camera tokens
    for mapping in csv_filename_mappings:
        fn = fn.replace(mapping[0],mapping[1])

    # .csv filenames are expected to look like "site_cameranum_*"
    csv_filename = os.path.basename(fn)
    pat = '^(?P<site>[^_]+)_(?P<cameranum>[^_]+)_'
    re_result = re.search(pat,csv_filename)
    if re_result is None:
        # Fixed message: the original doubled single-quote rendered as "Couldnt"
        print("Couldn't match tokens in {}".format(csv_filename))
        continue
    site = re_result.group('site')

    for mapping in site_name_mappings:
        site = site.replace(mapping[0],mapping[1])

    cameranum = re_result.group('cameranum')

    site_folder = top_level_image_folder + '/' + site

    # Some site folders appear as "XXNNNN", some appear as "XXNNNN_complete"
    if site_folder not in site_folders:
        site = site + '_complete'
        site_folder = top_level_image_folder + '/' + site
        if site_folder not in site_folders:
            print('Could not find site folder for {}'.format(fn))
            continue

    camera_folder = top_level_image_folder + '/' + site + '/Camera_' + str(cameranum)

    # Verify that at least one enumerated image folder lives under this camera folder
    b_found_camera_folder = False

    for candidate_camera_folder in all_image_folders:

        if candidate_camera_folder.startswith(camera_folder):
            b_found_camera_folder = True
            break

    if not b_found_camera_folder:
        print('Could not find camera folder {} for csv {}'.format(camera_folder,fn))
        continue

    # Fixed: the dict is keyed by the *original* filename, so the duplicate
    # check must use fn_original (the original asserted on the mapped name [fn])
    assert fn_original not in csv_filename_to_camera_folder
    csv_filename_to_camera_folder[fn_original] = camera_folder

# ...for each .csv file

print('Successfully mapped {} of {} csv files to camera folders'.format(len(csv_filename_to_camera_folder),
    len(valid_csv_files)))

# Report any non-ignored .csv files we failed to map
for fn in valid_csv_files:

    if any(s in fn for s in csv_ignore_tokens):
        continue

    if fn not in csv_filename_to_camera_folder:
        print('No camera folder mapping for {}'.format(fn))
217
-
218
-
219
#%% Map camera folders to candidate image folders

camera_folders_to_image_folders = {}

# Iterate over *unique* camera folders; multiple .csv files can map to the
# same camera folder, and the original loop over .values() appended each
# matching image folder once per .csv file, producing duplicate candidates.
for camera_folder in set(csv_filename_to_camera_folder.values()):

    for image_folder in all_image_folders:
        if image_folder.startswith(camera_folder):
            camera_folders_to_image_folders.setdefault(camera_folder,[]).append(image_folder)
228
-
229
-
230
#%% Main loop over labels (prep)

start_time = time.time()

# Lookup tables populated as we create image records
relative_path_to_image = {}
image_id_to_image = {}

# Accumulators for the CCT database
images = []
annotations = []
category_name_to_category = {}
files_missing_from_file_list = []
files_missing_on_disk = []

duplicate_image_ids = set()

# Force the empty category to be ID 0
empty_category = {'name':'empty','id':0}
category_name_to_category['empty'] = empty_category

next_category_id = 1

# Things we've already warned about, so we only warn once
ignored_csv_files = set()
ignored_image_folders = set()

# Images that are marked empty and also have a species label
ambiguous_images = []
258
-
259
-
260
#%% Main loop over labels (loop)

# For each row in the concatenated metadata table: resolve the row to an
# actual image file, create (or re-use) a CCT image record, and attach one
# annotation per row.

# i_row = 0; row = input_metadata.iloc[i_row]
for i_row,row in tqdm(input_metadata.iterrows(),total=len(input_metadata)):
# for i_row,row in input_metadata.iterrows():

    image_filename = row['File']
    image_folder = row['RelativePath']
    # A float here is pandas' NaN for a missing RelativePath; fall back to 'Folder'
    if isinstance(image_folder,float):
        assert np.isnan(image_folder)
        image_folder = row['Folder']
    image_folder = image_folder.replace('\\','/')

    # Usually this is just a single folder name, sometimes it's a full path,
    # which we don't want
    image_folder = path_utils.split_path(image_folder)[-1]
    csv_filename = row['source_file']

    if any(s in csv_filename for s in csv_ignore_tokens):
        continue

    # Skip rows from .csv files we couldn't map to a camera folder (warn once per file)
    if csv_filename not in csv_filename_to_camera_folder:
        if csv_filename not in ignored_csv_files:
            print('No camera folder for {}'.format(csv_filename))
            assert csv_filename in valid_csv_files
            ignored_csv_files.add(csv_filename)
        continue

    camera_folder = csv_filename_to_camera_folder[csv_filename]
    candidate_image_folders = camera_folders_to_image_folders[camera_folder]

    # Find the candidate folder whose path ends with this row's folder name;
    # if several match, the last one enumerated wins
    image_folder_relative_path = None
    for candidate_image_folder in candidate_image_folders:
        if candidate_image_folder.endswith(image_folder):
            image_folder_relative_path = candidate_image_folder
    if image_folder_relative_path is None:
        camera_image_folder = camera_folder + '_' + image_folder
        if camera_image_folder not in ignored_image_folders:
            print('No image folder for {}'.format(camera_image_folder))
            ignored_image_folders.add(camera_image_folder)
        continue

    image_relative_path = image_folder_relative_path + '/' + image_filename
    if image_relative_path not in image_files:
        files_missing_from_file_list.append(image_relative_path)
        continue

    # Build a filesystem-safe unique ID from the relative path; existing
    # underscores are first remapped to '~' so path separators can become '_'
    image_id = image_relative_path.replace('_','~').replace('/','_').replace('\\','_')

    if image_id in image_id_to_image:

        # We've already created a record for this image (multiple annotations
        # per image); re-use it and remember the duplication
        im = image_id_to_image[image_id]
        assert im['id'] == image_id
        duplicate_image_ids.add(image_id)

    else:

        # Create a new CCT image record for this file
        im = {}
        im['id'] = image_id
        im['file_name'] = image_relative_path
        im['seq_id'] = '-1'
        im['datetime'] = row['Date'] + ' ' + row['Time']
        im['location'] = row['CameraLocation']

        # Copy the pass-through Timelapse columns, lower-casing the field names
        for col in im_fields_to_copy:
            im[col.lower()] = row[col]

        # Normalize pandas NaNs to empty strings so the record is JSON-friendly
        for k in im:
            if isinstance(im[k],float) and np.isnan(im[k]):
                im[k] = ''

        images.append(im)
        relative_path_to_image[image_relative_path] = im
        image_id_to_image[image_id] = im

        if check_file_existence or retrieve_image_size:

            image_full_path = os.path.join(file_base,image_relative_path)

            # Check whether this file exists on disk
            if check_file_existence:
                if not os.path.isfile(image_full_path):
                    files_missing_on_disk.append(image_relative_path)

            # Retrieve image width and height
            if retrieve_image_size:
                pil_image = PIL.Image.open(image_full_path)
                width, height = pil_image.size
                im['width'] = width
                im['height'] = height

    # Resolve this row's category name; NaN means no species label
    category_name = row['Species']
    if isinstance(category_name,float):
        assert np.isnan(category_name)
        category_name = None
    else:
        category_name = category_name.lower()

    # Reconcile the 'Empty' flag with the species label:
    #  - empty + species label   --> 'ambiguous'
    #  - empty + no label        --> 'empty'
    #  - not empty + no label    --> 'unlabeled'
    empty_token = row['Empty']
    if empty_token == True:
        if category_name is not None:
            category_name = 'ambiguous'
            ambiguous_images.append(im)
        else:
            category_name = 'empty'
    else:
        assert empty_token == False
        if category_name is None:
            category_name = 'unlabeled'

    if category_name in category_mappings:
        category_name = category_mappings[category_name]

    # Create the category on first use; 'empty' was pre-registered as ID 0
    if category_name not in category_name_to_category:
        category = {}
        category['name'] = category_name
        category['id'] = next_category_id
        next_category_id += 1
        category_name_to_category[category_name] = category
    else:
        category = category_name_to_category[category_name]

    category_id = category['id']

    # Create an annotation
    ann = {}

    # The Internet tells me this guarantees uniqueness to a reasonable extent, even
    # beyond the sheer improbability of collisions.
    ann['id'] = str(uuid.uuid1())
    ann['image_id'] = im['id']
    ann['category_id'] = category_id

    annotations.append(ann)

# ...for each row in the big table of concatenated .csv files

categories = list(category_name_to_category.values())

elapsed = time.time() - start_time
print('Finished verifying file loop in {}, {} images, {} missing images, {} repeat labels, {} ambiguous labels'.format(
    humanfriendly.format_timespan(elapsed), len(images), len(files_missing_from_file_list),
    len(duplicate_image_ids), len(ambiguous_images)))
403
-
404
-
405
#%% Fix cases where an image was annotated as 'unlabeled' and as something else

# Group annotations by image so we can see all categories assigned to each image
image_id_to_annotations = {}
for ann in annotations:
    image_id = ann['image_id']
    image_id_to_annotations.setdefault(image_id,[]).append(ann)

if 'unlabeled' not in category_name_to_category:

    # No image ever received an 'unlabeled' annotation, so there is nothing to
    # remove.  (The original code raised a KeyError in this case.)
    valid_annotations = list(annotations)

else:

    valid_annotations = []
    unlabeled_id = category_name_to_category['unlabeled']['id']

    for ann in annotations:

        if ann['category_id'] != unlabeled_id:
            valid_annotations.append(ann)
            continue

        # This annotation is 'unlabeled'
        image_id = ann['image_id']
        image_annotations = image_id_to_annotations[image_id]
        image_categories = list(set([a['category_id'] for a in image_annotations]))

        # Was there another category associated with this image?  If so, this
        # 'unlabeled' annotation is redundant; drop it.
        assert unlabeled_id in image_categories
        if len(image_categories) > 1:
            continue

        valid_annotations.append(ann)

print('Removed {} redundant unlabeled annotations'.format(len(annotations)-len(valid_annotations)))

# The original computed [valid_annotations] but never used it, so the filtering
# had no effect on the output file; make the filtered list the active one.
annotations = valid_annotations
434
-
435
-
436
#%% Check for un-annnotated images

# Enumerate all images
# list(relative_path_to_image.keys())[0]

# Any image file that never acquired an image record is un-annotated
unmatched_files = [image_path for image_path in image_files
                   if image_path not in relative_path_to_image]

print('Finished checking {} images to make sure they\'re in the metadata, found {} un-annotated images'.format(
    len(image_files),len(unmatched_files)))
450
-
451
-
452
#%% Create info struct

# Standard CCT "info" block describing the provenance of this database
info = {
    'year':2019,
    'version':1,
    'description':'COCO style database',
    'secondary_contributor':'Converted to COCO .json by Dan Morris',
    'contributor':contributor_name
}
460
-
461
-
462
#%% Write output

# Assemble the four standard CCT top-level fields
json_data = {}
json_data['images'] = images
json_data['annotations'] = annotations
json_data['categories'] = categories
json_data['info'] = info

# Use a context manager so the output file is flushed and closed; the original
# passed an anonymous open() handle to json.dump, which was never closed.
with open(output_file,'w') as f:
    json.dump(json_data, f, indent=1)

print('Finished writing .json file with {} images, {} annotations, and {} categories'.format(
    len(images),len(annotations),len(categories)))
473
-
474
-
475
#%% Validate the database's integrity

# Run the standard CCT integrity checker over the file we just wrote
# (verifies ID uniqueness, image/annotation/category cross-references, etc.)
options = integrity_check_json_db.IntegrityCheckOptions()
sortedCategories,data = integrity_check_json_db.integrity_check_json_db(output_file, options)
479
-
480
-
481
#%% Render a bunch of images to make sure the labels got carried along correctly

options = visualize_db.DbVizOptions()
options.num_to_visualize = 1000
options.parallelize_rendering = True
options.sort_by_filename = False
# Only preview images with substantive labels
options.classes_to_exclude = ['unlabeled','empty','ambiguous']

html_output_file,data = visualize_db.visualize_db(output_file,preview_base,file_base,options)
# NOTE: os.startfile is Windows-only; this interactive preview step assumes
# the script is being run on a Windows machine
os.startfile(html_output_file)