megadetector 5.0.10-py3-none-any.whl → 5.0.11-py3-none-any.whl

This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.

Potentially problematic release.
Files changed (226)
  1. {megadetector-5.0.10.dist-info → megadetector-5.0.11.dist-info}/LICENSE +0 -0
  2. {megadetector-5.0.10.dist-info → megadetector-5.0.11.dist-info}/METADATA +12 -11
  3. megadetector-5.0.11.dist-info/RECORD +5 -0
  4. megadetector-5.0.11.dist-info/top_level.txt +1 -0
  5. api/__init__.py +0 -0
  6. api/batch_processing/__init__.py +0 -0
  7. api/batch_processing/api_core/__init__.py +0 -0
  8. api/batch_processing/api_core/batch_service/__init__.py +0 -0
  9. api/batch_processing/api_core/batch_service/score.py +0 -439
  10. api/batch_processing/api_core/server.py +0 -294
  11. api/batch_processing/api_core/server_api_config.py +0 -98
  12. api/batch_processing/api_core/server_app_config.py +0 -55
  13. api/batch_processing/api_core/server_batch_job_manager.py +0 -220
  14. api/batch_processing/api_core/server_job_status_table.py +0 -152
  15. api/batch_processing/api_core/server_orchestration.py +0 -360
  16. api/batch_processing/api_core/server_utils.py +0 -92
  17. api/batch_processing/api_core_support/__init__.py +0 -0
  18. api/batch_processing/api_core_support/aggregate_results_manually.py +0 -46
  19. api/batch_processing/api_support/__init__.py +0 -0
  20. api/batch_processing/api_support/summarize_daily_activity.py +0 -152
  21. api/batch_processing/data_preparation/__init__.py +0 -0
  22. api/batch_processing/data_preparation/manage_local_batch.py +0 -2391
  23. api/batch_processing/data_preparation/manage_video_batch.py +0 -327
  24. api/batch_processing/integration/digiKam/setup.py +0 -6
  25. api/batch_processing/integration/digiKam/xmp_integration.py +0 -465
  26. api/batch_processing/integration/eMammal/test_scripts/config_template.py +0 -5
  27. api/batch_processing/integration/eMammal/test_scripts/push_annotations_to_emammal.py +0 -126
  28. api/batch_processing/integration/eMammal/test_scripts/select_images_for_testing.py +0 -55
  29. api/batch_processing/postprocessing/__init__.py +0 -0
  30. api/batch_processing/postprocessing/add_max_conf.py +0 -64
  31. api/batch_processing/postprocessing/categorize_detections_by_size.py +0 -163
  32. api/batch_processing/postprocessing/combine_api_outputs.py +0 -249
  33. api/batch_processing/postprocessing/compare_batch_results.py +0 -958
  34. api/batch_processing/postprocessing/convert_output_format.py +0 -397
  35. api/batch_processing/postprocessing/load_api_results.py +0 -195
  36. api/batch_processing/postprocessing/md_to_coco.py +0 -310
  37. api/batch_processing/postprocessing/md_to_labelme.py +0 -330
  38. api/batch_processing/postprocessing/merge_detections.py +0 -401
  39. api/batch_processing/postprocessing/postprocess_batch_results.py +0 -1904
  40. api/batch_processing/postprocessing/remap_detection_categories.py +0 -170
  41. api/batch_processing/postprocessing/render_detection_confusion_matrix.py +0 -661
  42. api/batch_processing/postprocessing/repeat_detection_elimination/find_repeat_detections.py +0 -211
  43. api/batch_processing/postprocessing/repeat_detection_elimination/remove_repeat_detections.py +0 -82
  44. api/batch_processing/postprocessing/repeat_detection_elimination/repeat_detections_core.py +0 -1631
  45. api/batch_processing/postprocessing/separate_detections_into_folders.py +0 -731
  46. api/batch_processing/postprocessing/subset_json_detector_output.py +0 -696
  47. api/batch_processing/postprocessing/top_folders_to_bottom.py +0 -223
  48. api/synchronous/__init__.py +0 -0
  49. api/synchronous/api_core/animal_detection_api/__init__.py +0 -0
  50. api/synchronous/api_core/animal_detection_api/api_backend.py +0 -152
  51. api/synchronous/api_core/animal_detection_api/api_frontend.py +0 -266
  52. api/synchronous/api_core/animal_detection_api/config.py +0 -35
  53. api/synchronous/api_core/animal_detection_api/data_management/annotations/annotation_constants.py +0 -47
  54. api/synchronous/api_core/animal_detection_api/detection/detector_training/copy_checkpoints.py +0 -43
  55. api/synchronous/api_core/animal_detection_api/detection/detector_training/model_main_tf2.py +0 -114
  56. api/synchronous/api_core/animal_detection_api/detection/process_video.py +0 -543
  57. api/synchronous/api_core/animal_detection_api/detection/pytorch_detector.py +0 -304
  58. api/synchronous/api_core/animal_detection_api/detection/run_detector.py +0 -627
  59. api/synchronous/api_core/animal_detection_api/detection/run_detector_batch.py +0 -1029
  60. api/synchronous/api_core/animal_detection_api/detection/run_inference_with_yolov5_val.py +0 -581
  61. api/synchronous/api_core/animal_detection_api/detection/run_tiled_inference.py +0 -754
  62. api/synchronous/api_core/animal_detection_api/detection/tf_detector.py +0 -165
  63. api/synchronous/api_core/animal_detection_api/detection/video_utils.py +0 -495
  64. api/synchronous/api_core/animal_detection_api/md_utils/azure_utils.py +0 -174
  65. api/synchronous/api_core/animal_detection_api/md_utils/ct_utils.py +0 -262
  66. api/synchronous/api_core/animal_detection_api/md_utils/directory_listing.py +0 -251
  67. api/synchronous/api_core/animal_detection_api/md_utils/matlab_porting_tools.py +0 -97
  68. api/synchronous/api_core/animal_detection_api/md_utils/path_utils.py +0 -416
  69. api/synchronous/api_core/animal_detection_api/md_utils/process_utils.py +0 -110
  70. api/synchronous/api_core/animal_detection_api/md_utils/sas_blob_utils.py +0 -509
  71. api/synchronous/api_core/animal_detection_api/md_utils/string_utils.py +0 -59
  72. api/synchronous/api_core/animal_detection_api/md_utils/url_utils.py +0 -144
  73. api/synchronous/api_core/animal_detection_api/md_utils/write_html_image_list.py +0 -226
  74. api/synchronous/api_core/animal_detection_api/md_visualization/visualization_utils.py +0 -841
  75. api/synchronous/api_core/tests/__init__.py +0 -0
  76. api/synchronous/api_core/tests/load_test.py +0 -110
  77. classification/__init__.py +0 -0
  78. classification/aggregate_classifier_probs.py +0 -108
  79. classification/analyze_failed_images.py +0 -227
  80. classification/cache_batchapi_outputs.py +0 -198
  81. classification/create_classification_dataset.py +0 -627
  82. classification/crop_detections.py +0 -516
  83. classification/csv_to_json.py +0 -226
  84. classification/detect_and_crop.py +0 -855
  85. classification/efficientnet/__init__.py +0 -9
  86. classification/efficientnet/model.py +0 -415
  87. classification/efficientnet/utils.py +0 -610
  88. classification/evaluate_model.py +0 -520
  89. classification/identify_mislabeled_candidates.py +0 -152
  90. classification/json_to_azcopy_list.py +0 -63
  91. classification/json_validator.py +0 -695
  92. classification/map_classification_categories.py +0 -276
  93. classification/merge_classification_detection_output.py +0 -506
  94. classification/prepare_classification_script.py +0 -194
  95. classification/prepare_classification_script_mc.py +0 -228
  96. classification/run_classifier.py +0 -286
  97. classification/save_mislabeled.py +0 -110
  98. classification/train_classifier.py +0 -825
  99. classification/train_classifier_tf.py +0 -724
  100. classification/train_utils.py +0 -322
  101. data_management/__init__.py +0 -0
  102. data_management/annotations/__init__.py +0 -0
  103. data_management/annotations/annotation_constants.py +0 -34
  104. data_management/camtrap_dp_to_coco.py +0 -238
  105. data_management/cct_json_utils.py +0 -395
  106. data_management/cct_to_md.py +0 -176
  107. data_management/cct_to_wi.py +0 -289
  108. data_management/coco_to_labelme.py +0 -272
  109. data_management/coco_to_yolo.py +0 -662
  110. data_management/databases/__init__.py +0 -0
  111. data_management/databases/add_width_and_height_to_db.py +0 -33
  112. data_management/databases/combine_coco_camera_traps_files.py +0 -206
  113. data_management/databases/integrity_check_json_db.py +0 -477
  114. data_management/databases/subset_json_db.py +0 -115
  115. data_management/generate_crops_from_cct.py +0 -149
  116. data_management/get_image_sizes.py +0 -188
  117. data_management/importers/add_nacti_sizes.py +0 -52
  118. data_management/importers/add_timestamps_to_icct.py +0 -79
  119. data_management/importers/animl_results_to_md_results.py +0 -158
  120. data_management/importers/auckland_doc_test_to_json.py +0 -372
  121. data_management/importers/auckland_doc_to_json.py +0 -200
  122. data_management/importers/awc_to_json.py +0 -189
  123. data_management/importers/bellevue_to_json.py +0 -273
  124. data_management/importers/cacophony-thermal-importer.py +0 -796
  125. data_management/importers/carrizo_shrubfree_2018.py +0 -268
  126. data_management/importers/carrizo_trail_cam_2017.py +0 -287
  127. data_management/importers/cct_field_adjustments.py +0 -57
  128. data_management/importers/channel_islands_to_cct.py +0 -913
  129. data_management/importers/eMammal/copy_and_unzip_emammal.py +0 -180
  130. data_management/importers/eMammal/eMammal_helpers.py +0 -249
  131. data_management/importers/eMammal/make_eMammal_json.py +0 -223
  132. data_management/importers/ena24_to_json.py +0 -275
  133. data_management/importers/filenames_to_json.py +0 -385
  134. data_management/importers/helena_to_cct.py +0 -282
  135. data_management/importers/idaho-camera-traps.py +0 -1407
  136. data_management/importers/idfg_iwildcam_lila_prep.py +0 -294
  137. data_management/importers/jb_csv_to_json.py +0 -150
  138. data_management/importers/mcgill_to_json.py +0 -250
  139. data_management/importers/missouri_to_json.py +0 -489
  140. data_management/importers/nacti_fieldname_adjustments.py +0 -79
  141. data_management/importers/noaa_seals_2019.py +0 -181
  142. data_management/importers/pc_to_json.py +0 -365
  143. data_management/importers/plot_wni_giraffes.py +0 -123
  144. data_management/importers/prepare-noaa-fish-data-for-lila.py +0 -359
  145. data_management/importers/prepare_zsl_imerit.py +0 -131
  146. data_management/importers/rspb_to_json.py +0 -356
  147. data_management/importers/save_the_elephants_survey_A.py +0 -320
  148. data_management/importers/save_the_elephants_survey_B.py +0 -332
  149. data_management/importers/snapshot_safari_importer.py +0 -758
  150. data_management/importers/snapshot_safari_importer_reprise.py +0 -665
  151. data_management/importers/snapshot_serengeti_lila.py +0 -1067
  152. data_management/importers/snapshotserengeti/make_full_SS_json.py +0 -150
  153. data_management/importers/snapshotserengeti/make_per_season_SS_json.py +0 -153
  154. data_management/importers/sulross_get_exif.py +0 -65
  155. data_management/importers/timelapse_csv_set_to_json.py +0 -490
  156. data_management/importers/ubc_to_json.py +0 -399
  157. data_management/importers/umn_to_json.py +0 -507
  158. data_management/importers/wellington_to_json.py +0 -263
  159. data_management/importers/wi_to_json.py +0 -441
  160. data_management/importers/zamba_results_to_md_results.py +0 -181
  161. data_management/labelme_to_coco.py +0 -548
  162. data_management/labelme_to_yolo.py +0 -272
  163. data_management/lila/__init__.py +0 -0
  164. data_management/lila/add_locations_to_island_camera_traps.py +0 -97
  165. data_management/lila/add_locations_to_nacti.py +0 -147
  166. data_management/lila/create_lila_blank_set.py +0 -557
  167. data_management/lila/create_lila_test_set.py +0 -151
  168. data_management/lila/create_links_to_md_results_files.py +0 -106
  169. data_management/lila/download_lila_subset.py +0 -177
  170. data_management/lila/generate_lila_per_image_labels.py +0 -515
  171. data_management/lila/get_lila_annotation_counts.py +0 -170
  172. data_management/lila/get_lila_image_counts.py +0 -111
  173. data_management/lila/lila_common.py +0 -300
  174. data_management/lila/test_lila_metadata_urls.py +0 -132
  175. data_management/ocr_tools.py +0 -874
  176. data_management/read_exif.py +0 -681
  177. data_management/remap_coco_categories.py +0 -84
  178. data_management/remove_exif.py +0 -66
  179. data_management/resize_coco_dataset.py +0 -189
  180. data_management/wi_download_csv_to_coco.py +0 -246
  181. data_management/yolo_output_to_md_output.py +0 -441
  182. data_management/yolo_to_coco.py +0 -676
  183. detection/__init__.py +0 -0
  184. detection/detector_training/__init__.py +0 -0
  185. detection/detector_training/model_main_tf2.py +0 -114
  186. detection/process_video.py +0 -703
  187. detection/pytorch_detector.py +0 -337
  188. detection/run_detector.py +0 -779
  189. detection/run_detector_batch.py +0 -1219
  190. detection/run_inference_with_yolov5_val.py +0 -917
  191. detection/run_tiled_inference.py +0 -935
  192. detection/tf_detector.py +0 -188
  193. detection/video_utils.py +0 -606
  194. docs/source/conf.py +0 -43
  195. md_utils/__init__.py +0 -0
  196. md_utils/azure_utils.py +0 -174
  197. md_utils/ct_utils.py +0 -612
  198. md_utils/directory_listing.py +0 -246
  199. md_utils/md_tests.py +0 -968
  200. md_utils/path_utils.py +0 -1044
  201. md_utils/process_utils.py +0 -157
  202. md_utils/sas_blob_utils.py +0 -509
  203. md_utils/split_locations_into_train_val.py +0 -228
  204. md_utils/string_utils.py +0 -92
  205. md_utils/url_utils.py +0 -323
  206. md_utils/write_html_image_list.py +0 -225
  207. md_visualization/__init__.py +0 -0
  208. md_visualization/plot_utils.py +0 -293
  209. md_visualization/render_images_with_thumbnails.py +0 -275
  210. md_visualization/visualization_utils.py +0 -1537
  211. md_visualization/visualize_db.py +0 -551
  212. md_visualization/visualize_detector_output.py +0 -406
  213. megadetector-5.0.10.dist-info/RECORD +0 -224
  214. megadetector-5.0.10.dist-info/top_level.txt +0 -8
  215. taxonomy_mapping/__init__.py +0 -0
  216. taxonomy_mapping/map_lila_taxonomy_to_wi_taxonomy.py +0 -491
  217. taxonomy_mapping/map_new_lila_datasets.py +0 -154
  218. taxonomy_mapping/prepare_lila_taxonomy_release.py +0 -142
  219. taxonomy_mapping/preview_lila_taxonomy.py +0 -591
  220. taxonomy_mapping/retrieve_sample_image.py +0 -71
  221. taxonomy_mapping/simple_image_download.py +0 -218
  222. taxonomy_mapping/species_lookup.py +0 -834
  223. taxonomy_mapping/taxonomy_csv_checker.py +0 -159
  224. taxonomy_mapping/taxonomy_graph.py +0 -346
  225. taxonomy_mapping/validate_lila_category_mappings.py +0 -83
  226. {megadetector-5.0.10.dist-info → megadetector-5.0.11.dist-info}/WHEEL +0 -0
data_management/lila/generate_lila_per_image_labels.py
@@ -1,515 +0,0 @@
- """
-
- generate_lila_per_image_labels.py
-
- Generate a .csv file with one row per annotation, containing full URLs to every
- camera trap image on LILA, with taxonomically expanded labels.
-
- Typically there will be one row per image, though images with multiple annotations
- will have multiple rows.
-
- Some images may not physically exist, particularly images that are labeled as "human".
- This script does not validate image URLs.
-
- Does not include bounding box annotations.
-
- """
-
- #%% Constants and imports
-
- import os
- import json
- import pandas as pd
- import numpy as np
- import dateparser
- import csv
-
- from collections import defaultdict
- from tqdm import tqdm
-
- from data_management.lila.lila_common import read_lila_metadata, \
-     read_metadata_file_for_dataset, \
-     read_lila_taxonomy_mapping
-
- from md_utils import write_html_image_list
- from md_utils.path_utils import zip_file
- from md_utils.path_utils import open_file
-
- # We'll write images, metadata downloads, and temporary files here
- lila_local_base = os.path.expanduser('~/lila')
- preview_folder = os.path.join(lila_local_base,'csv_preview')
-
- os.makedirs(lila_local_base,exist_ok=True)
-
- metadata_dir = os.path.join(lila_local_base,'metadata')
- os.makedirs(metadata_dir,exist_ok=True)
-
- output_file = os.path.join(lila_local_base,'lila_image_urls_and_labels.csv')
-
- # Some datasets don't have "sequence_level_annotation" fields populated, but we know their
- # annotation level
- ds_name_to_annotation_level = {}
- ds_name_to_annotation_level['Caltech Camera Traps'] = 'image'
- ds_name_to_annotation_level['ENA24'] = 'image'
- ds_name_to_annotation_level['Island Conservation Camera Traps'] = 'image'
- ds_name_to_annotation_level['Channel Islands Camera Traps'] = 'image'
- ds_name_to_annotation_level['WCS Camera Traps'] = 'sequence'
- ds_name_to_annotation_level['Wellington Camera Traps'] = 'sequence'
- ds_name_to_annotation_level['NACTI'] = 'unknown'
-
- known_unmapped_labels = set(['WCS Camera Traps:#ref!'])
-
- debug_max_images_per_dataset = -1
- if debug_max_images_per_dataset > 0:
-     print('Running in debug mode')
-     output_file = output_file.replace('.csv','_debug.csv')
-
-
- #%% Download and parse the metadata file
-
- metadata_table = read_lila_metadata(metadata_dir)
-
- # To select an individual data set for debugging
- if False:
-     k = 'Idaho Camera Traps'
-     metadata_table = {k:metadata_table[k]}
-
-
- #%% Download and extract metadata for each dataset
-
- for ds_name in metadata_table.keys():
-     metadata_table[ds_name]['metadata_filename'] = read_metadata_file_for_dataset(ds_name=ds_name,
-                                                                                   metadata_dir=metadata_dir,
-                                                                                   metadata_table=metadata_table)
-
- #%% Load taxonomy data
-
- taxonomy_df = read_lila_taxonomy_mapping(metadata_dir)
-
-
- #%% Build a dictionary that maps each [dataset,query] pair to the full taxonomic label set
-
- ds_label_to_taxonomy = {}
-
- # i_row = 0; row = taxonomy_df.iloc[i_row]
- for i_row,row in taxonomy_df.iterrows():
-
-     ds_label = row['dataset_name'] + ':' + row['query']
-     assert ds_label.strip() == ds_label
-     assert ds_label not in ds_label_to_taxonomy
-     ds_label_to_taxonomy[ds_label] = row.to_dict()
-
-
- #%% Process annotations for each dataset
-
- # Takes several hours
-
- # The order of these headers needs to match the order in which fields are added later in this cell;
- # don't mess with this order.
- header = ['dataset_name','url_gcp','url_aws','url_azure',
-           'image_id','sequence_id','location_id','frame_num',
-           'original_label','scientific_name','common_name','datetime','annotation_level']
-
- taxonomy_levels_to_include = \
-     ['kingdom','phylum','subphylum','superclass','class','subclass','infraclass','superorder','order',
-      'suborder','infraorder','superfamily','family','subfamily','tribe','genus','species','subspecies',
-      'variety']
-
- header.extend(taxonomy_levels_to_include)
-
- missing_annotations = set()
-
- def clearnan(v):
-     if isinstance(v,float):
-         assert np.isnan(v)
-         v = ''
-     assert isinstance(v,str)
-     return v
-
- with open(output_file,'w',encoding='utf-8',newline='') as f:
-
-     csv_writer = csv.writer(f)
-     csv_writer.writerow(header)
-
-     # ds_name = list(metadata_table.keys())[0]
-     for ds_name in metadata_table.keys():
-
-         if 'bbox' in ds_name:
-             print('Skipping bbox dataset {}'.format(ds_name))
-             continue
-
-         print('Processing dataset {}'.format(ds_name))
-
-         json_filename = metadata_table[ds_name]['metadata_filename']
-         with open(json_filename, 'r') as f:
-             data = json.load(f)
-
-         categories = data['categories']
-         category_ids = [c['id'] for c in categories]
-         category_id_to_name = {c['id']:c['name'] for c in categories}
-
-         annotations = data['annotations']
-         images = data['images']
-
-         image_id_to_annotations = defaultdict(list)
-
-         # Go through annotations, marking each image with the categories that are present
-         #
-         # ann = annotations[0]
-         for ann in annotations:
-             image_id_to_annotations[ann['image_id']].append(ann)
-
-         unannotated_images = []
-
-         found_date = False
-         found_location = False
-         found_annotation_level = False
-
-         if ds_name in ds_name_to_annotation_level:
-             expected_annotation_level = ds_name_to_annotation_level[ds_name]
-         else:
-             expected_annotation_level = None
-
-         # im = images[10]
-         for i_image,im in enumerate(images):
-
-             if (debug_max_images_per_dataset is not None) and (debug_max_images_per_dataset > 0) \
-                and (i_image >= debug_max_images_per_dataset):
-                 break
-
-             file_name = im['file_name'].replace('\\','/')
-             base_url_gcp = metadata_table[ds_name]['image_base_url_gcp']
-             base_url_aws = metadata_table[ds_name]['image_base_url_aws']
-             base_url_azure = metadata_table[ds_name]['image_base_url_azure']
-             assert not base_url_gcp.endswith('/')
-             assert not base_url_aws.endswith('/')
-             assert not base_url_azure.endswith('/')
-
-             url_gcp = base_url_gcp + '/' + file_name
-             url_aws = base_url_aws + '/' + file_name
-             url_azure = base_url_azure + '/' + file_name
-
-             for k in im.keys():
-                 if ('date' in k or 'time' in k) and (k not in ['datetime','date_captured']):
-                     raise ValueError('Unrecognized datetime field')
-
-             # This field name was only used for Caltech Camera Traps
-             if 'date_captured' in im:
-                 assert ds_name == 'Caltech Camera Traps'
-                 im['datetime'] = im['date_captured']
-
-             def has_valid_datetime(im):
-                 if 'datetime' not in im:
-                     return False
-                 v = im['datetime']
-                 if v is None:
-                     return False
-                 if isinstance(v,str):
-                     return len(v) > 0
-                 else:
-                     assert isinstance(v,float) and np.isnan(v)
-                     return False
-
-             dt_string = ''
-             if (has_valid_datetime(im)):
-
-                 dt = dateparser.parse(im['datetime'])
-
-                 if dt is None or dt.year < 1990 or dt.year > 2025:
-
-                     # raise ValueError('Suspicious date parsing result')
-
-                     # Special case we don't want to print a warning about... this is
-                     # an invalid date that very likely originates on the camera, not at
-                     # some intermediate processing step.
-                     #
-                     # print('Suspicious date for image {}: {} ({})'.format(
-                     #     im['id'], im['datetime'], ds_name))
-                     pass
-
-                 else:
-
-                     found_date = True
-                     dt_string = dt.strftime("%m-%d-%Y %H:%M:%S")
-
-             # Location, sequence, and image IDs are only guaranteed to be unique within
-             # a dataset, so for the output .csv file, include both the dataset name and the ID
-             if 'location' in im:
-                 found_location = True
-                 location_id = ds_name + ' : ' + str(im['location'])
-             else:
-                 location_id = ds_name
-
-             image_id = ds_name + ' : ' + str(im['id'])
-
-             if 'seq_id' in im:
-                 sequence_id = ds_name + ' : ' + str(im['seq_id'])
-             else:
-                 sequence_id = ds_name + ' : ' + 'unknown'
-
-             if 'frame_num' in im:
-                 frame_num = im['frame_num']
-             else:
-                 frame_num = -1
-
-             annotations_this_image = image_id_to_annotations[im['id']]
-
-             categories_this_image = set()
-
-             annotation_level = 'unknown'
-
-             for ann in annotations_this_image:
-                 assert ann['image_id'] == im['id']
-                 categories_this_image.add(category_id_to_name[ann['category_id']])
-                 if 'sequence_level_annotation' in ann:
-                     found_annotation_level = True
-                     if ann['sequence_level_annotation']:
-                         annotation_level = 'sequence'
-                     else:
-                         annotation_level = 'image'
-                     if expected_annotation_level is not None:
-                         assert expected_annotation_level == annotation_level,\
-                             'Unexpected annotation level'
-                 elif expected_annotation_level is not None:
-                     annotation_level = expected_annotation_level
-
-             if len(categories_this_image) == 0:
-                 unannotated_images.append(im)
-                 continue
-
-             # category_name = list(categories_this_image)[0]
-             for category_name in categories_this_image:
-
-                 ds_label = ds_name + ':' + category_name.lower()
-
-                 if ds_label not in ds_label_to_taxonomy:
-
-                     assert ds_label in known_unmapped_labels
-
-                     # Only print a warning the first time we see an unmapped label
-                     if ds_label not in missing_annotations:
-                         print('Warning: {} not in taxonomy file'.format(ds_label))
-                     missing_annotations.add(ds_label)
-                     continue
-
-                 taxonomy_labels = ds_label_to_taxonomy[ds_label]
-
-                 """
-                 The next block must follow the column order defined in "header":
-
-                 ['dataset_name','url_gcp','url_aws','url_azure',
-                  'image_id','sequence_id','location_id','frame_num',
-                  'original_label','scientific_name','common_name','datetime','annotation_level']
-                 """
-
-                 row = []
-                 row.append(ds_name)
-                 row.append(url_gcp)
-                 row.append(url_aws)
-                 row.append(url_azure)
-                 row.append(image_id)
-                 row.append(sequence_id)
-                 row.append(location_id)
-                 row.append(frame_num)
-                 row.append(taxonomy_labels['query'])
-                 row.append(clearnan(taxonomy_labels['scientific_name']))
-                 row.append(clearnan(taxonomy_labels['common_name']))
-                 row.append(dt_string)
-                 row.append(annotation_level)
-
-                 for s in taxonomy_levels_to_include:
-                     row.append(clearnan(taxonomy_labels[s]))
-
-                 assert len(row) == len(header)
-
-                 csv_writer.writerow(row)
-
-             # ...for each category that was applied at least once to this image
-
-         # ...for each image in this dataset
-
-         if not found_date:
-             pass
-             # print('Warning: no date information available for this dataset')
-
-         if not found_location:
-             pass
-             # print('Warning: no location information available for this dataset')
-
-         if not found_annotation_level and (ds_name not in ds_name_to_annotation_level):
-             print('Warning: no annotation level information available for this dataset')
-
-         if len(unannotated_images) > 0:
-             print('Warning: {} of {} images are un-annotated\n'.\
-                   format(len(unannotated_images),len(images)))
-
-     # ...for each dataset
-
- # ...with open()
-
- print('Processed {} datasets'.format(len(metadata_table)))
-
-
- #%% Read the .csv back
-
- df = pd.read_csv(output_file)
- print('Read {} lines from {}'.format(len(df),output_file))
-
-
- #%% Do some post-hoc integrity checking
-
- # Takes ~10 minutes without using apply()
-
- tqdm.pandas()
-
- def isint(v):
-     return isinstance(v,int) or isinstance(v,np.int64)
-
- valid_annotation_levels = set(['sequence','image','unknown'])
-
- # Collect a list of locations within each dataset; we'll use this
- # in the next cell to look for datasets that only have a single location
- dataset_name_to_locations = defaultdict(set)
-
- def check_row(row):
-
-     assert row['dataset_name'] in metadata_table.keys()
-     for url_column in ['url_gcp','url_aws','url_azure']:
-         assert row[url_column].startswith('https://') or row[url_column].startswith('http://')
-     assert ' : ' in row['image_id']
-     assert 'seq' not in row['location_id'].lower()
-     assert row['annotation_level'] in valid_annotation_levels
-
-     # frame_num should either be NaN or an integer
-     if isinstance(row['frame_num'],float):
-         assert np.isnan(row['frame_num'])
-     else:
-         # -1 is sometimes used for sequences of unknown length
-         assert isint(row['frame_num']) and row['frame_num'] >= -1
-
-     ds_name = row['dataset_name']
-     dataset_name_to_locations[ds_name].add(row['location_id'])
-
- # Faster, but more annoying to debug
- if False:
-
-     df.progress_apply(check_row, axis=1)
-
- else:
-
-     # i_row = 0; row = df.iloc[i_row]
-     for i_row,row in tqdm(df.iterrows(),total=len(df)):
-         check_row(row)
-
-
- #%% Check for datasets that have only one location string
-
- # Expected: ENA24, Missouri Camera Traps
-
- for ds_name in dataset_name_to_locations.keys():
-     if len(dataset_name_to_locations[ds_name]) == 1:
-         print('No location information for {}'.format(ds_name))
-
-
- #%% Preview constants
-
- n_empty_images_per_dataset = 3
- n_non_empty_images_per_dataset = 10
-
- os.makedirs(preview_folder,exist_ok=True)
-
-
- #%% Choose images to download
-
- np.random.seed(0)
- images_to_download = []
-
- # ds_name = list(metadata_table.keys())[2]
- for ds_name in metadata_table.keys():
-
-     if 'bbox' in ds_name:
-         continue
-
-     # Find all rows for this dataset
-     ds_rows = df.loc[df['dataset_name'] == ds_name]
-
-     print('{} rows available for {}'.format(len(ds_rows),ds_name))
-     assert len(ds_rows) > 0
-
-     empty_rows = ds_rows[ds_rows['scientific_name'].isnull()]
-     non_empty_rows = ds_rows[~ds_rows['scientific_name'].isnull()]
-
-     if len(empty_rows) == 0:
-         print('No empty images available for {}'.format(ds_name))
-     elif len(empty_rows) > n_empty_images_per_dataset:
-         empty_rows = empty_rows.sample(n=n_empty_images_per_dataset)
-     images_to_download.extend(empty_rows.to_dict('records'))
-
-     if len(non_empty_rows) == 0:
-         print('No non-empty images available for {}'.format(ds_name))
-     elif len(non_empty_rows) > n_non_empty_images_per_dataset:
-         non_empty_rows = non_empty_rows.sample(n=n_non_empty_images_per_dataset)
-     images_to_download.extend(non_empty_rows.to_dict('records'))
-
- # ...for each dataset
-
- print('Selected {} total images'.format(len(images_to_download)))
-
-
- #%% Download images (prep)
-
- # Expect a few errors for images with human or vehicle labels (or things like "ignore" that *could* be humans)
-
- preferred_cloud = 'aws'
-
- url_to_target_file = {}
-
- # i_image = 10; image = images_to_download[i_image]
- for i_image,image in tqdm(enumerate(images_to_download),total=len(images_to_download)):
-
-     url = image['url_' + preferred_cloud]
-     ext = os.path.splitext(url)[1]
-     fn_relative = 'image_{}'.format(str(i_image).zfill(4)) + ext
-     fn_abs = os.path.join(preview_folder,fn_relative)
-     image['relative_file'] = fn_relative
-     image['url'] = url
-     url_to_target_file[url] = fn_abs
-
-
- #%% Download images (execution)
-
- from md_utils.url_utils import parallel_download_urls
- download_results = parallel_download_urls(url_to_target_file,verbose=False,overwrite=True,
-                                           n_workers=20,pool_type='thread')
-
-
- #%% Write preview HTML
-
- html_filename = os.path.join(preview_folder,'index.html')
-
- html_images = []
-
- # im = images_to_download[0]
- for im in images_to_download:
-
-     if im['relative_file'] is None:
-         continue
-
-     output_im = {}
-     output_im['filename'] = im['relative_file']
-     output_im['linkTarget'] = im['url']
-     output_im['title'] = '<b>{}: {}</b><br/><br/>'.format(im['dataset_name'],im['original_label']) + str(im)
-     output_im['imageStyle'] = 'width:600px;'
-     output_im['textStyle'] = 'font-weight:normal;font-size:100%;'
-     html_images.append(output_im)
-
- write_html_image_list.write_html_image_list(html_filename,html_images)
-
- open_file(html_filename)
-
-
- #%% Zip output file
-
- zipped_output_file = zip_file(output_file,verbose=True)
-
- print('Zipped {} to {}'.format(output_file,zipped_output_file))
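
For reference, the deleted script above writes ~/lila/lila_image_urls_and_labels.csv with one row per annotation. The following is a minimal consumption sketch, not part of the package: the path assumes the script's default output location, the column names come from its header list, and the dataset name is one of those named in the script.

# Hypothetical downstream use of lila_image_urls_and_labels.csv
import os
import pandas as pd

csv_path = os.path.expanduser('~/lila/lila_image_urls_and_labels.csv')
df = pd.read_csv(csv_path)

# Rows with an empty scientific_name are labels that didn't map to a taxon
non_empty = df[df['scientific_name'].notnull()]

# Annotation counts per taxonomic family for one dataset
caltech = non_empty[non_empty['dataset_name'] == 'Caltech Camera Traps']
print(caltech['family'].value_counts().head(10))

Because every image_id, sequence_id, and location_id is prefixed with the dataset name, these IDs can be compared across datasets without further disambiguation.
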
data_management/lila/get_lila_annotation_counts.py
@@ -1,170 +0,0 @@
- """
-
- get_lila_annotation_counts.py
-
- Generates a .json-formatted dictionary mapping each LILA dataset to all categories
- that exist for that dataset, with counts for the number of occurrences of each category
- (the number of *annotations* for each category, not the number of *images*).
-
- Also loads the taxonomy mapping file, to include scientific names for each category.
-
- get_lila_image_counts.py counts the number of *images* for each category in each dataset.
-
- """
-
- #%% Constants and imports
-
- import json
- import os
-
- from data_management.lila.lila_common import read_lila_metadata,\
-     read_metadata_file_for_dataset, read_lila_taxonomy_mapping
-
- # cloud provider to use for downloading images; options are 'gcp', 'azure', or 'aws'
- preferred_cloud = 'gcp'
-
- # array to fill for output
- category_list = []
-
- # We'll write images, metadata downloads, and temporary files here
- lila_local_base = os.path.expanduser('~/lila')
-
- output_dir = os.path.join(lila_local_base,'lila_categories_list')
- os.makedirs(output_dir,exist_ok=True)
-
- metadata_dir = os.path.join(lila_local_base,'metadata')
- os.makedirs(metadata_dir,exist_ok=True)
-
- output_file = os.path.join(output_dir,'lila_dataset_to_categories.json')
-
-
- #%% Load category and taxonomy files
-
- taxonomy_df = read_lila_taxonomy_mapping(metadata_dir)
-
-
- #%% Map dataset names and category names to scientific names
-
- ds_query_to_scientific_name = {}
-
- unmapped_queries = set()
-
- datasets_with_taxonomy_mapping = set()
-
- # i_row = 1; row = taxonomy_df.iloc[i_row]; row
- for i_row,row in taxonomy_df.iterrows():
-
-     datasets_with_taxonomy_mapping.add(row['dataset_name'])
-
-     ds_query = row['dataset_name'] + ':' + row['query']
-     ds_query = ds_query.lower()
-
-     if not isinstance(row['scientific_name'],str):
-         unmapped_queries.add(ds_query)
-         ds_query_to_scientific_name[ds_query] = 'unmapped'
-         continue
-
-     ds_query_to_scientific_name[ds_query] = row['scientific_name']
-
- print('Loaded taxonomy mappings for {} datasets'.format(len(datasets_with_taxonomy_mapping)))
-
-
- #%% Download and parse the metadata file
-
- metadata_table = read_lila_metadata(metadata_dir)
-
- print('Loaded metadata URLs for {} datasets'.format(len(metadata_table)))
-
-
- #%% Download and extract metadata for each dataset
-
- for ds_name in metadata_table.keys():
-     metadata_table[ds_name]['json_filename'] = read_metadata_file_for_dataset(ds_name=ds_name,
-                                                                               metadata_dir=metadata_dir,
-                                                                               metadata_table=metadata_table)
-
-
- #%% Get category names and counts for each dataset
-
- from collections import defaultdict
-
- dataset_to_categories = {}
-
- # ds_name = 'NACTI'
- for ds_name in metadata_table.keys():
-
-     taxonomy_mapping_available = (ds_name in datasets_with_taxonomy_mapping)
-
-     if not taxonomy_mapping_available:
-         print('Warning: taxonomy mapping not available for {}'.format(ds_name))
-
-     print('Finding categories in {}'.format(ds_name))
-
-     json_filename = metadata_table[ds_name]['json_filename']
-     base_url = metadata_table[ds_name]['image_base_url_' + preferred_cloud]
-     assert not base_url.endswith('/')
-
-     # Open the metadata file
-     with open(json_filename, 'r') as f:
-         data = json.load(f)
-
-     # Collect list of categories and mappings to category name
-     categories = data['categories']
-
-     category_id_to_count = defaultdict(int)
-     annotations = data['annotations']
-
-     # ann = annotations[0]
-     for ann in annotations:
-         category_id_to_count[ann['category_id']] = category_id_to_count[ann['category_id']] + 1
-
-     # c = categories[0]
-     for c in categories:
-         count = category_id_to_count[c['id']]
-         if 'count' in c:
-             assert 'bbox' in ds_name or c['count'] == count
-         c['count'] = count
-
-         # Don't do taxonomy mapping for bbox data sets, which are sometimes just binary and are
-         # always redundant with the class-level data sets.
-         if 'bbox' in ds_name:
-             c['scientific_name_from_taxonomy_mapping'] = None
-         elif not taxonomy_mapping_available:
-             c['scientific_name_from_taxonomy_mapping'] = None
-         else:
-             taxonomy_query_string = ds_name.lower().strip() + ':' + c['name'].lower()
-             if taxonomy_query_string not in ds_query_to_scientific_name:
-                 print('No match for query string {}'.format(taxonomy_query_string))
-                 # As of right now, this is the only quirky case
-                 assert '#ref!' in taxonomy_query_string and 'wcs' in ds_name.lower()
-                 c['scientific_name_from_taxonomy_mapping'] = None
-             else:
-                 sn = ds_query_to_scientific_name[taxonomy_query_string]
-                 assert sn is not None and len(sn) > 0
-                 c['scientific_name_from_taxonomy_mapping'] = sn
-
-     dataset_to_categories[ds_name] = categories
-
- # ...for each dataset
-
-
- #%% Print the results
-
- # ds_name = list(dataset_to_categories.keys())[0]
- for ds_name in dataset_to_categories:
-
-     print('\n** Category counts for {} **\n'.format(ds_name))
-
-     categories = dataset_to_categories[ds_name]
-     categories = sorted(categories, key=lambda x: x['count'], reverse=True)
-
-     for c in categories:
-         print('{} ({}): {}'.format(c['name'],c['scientific_name_from_taxonomy_mapping'],c['count']))
-
- # ...for each dataset
-
-
- #%% Save the results
-
- with open(output_file, 'w') as f:
-     json.dump(dataset_to_categories,f,indent=1)
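
Likewise, the second deleted script writes lila_dataset_to_categories.json, mapping each dataset name to its category list, with 'count' and 'scientific_name_from_taxonomy_mapping' fields populated. A minimal read-back sketch, assuming the script's default output_file location above:

# Hypothetical read-back of lila_dataset_to_categories.json
import json
import os

json_path = os.path.expanduser('~/lila/lila_categories_list/lila_dataset_to_categories.json')
with open(json_path, 'r') as f:
    dataset_to_categories = json.load(f)

# Print the three most common categories in each dataset
for ds_name, categories in dataset_to_categories.items():
    top = sorted(categories, key=lambda c: c['count'], reverse=True)[:3]
    print(ds_name + ': ' + ', '.join(
        '{} ({})'.format(c['name'], c['count']) for c in top))
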