megadetector 5.0.10__py3-none-any.whl → 5.0.11__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of megadetector might be problematic; see the package registry's advisory page for more details.

Files changed (226):
  1. {megadetector-5.0.10.dist-info → megadetector-5.0.11.dist-info}/LICENSE +0 -0
  2. {megadetector-5.0.10.dist-info → megadetector-5.0.11.dist-info}/METADATA +12 -11
  3. megadetector-5.0.11.dist-info/RECORD +5 -0
  4. megadetector-5.0.11.dist-info/top_level.txt +1 -0
  5. api/__init__.py +0 -0
  6. api/batch_processing/__init__.py +0 -0
  7. api/batch_processing/api_core/__init__.py +0 -0
  8. api/batch_processing/api_core/batch_service/__init__.py +0 -0
  9. api/batch_processing/api_core/batch_service/score.py +0 -439
  10. api/batch_processing/api_core/server.py +0 -294
  11. api/batch_processing/api_core/server_api_config.py +0 -98
  12. api/batch_processing/api_core/server_app_config.py +0 -55
  13. api/batch_processing/api_core/server_batch_job_manager.py +0 -220
  14. api/batch_processing/api_core/server_job_status_table.py +0 -152
  15. api/batch_processing/api_core/server_orchestration.py +0 -360
  16. api/batch_processing/api_core/server_utils.py +0 -92
  17. api/batch_processing/api_core_support/__init__.py +0 -0
  18. api/batch_processing/api_core_support/aggregate_results_manually.py +0 -46
  19. api/batch_processing/api_support/__init__.py +0 -0
  20. api/batch_processing/api_support/summarize_daily_activity.py +0 -152
  21. api/batch_processing/data_preparation/__init__.py +0 -0
  22. api/batch_processing/data_preparation/manage_local_batch.py +0 -2391
  23. api/batch_processing/data_preparation/manage_video_batch.py +0 -327
  24. api/batch_processing/integration/digiKam/setup.py +0 -6
  25. api/batch_processing/integration/digiKam/xmp_integration.py +0 -465
  26. api/batch_processing/integration/eMammal/test_scripts/config_template.py +0 -5
  27. api/batch_processing/integration/eMammal/test_scripts/push_annotations_to_emammal.py +0 -126
  28. api/batch_processing/integration/eMammal/test_scripts/select_images_for_testing.py +0 -55
  29. api/batch_processing/postprocessing/__init__.py +0 -0
  30. api/batch_processing/postprocessing/add_max_conf.py +0 -64
  31. api/batch_processing/postprocessing/categorize_detections_by_size.py +0 -163
  32. api/batch_processing/postprocessing/combine_api_outputs.py +0 -249
  33. api/batch_processing/postprocessing/compare_batch_results.py +0 -958
  34. api/batch_processing/postprocessing/convert_output_format.py +0 -397
  35. api/batch_processing/postprocessing/load_api_results.py +0 -195
  36. api/batch_processing/postprocessing/md_to_coco.py +0 -310
  37. api/batch_processing/postprocessing/md_to_labelme.py +0 -330
  38. api/batch_processing/postprocessing/merge_detections.py +0 -401
  39. api/batch_processing/postprocessing/postprocess_batch_results.py +0 -1904
  40. api/batch_processing/postprocessing/remap_detection_categories.py +0 -170
  41. api/batch_processing/postprocessing/render_detection_confusion_matrix.py +0 -661
  42. api/batch_processing/postprocessing/repeat_detection_elimination/find_repeat_detections.py +0 -211
  43. api/batch_processing/postprocessing/repeat_detection_elimination/remove_repeat_detections.py +0 -82
  44. api/batch_processing/postprocessing/repeat_detection_elimination/repeat_detections_core.py +0 -1631
  45. api/batch_processing/postprocessing/separate_detections_into_folders.py +0 -731
  46. api/batch_processing/postprocessing/subset_json_detector_output.py +0 -696
  47. api/batch_processing/postprocessing/top_folders_to_bottom.py +0 -223
  48. api/synchronous/__init__.py +0 -0
  49. api/synchronous/api_core/animal_detection_api/__init__.py +0 -0
  50. api/synchronous/api_core/animal_detection_api/api_backend.py +0 -152
  51. api/synchronous/api_core/animal_detection_api/api_frontend.py +0 -266
  52. api/synchronous/api_core/animal_detection_api/config.py +0 -35
  53. api/synchronous/api_core/animal_detection_api/data_management/annotations/annotation_constants.py +0 -47
  54. api/synchronous/api_core/animal_detection_api/detection/detector_training/copy_checkpoints.py +0 -43
  55. api/synchronous/api_core/animal_detection_api/detection/detector_training/model_main_tf2.py +0 -114
  56. api/synchronous/api_core/animal_detection_api/detection/process_video.py +0 -543
  57. api/synchronous/api_core/animal_detection_api/detection/pytorch_detector.py +0 -304
  58. api/synchronous/api_core/animal_detection_api/detection/run_detector.py +0 -627
  59. api/synchronous/api_core/animal_detection_api/detection/run_detector_batch.py +0 -1029
  60. api/synchronous/api_core/animal_detection_api/detection/run_inference_with_yolov5_val.py +0 -581
  61. api/synchronous/api_core/animal_detection_api/detection/run_tiled_inference.py +0 -754
  62. api/synchronous/api_core/animal_detection_api/detection/tf_detector.py +0 -165
  63. api/synchronous/api_core/animal_detection_api/detection/video_utils.py +0 -495
  64. api/synchronous/api_core/animal_detection_api/md_utils/azure_utils.py +0 -174
  65. api/synchronous/api_core/animal_detection_api/md_utils/ct_utils.py +0 -262
  66. api/synchronous/api_core/animal_detection_api/md_utils/directory_listing.py +0 -251
  67. api/synchronous/api_core/animal_detection_api/md_utils/matlab_porting_tools.py +0 -97
  68. api/synchronous/api_core/animal_detection_api/md_utils/path_utils.py +0 -416
  69. api/synchronous/api_core/animal_detection_api/md_utils/process_utils.py +0 -110
  70. api/synchronous/api_core/animal_detection_api/md_utils/sas_blob_utils.py +0 -509
  71. api/synchronous/api_core/animal_detection_api/md_utils/string_utils.py +0 -59
  72. api/synchronous/api_core/animal_detection_api/md_utils/url_utils.py +0 -144
  73. api/synchronous/api_core/animal_detection_api/md_utils/write_html_image_list.py +0 -226
  74. api/synchronous/api_core/animal_detection_api/md_visualization/visualization_utils.py +0 -841
  75. api/synchronous/api_core/tests/__init__.py +0 -0
  76. api/synchronous/api_core/tests/load_test.py +0 -110
  77. classification/__init__.py +0 -0
  78. classification/aggregate_classifier_probs.py +0 -108
  79. classification/analyze_failed_images.py +0 -227
  80. classification/cache_batchapi_outputs.py +0 -198
  81. classification/create_classification_dataset.py +0 -627
  82. classification/crop_detections.py +0 -516
  83. classification/csv_to_json.py +0 -226
  84. classification/detect_and_crop.py +0 -855
  85. classification/efficientnet/__init__.py +0 -9
  86. classification/efficientnet/model.py +0 -415
  87. classification/efficientnet/utils.py +0 -610
  88. classification/evaluate_model.py +0 -520
  89. classification/identify_mislabeled_candidates.py +0 -152
  90. classification/json_to_azcopy_list.py +0 -63
  91. classification/json_validator.py +0 -695
  92. classification/map_classification_categories.py +0 -276
  93. classification/merge_classification_detection_output.py +0 -506
  94. classification/prepare_classification_script.py +0 -194
  95. classification/prepare_classification_script_mc.py +0 -228
  96. classification/run_classifier.py +0 -286
  97. classification/save_mislabeled.py +0 -110
  98. classification/train_classifier.py +0 -825
  99. classification/train_classifier_tf.py +0 -724
  100. classification/train_utils.py +0 -322
  101. data_management/__init__.py +0 -0
  102. data_management/annotations/__init__.py +0 -0
  103. data_management/annotations/annotation_constants.py +0 -34
  104. data_management/camtrap_dp_to_coco.py +0 -238
  105. data_management/cct_json_utils.py +0 -395
  106. data_management/cct_to_md.py +0 -176
  107. data_management/cct_to_wi.py +0 -289
  108. data_management/coco_to_labelme.py +0 -272
  109. data_management/coco_to_yolo.py +0 -662
  110. data_management/databases/__init__.py +0 -0
  111. data_management/databases/add_width_and_height_to_db.py +0 -33
  112. data_management/databases/combine_coco_camera_traps_files.py +0 -206
  113. data_management/databases/integrity_check_json_db.py +0 -477
  114. data_management/databases/subset_json_db.py +0 -115
  115. data_management/generate_crops_from_cct.py +0 -149
  116. data_management/get_image_sizes.py +0 -188
  117. data_management/importers/add_nacti_sizes.py +0 -52
  118. data_management/importers/add_timestamps_to_icct.py +0 -79
  119. data_management/importers/animl_results_to_md_results.py +0 -158
  120. data_management/importers/auckland_doc_test_to_json.py +0 -372
  121. data_management/importers/auckland_doc_to_json.py +0 -200
  122. data_management/importers/awc_to_json.py +0 -189
  123. data_management/importers/bellevue_to_json.py +0 -273
  124. data_management/importers/cacophony-thermal-importer.py +0 -796
  125. data_management/importers/carrizo_shrubfree_2018.py +0 -268
  126. data_management/importers/carrizo_trail_cam_2017.py +0 -287
  127. data_management/importers/cct_field_adjustments.py +0 -57
  128. data_management/importers/channel_islands_to_cct.py +0 -913
  129. data_management/importers/eMammal/copy_and_unzip_emammal.py +0 -180
  130. data_management/importers/eMammal/eMammal_helpers.py +0 -249
  131. data_management/importers/eMammal/make_eMammal_json.py +0 -223
  132. data_management/importers/ena24_to_json.py +0 -275
  133. data_management/importers/filenames_to_json.py +0 -385
  134. data_management/importers/helena_to_cct.py +0 -282
  135. data_management/importers/idaho-camera-traps.py +0 -1407
  136. data_management/importers/idfg_iwildcam_lila_prep.py +0 -294
  137. data_management/importers/jb_csv_to_json.py +0 -150
  138. data_management/importers/mcgill_to_json.py +0 -250
  139. data_management/importers/missouri_to_json.py +0 -489
  140. data_management/importers/nacti_fieldname_adjustments.py +0 -79
  141. data_management/importers/noaa_seals_2019.py +0 -181
  142. data_management/importers/pc_to_json.py +0 -365
  143. data_management/importers/plot_wni_giraffes.py +0 -123
  144. data_management/importers/prepare-noaa-fish-data-for-lila.py +0 -359
  145. data_management/importers/prepare_zsl_imerit.py +0 -131
  146. data_management/importers/rspb_to_json.py +0 -356
  147. data_management/importers/save_the_elephants_survey_A.py +0 -320
  148. data_management/importers/save_the_elephants_survey_B.py +0 -332
  149. data_management/importers/snapshot_safari_importer.py +0 -758
  150. data_management/importers/snapshot_safari_importer_reprise.py +0 -665
  151. data_management/importers/snapshot_serengeti_lila.py +0 -1067
  152. data_management/importers/snapshotserengeti/make_full_SS_json.py +0 -150
  153. data_management/importers/snapshotserengeti/make_per_season_SS_json.py +0 -153
  154. data_management/importers/sulross_get_exif.py +0 -65
  155. data_management/importers/timelapse_csv_set_to_json.py +0 -490
  156. data_management/importers/ubc_to_json.py +0 -399
  157. data_management/importers/umn_to_json.py +0 -507
  158. data_management/importers/wellington_to_json.py +0 -263
  159. data_management/importers/wi_to_json.py +0 -441
  160. data_management/importers/zamba_results_to_md_results.py +0 -181
  161. data_management/labelme_to_coco.py +0 -548
  162. data_management/labelme_to_yolo.py +0 -272
  163. data_management/lila/__init__.py +0 -0
  164. data_management/lila/add_locations_to_island_camera_traps.py +0 -97
  165. data_management/lila/add_locations_to_nacti.py +0 -147
  166. data_management/lila/create_lila_blank_set.py +0 -557
  167. data_management/lila/create_lila_test_set.py +0 -151
  168. data_management/lila/create_links_to_md_results_files.py +0 -106
  169. data_management/lila/download_lila_subset.py +0 -177
  170. data_management/lila/generate_lila_per_image_labels.py +0 -515
  171. data_management/lila/get_lila_annotation_counts.py +0 -170
  172. data_management/lila/get_lila_image_counts.py +0 -111
  173. data_management/lila/lila_common.py +0 -300
  174. data_management/lila/test_lila_metadata_urls.py +0 -132
  175. data_management/ocr_tools.py +0 -874
  176. data_management/read_exif.py +0 -681
  177. data_management/remap_coco_categories.py +0 -84
  178. data_management/remove_exif.py +0 -66
  179. data_management/resize_coco_dataset.py +0 -189
  180. data_management/wi_download_csv_to_coco.py +0 -246
  181. data_management/yolo_output_to_md_output.py +0 -441
  182. data_management/yolo_to_coco.py +0 -676
  183. detection/__init__.py +0 -0
  184. detection/detector_training/__init__.py +0 -0
  185. detection/detector_training/model_main_tf2.py +0 -114
  186. detection/process_video.py +0 -703
  187. detection/pytorch_detector.py +0 -337
  188. detection/run_detector.py +0 -779
  189. detection/run_detector_batch.py +0 -1219
  190. detection/run_inference_with_yolov5_val.py +0 -917
  191. detection/run_tiled_inference.py +0 -935
  192. detection/tf_detector.py +0 -188
  193. detection/video_utils.py +0 -606
  194. docs/source/conf.py +0 -43
  195. md_utils/__init__.py +0 -0
  196. md_utils/azure_utils.py +0 -174
  197. md_utils/ct_utils.py +0 -612
  198. md_utils/directory_listing.py +0 -246
  199. md_utils/md_tests.py +0 -968
  200. md_utils/path_utils.py +0 -1044
  201. md_utils/process_utils.py +0 -157
  202. md_utils/sas_blob_utils.py +0 -509
  203. md_utils/split_locations_into_train_val.py +0 -228
  204. md_utils/string_utils.py +0 -92
  205. md_utils/url_utils.py +0 -323
  206. md_utils/write_html_image_list.py +0 -225
  207. md_visualization/__init__.py +0 -0
  208. md_visualization/plot_utils.py +0 -293
  209. md_visualization/render_images_with_thumbnails.py +0 -275
  210. md_visualization/visualization_utils.py +0 -1537
  211. md_visualization/visualize_db.py +0 -551
  212. md_visualization/visualize_detector_output.py +0 -406
  213. megadetector-5.0.10.dist-info/RECORD +0 -224
  214. megadetector-5.0.10.dist-info/top_level.txt +0 -8
  215. taxonomy_mapping/__init__.py +0 -0
  216. taxonomy_mapping/map_lila_taxonomy_to_wi_taxonomy.py +0 -491
  217. taxonomy_mapping/map_new_lila_datasets.py +0 -154
  218. taxonomy_mapping/prepare_lila_taxonomy_release.py +0 -142
  219. taxonomy_mapping/preview_lila_taxonomy.py +0 -591
  220. taxonomy_mapping/retrieve_sample_image.py +0 -71
  221. taxonomy_mapping/simple_image_download.py +0 -218
  222. taxonomy_mapping/species_lookup.py +0 -834
  223. taxonomy_mapping/taxonomy_csv_checker.py +0 -159
  224. taxonomy_mapping/taxonomy_graph.py +0 -346
  225. taxonomy_mapping/validate_lila_category_mappings.py +0 -83
  226. {megadetector-5.0.10.dist-info → megadetector-5.0.11.dist-info}/WHEEL +0 -0
@@ -1,491 +0,0 @@
1
- """
2
-
3
- map_lila_taxonomy_to_wi_taxonomy.py
4
-
5
- Loads the LILA category mapping (in which taxonomy information comes from an
6
- iNat taxonomy snapshot) and tries to map each class to the Wildlife Insights taxonomy.
7
-
8
- """
9
-
10
- #%% Constants and imports
11
-
12
- import numpy as np
13
- import json
14
- import os
15
-
16
- from tqdm import tqdm
17
-
18
- from data_management.lila.lila_common import read_lila_taxonomy_mapping, \
19
- read_wildlife_insights_taxonomy_mapping
20
-
21
-
22
- #%% Prevent execution during infrastructural imports
23
-
24
- if False:
25
-
26
- #%%
27
-
28
- lila_local_base = os.path.expanduser('~/lila')
29
-
30
- metadata_dir = os.path.join(lila_local_base, 'metadata')
31
- os.makedirs(metadata_dir, exist_ok=True)
32
-
33
- # Created by get_lila_category_list.py... contains counts for each category
34
- category_list_dir = os.path.join(lila_local_base, 'lila_categories_list')
35
- lila_dataset_to_categories_file = os.path.join(
36
- category_list_dir, 'lila_dataset_to_categories.json')
37
-
38
- # This is a manually-curated file used to store mappings that had to be made manually
39
- lila_to_wi_supplementary_mapping_file = os.path.expanduser(
40
- '~/git/MegaDetector/taxonomy_mapping/lila_to_wi_supplementary_mapping_file.csv')
41
-
42
- assert os.path.isfile(lila_dataset_to_categories_file)
43
-
44
- # This is the main output file from this whole process
45
- wi_mapping_table_file = os.path.join(lila_local_base,'lila_wi_mapping_table.csv')
46
-
47
- id_column = 'uniqueIdentifier' # 'id'
48
-
49
-
50
- #%% Load category and taxonomy files
51
-
52
- with open(lila_dataset_to_categories_file, 'r') as f:
53
- lila_dataset_to_categories = json.load(f)
54
-
55
- lila_taxonomy_df = read_lila_taxonomy_mapping(metadata_dir)
56
-
57
- wi_taxonomy_df = read_wildlife_insights_taxonomy_mapping(metadata_dir)
58
-
59
-
60
- #%% Pull everything out of pandas
61
-
62
- lila_taxonomy = lila_taxonomy_df.to_dict('records')
63
- wi_taxonomy = wi_taxonomy_df.to_dict('records')
64
-
65
-
66
- #%% Cache WI taxonomy lookups
67
-
68
- def is_empty_wi_item(v):
69
- if isinstance(v, str):
70
- return len(v) == 0
71
- elif v is None:
72
- return True
73
- else:
74
- assert isinstance(v, float) and np.isnan(v), 'Invalid item: {}'.format(str(v))
75
- return True
76
-
77
-
78
- def taxonomy_items_equal(a, b):
79
- if isinstance(a, str) and (not isinstance(b, str)):
80
- return False
81
- if isinstance(b, str) and (not isinstance(a, str)):
82
- return False
83
- if (not isinstance(a, str)) or (not isinstance(b, str)):
84
- assert (a is None and b is None) or (isinstance(a, float) and isinstance(b, float))
85
- return True
86
- return a == b
87
-
88
-
89
- for taxon in wi_taxonomy:
90
- taxon['taxon_name'] = None
91
-
92
- from collections import defaultdict
93
- wi_taxon_name_to_taxa = defaultdict(list)
94
-
95
- # This is just a handy lookup table that we'll use to debug mismatches
96
- wi_common_name_to_taxon = {}
97
-
98
- blank_taxon_name = 'blank'
99
- blank_taxon = None
100
-
101
- animal_taxon_name = 'animal'
102
- animal_taxon = None
103
-
104
- unknown_taxon_name = 'unknown'
105
- unknown_taxon = None
106
-
107
- ignore_taxa = set(['No CV Result', 'CV Needed', 'CV Failed'])
108
-
109
- known_problematic_taxon_ids = ['f94e6d97-59cf-4d38-a05a-a75efdd2863b']
110
-
111
- human_taxa = []
112
-
113
- # taxon = wi_taxonomy[21653]; print(taxon)
114
- for taxon in tqdm(wi_taxonomy):
115
-
116
- taxon_name = None
117
-
118
- assert taxon['taxonomyType'] == 'object' or taxon['taxonomyType'] == 'biological'
119
-
120
- for k in taxon.keys():
121
- v = taxon[k]
122
- if isinstance(v,str):
123
- taxon[k] = v.strip()
124
-
125
- if taxon['commonNameEnglish'] in ignore_taxa:
126
- continue
127
-
128
- if isinstance(taxon['commonNameEnglish'], str):
129
-
130
- wi_common_name_to_taxon[taxon['commonNameEnglish'].strip(
131
- ).lower()] = taxon
132
-
133
- special_taxon = False
134
-
135
- # Look for keywords that don't refer to specific taxa: blank/animal/unknown
136
- if taxon['commonNameEnglish'].strip().lower() == blank_taxon_name:
137
- blank_taxon = taxon
138
- special_taxon = True
139
-
140
- elif taxon['commonNameEnglish'].strip().lower() == animal_taxon_name:
141
- animal_taxon = taxon
142
- special_taxon = True
143
-
144
- elif taxon['commonNameEnglish'].strip().lower() == unknown_taxon_name:
145
- unknown_taxon = taxon
146
- special_taxon = True
147
-
148
- if special_taxon:
149
- taxon_name = taxon['commonNameEnglish'].strip().lower()
150
- taxon['taxon_name'] = taxon_name
151
- wi_taxon_name_to_taxa[taxon_name].append(taxon)
152
- continue
153
-
154
- # Do we have a species name?
155
- if not is_empty_wi_item(taxon['species']):
156
-
157
- # If 'species' is populated, 'genus' should always be populated; one item currently breaks
158
- # this rule.
159
- assert not is_empty_wi_item(taxon['genus'])
160
-
161
- taxon_name = (taxon['genus'].strip() + ' ' +
162
- taxon['species'].strip()).strip().lower()
163
- assert not is_empty_wi_item(taxon['class']) and \
164
- not is_empty_wi_item(taxon['order']) and \
165
- not is_empty_wi_item(taxon['family'])
166
-
167
- elif not is_empty_wi_item(taxon['genus']):
168
-
169
- assert not is_empty_wi_item(taxon['class']) and \
170
- not is_empty_wi_item(taxon['order']) and \
171
- not is_empty_wi_item(taxon['family'])
172
- taxon_name = taxon['genus'].strip().lower()
173
-
174
- elif not is_empty_wi_item(taxon['family']):
175
-
176
- assert not is_empty_wi_item(taxon['class']) and \
177
- not is_empty_wi_item(taxon['order'])
178
- taxon_name = taxon['family'].strip().lower()
179
-
180
- elif not is_empty_wi_item(taxon['order']):
181
-
182
- assert not is_empty_wi_item(taxon['class'])
183
- taxon_name = taxon['order'].strip().lower()
184
-
185
- elif not is_empty_wi_item(taxon['class']):
186
-
187
- taxon_name = taxon['class'].strip().lower()
188
-
189
- if taxon_name is not None:
190
- assert taxon['taxonomyType'] == 'biological'
191
- else:
192
- assert taxon['taxonomyType'] == 'object'
193
- taxon_name = taxon['commonNameEnglish'].strip().lower()
194
-
195
- if taxon_name in wi_taxon_name_to_taxa:
196
- if taxon[id_column] in known_problematic_taxon_ids:
197
- print('Skipping problematic taxon ID {}'.format(taxon[id_column]))
198
- else:
199
- previous_taxa = wi_taxon_name_to_taxa[taxon_name]
200
- for previous_taxon in previous_taxa:
201
- for level in ['class', 'order', 'family', 'genus', 'species']:
202
- error_string = 'Error: taxon {} appeared previously in {} {} (as {}), now in {} {}'.format(
203
- taxon_name,
204
- level,previous_taxon[level],
205
- previous_taxon['taxon_name'],
206
- level,taxon[level])
207
- assert taxonomy_items_equal(previous_taxon[level], taxon[level]), error_string
208
-
209
- taxon['taxon_name'] = taxon_name
210
- if taxon_name == 'homo sapiens':
211
- human_taxa.append(taxon)
212
- wi_taxon_name_to_taxa[taxon_name].append(taxon)
213
-
214
- # ...for each taxon
215
-
216
- assert unknown_taxon is not None
217
- assert animal_taxon is not None
218
- assert blank_taxon is not None
219
-
220
-
221
- #%% Find redundant taxa
222
-
223
- taxon_names_with_multiple_entries = []
224
- for wi_taxon_name in wi_taxon_name_to_taxa:
225
- if len(wi_taxon_name_to_taxa[wi_taxon_name]) > 1:
226
- taxon_names_with_multiple_entries.append(wi_taxon_name)
227
-
228
- print('{} names have multiple entries\n:'.format(len(taxon_names_with_multiple_entries)))
229
-
230
- for s in taxon_names_with_multiple_entries:
231
- print(s)
232
-
233
- if False:
234
- pass
235
-
236
- #%% Manual review of redundant taxa
237
-
238
- s = taxon_names_with_multiple_entries[15]
239
- taxa = wi_taxon_name_to_taxa[s]
240
- for t in taxa:
241
- for k in t.keys():
242
- print('{}: {}'.format(k,t[k]))
243
- print()
244
- # print(t,end='\n\n')
245
-
246
-
247
- #%% Clean up redundant taxa
248
-
249
- taxon_name_to_preferred_taxon_id = {}
250
-
251
- # "helmeted guineafowl" vs "domestic guineafowl"
252
- taxon_name_to_preferred_taxon_id['numida meleagris'] = '83133617-8358-4910-82ee-4c23e40ba3dc' # 2005826
253
-
254
- # "domestic turkey" vs. "wild turkey"
255
- taxon_name_to_preferred_taxon_id['meleagris gallopavo'] = 'c10547c3-1748-48bf-a451-8066c820f22f' # 2021598
256
-
257
- # multiple sensible human entries
258
- taxon_name_to_preferred_taxon_id['homo sapiens'] = '990ae9dd-7a59-4344-afcb-1b7b21368000' # 2002045
259
-
260
- # "domestic dog" and "dog-on-leash"
261
- taxon_name_to_preferred_taxon_id['canis familiaris'] = '3d80f1d6-b1df-4966-9ff4-94053c7a902a' # 2021548
262
-
263
- # "small mammal" vs. "mammal"
264
- taxon_name_to_preferred_taxon_id['mammalia'] = 'f2d233e3-80e3-433d-9687-e29ecc7a467a' # 2021108
265
-
266
- # "Hispaniolan Mango" vs. NaN
267
- taxon_name_to_preferred_taxon_id['anthracothorax dominicus'] = 'f94e6d97-59cf-4d38-a05a-a75efdd2863b'
268
-
269
- # "millipedes" vs. "Millipede"
270
- taxon_name_to_preferred_taxon_id['diplopoda'] = '065884eb-4e64-4233-84dc-de25bd06ffd2' # 2021760
271
-
272
- # Different suborders: Squamata vs. Lacertilia
273
- taxon_name_to_preferred_taxon_id['squamata'] = '710c4066-bd5d-4313-bcf4-0217c4c84da7' # 2021703
274
-
275
- # Redundancy (both "beautiful firetail")
276
- taxon_name_to_preferred_taxon_id['stagonopleura bella'] = '7fec8e7e-fd3b-4d7f-99fd-3ade6f3bbaa5' # 2021939
277
-
278
- # "yellow wagtail" vs. "yellow crowned-wagtail"
279
- taxon_name_to_preferred_taxon_id['motacilla flava'] = 'ac6669bc-9f9e-4473-b609-b9082f9bf50c' # 2016194
280
-
281
- # "dremomys species" vs. "dremomys genus"
282
- taxon_name_to_preferred_taxon_id['dremomys'] = '1507d153-af11-46f1-bfb8-77918d035ab3' # 2019370
283
-
284
- # "elk" vs. "domestic elk"
285
- taxon_name_to_preferred_taxon_id['cervus canadensis'] = 'c5ce946f-8f0d-4379-992b-cc0982381f5e'
286
-
287
- # "American bison" vs. "domestic bison"
288
- taxon_name_to_preferred_taxon_id['bison bison'] = '539ebd55-081b-429a-9ae6-5a6a0f6999d4' # 2021593
289
-
290
- # "woodrat or rat or mouse species" vs. "mouse species"
291
- taxon_name_to_preferred_taxon_id['muridae'] = 'e7503287-468c-45af-a1bd-a17821bb62f2' # 2021642
292
-
293
- # both "southern sand frog"
294
- taxon_name_to_preferred_taxon_id['tomopterna adiastola'] = 'a5dc63cb-41be-4090-84a7-b944b16dcee4' # 2021834
295
-
296
- # sericornis species vs. scrubwren species
297
- taxon_name_to_preferred_taxon_id['sericornis'] = 'ad82c0ac-df48-4028-bf71-d2b2f4bc4129' # 2021776
298
-
299
-
300
- # taxon_name = list(taxon_name_to_preferred_taxon_id.keys())[0]
301
- for taxon_name in taxon_name_to_preferred_taxon_id.keys():
302
-
303
- candidate_taxa = wi_taxon_name_to_taxa[taxon_name]
304
-
305
- # If we've gotten this far, we should be choosing from multiple taxa.
306
- #
307
- # This will become untrue if any of these are resolved later, at which point we should
308
- # remove them from taxon_name_to_preferred_id
309
- assert len(candidate_taxa) > 1, 'Only one taxon available for {}'.format(taxon_name)
310
-
311
- # Choose the preferred taxa
312
- selected_taxa = [t for t in candidate_taxa if t[id_column] == \
313
- taxon_name_to_preferred_taxon_id[taxon_name]]
314
- assert len(selected_taxa) == 1
315
- wi_taxon_name_to_taxa[taxon_name] = selected_taxa
316
-
317
- wi_taxon_name_to_taxon = {}
318
-
319
- for taxon_name in wi_taxon_name_to_taxa.keys():
320
- taxa = wi_taxon_name_to_taxa[taxon_name]
321
- assert len(taxa) == 1
322
- wi_taxon_name_to_taxon[taxon_name] = taxa[0]
323
-
324
-
325
- #%% Read supplementary mappings
326
-
327
- with open(lila_to_wi_supplementary_mapping_file, 'r') as f:
328
- lines = f.readlines()
329
-
330
- supplementary_lila_query_to_wi_query = {}
331
-
332
- for line in lines:
333
- # Each line is [lila query],[WI taxon name],[notes]
334
- tokens = line.strip().split(',')
335
- assert len(tokens) == 3
336
- lila_query = tokens[0].strip().lower()
337
- wi_taxon_name = tokens[1].strip().lower()
338
- assert wi_taxon_name in wi_taxon_name_to_taxa
339
- supplementary_lila_query_to_wi_query[lila_query] = wi_taxon_name
340
-
341
-
342
- #%% Map LILA categories to WI categories
343
-
344
- mismatches = set()
345
- mismatches_with_common_mappings = set()
346
- supplementary_mappings = set()
347
-
348
- all_searches = set()
349
-
350
- # Must be ordered from kingdom --> species
351
- lila_taxonomy_levels = ['kingdom', 'phylum', 'subphylum', 'superclass', 'class', 'subclass',
352
- 'infraclass', 'superorder', 'order', 'suborder', 'infraorder',
353
- 'superfamily', 'family', 'subfamily', 'tribe', 'genus', 'species']
354
-
355
- unknown_queries = set(
356
- ['unidentifiable', 'other', 'unidentified', 'unknown', 'unclassifiable'])
357
- blank_queries = set(['empty'])
358
- animal_queries = set(['animalia'])
359
-
360
- lila_dataset_category_to_wi_taxon = {}
361
-
362
- # i_taxon = 0; taxon = lila_taxonomy[i_taxon]; print(taxon)
363
- for i_taxon, lila_taxon in enumerate(lila_taxonomy):
364
-
365
- query = None
366
-
367
- lila_dataset_category = lila_taxon['dataset_name'] + ':' + lila_taxon['query']
368
-
369
- # Go from kingdom --> species, choosing the lowest-level description as the query
370
- for level in lila_taxonomy_levels:
371
- if isinstance(lila_taxon[level], str):
372
- query = lila_taxon[level]
373
- all_searches.add(query)
374
-
375
- if query is None:
376
- # E.g., 'car'
377
- query = lila_taxon['query']
378
-
379
- wi_taxon = None
380
-
381
- if query in unknown_queries:
382
-
383
- wi_taxon = unknown_taxon
384
-
385
- elif query in blank_queries:
386
-
387
- wi_taxon = blank_taxon
388
-
389
- elif query in animal_queries:
390
-
391
- wi_taxon = animal_taxon
392
-
393
- elif query in wi_taxon_name_to_taxon:
394
-
395
- wi_taxon = wi_taxon_name_to_taxon[query]
396
-
397
- elif query in supplementary_lila_query_to_wi_query:
398
-
399
- wi_taxon = wi_taxon_name_to_taxon[supplementary_lila_query_to_wi_query[query]]
400
- supplementary_mappings.add(query)
401
- # print('Made a supplementary mapping from {} to {}'.format(query,wi_taxon['taxon_name']))
402
-
403
- else:
404
-
405
- # print('No match for {}'.format(query))
406
- lila_common_name = lila_taxon['common_name']
407
-
408
- if lila_common_name in wi_common_name_to_taxon:
409
- wi_taxon = wi_common_name_to_taxon[lila_common_name]
410
- wi_common_name = wi_taxon['commonNameEnglish']
411
- wi_taxon_name = wi_taxon['taxon_name']
412
- if False:
413
- print('LILA common name {} maps to WI taxon {} ({})'.format(lila_common_name,
414
- wi_taxon_name,
415
- wi_common_name))
416
- mismatches_with_common_mappings.add(query)
417
-
418
- else:
419
-
420
- mismatches.add(query)
421
-
422
- lila_dataset_category_to_wi_taxon[lila_dataset_category] = wi_taxon
423
-
424
- # ...for each LILA taxon
425
-
426
- print('Of {} entities, there are {} mismatches ({} mapped by common name) ({} mapped by supplementary mapping file)'.format(
427
- len(all_searches), len(mismatches), len(mismatches_with_common_mappings), len(supplementary_mappings)))
428
-
429
- assert len(mismatches) == 0
430
-
431
-
432
- #%% Manual mapping
433
-
434
- if not os.path.isfile(lila_to_wi_supplementary_mapping_file):
435
- print('Creating mapping file {}'.format(
436
- lila_to_wi_supplementary_mapping_file))
437
- with open(lila_to_wi_supplementary_mapping_file, 'w') as f:
438
- for query in mismatches:
439
- f.write(query + ',' + '\n')
440
- else:
441
- print('{} exists, not re-writing'.format(lila_to_wi_supplementary_mapping_file))
442
-
443
-
444
- #%% Build a dictionary from LILA dataset names and categories to LILA taxa
445
-
446
- lila_dataset_category_to_lila_taxon = {}
447
-
448
- # i_d = 0; d = lila_taxonomy[i_d]
449
- for i_d,d in enumerate(lila_taxonomy):
450
- lila_dataset_category = d['dataset_name'] + ':' + d['query']
451
- assert lila_dataset_category not in lila_dataset_category_to_lila_taxon
452
- lila_dataset_category_to_lila_taxon[lila_dataset_category] = d
453
-
454
-
455
- #%% Map LILA datasets to WI taxa, and count the number of each taxon available in each dataset
456
-
457
- with open(wi_mapping_table_file,'w') as f:
458
-
459
- f.write('lila_dataset_name,lila_category_name,wi_guid,wi_taxon_name,wi_common,count\n')
460
-
461
- # dataset_name = list(lila_dataset_to_categories.keys())[0]
462
- for dataset_name in lila_dataset_to_categories.keys():
463
-
464
- if '_bbox' in dataset_name:
465
- continue
466
-
467
- dataset_categories = lila_dataset_to_categories[dataset_name]
468
-
469
- # dataset_category = dataset_categories[0]
470
- for category in dataset_categories:
471
-
472
- lila_dataset_category = dataset_name + ':' + category['name'].strip().lower()
473
- if '#' in lila_dataset_category:
474
- continue
475
- assert lila_dataset_category in lila_dataset_category_to_lila_taxon
476
- assert lila_dataset_category in lila_dataset_category_to_wi_taxon
477
- assert 'count' in category
478
-
479
- wi_taxon = lila_dataset_category_to_wi_taxon[lila_dataset_category]
480
-
481
- # Write out the dataset name, category name, WI GUID, WI scientific name, WI common name,
482
- # and count
483
- s = f"{dataset_name},{category['name']},{wi_taxon['uniqueIdentifier']},"+\
484
- f"{wi_taxon['taxon_name']},{wi_taxon['commonNameEnglish']},{category['count']}\n"
485
- f.write(s)
486
-
487
- # ...for each category in this dataset
488
-
489
- # ...for each dataset
490
-
491
- # ...with open()
@@ -1,154 +0,0 @@
1
- """
2
-
3
- map_new_lila_datasets.py
4
-
5
- Given a subset of LILA datasets, find all the categories, and start the taxonomy
6
- mapping process.
7
-
8
- """
9
-
10
- #%% Constants and imports
11
-
12
- import os
13
- import json
14
-
15
- # Created by get_lila_category_list.py
16
- input_lila_category_list_file = os.path.expanduser('~/lila/lila_categories_list/lila_dataset_to_categories.json')
17
-
18
- output_file = os.path.expanduser('~/lila/lila_additions_2023.12.29.csv')
19
-
20
- datasets_to_map = [
21
- 'Trail Camera Images of New Zealand Animals'
22
- ]
23
-
24
-
25
- #%% Initialize taxonomic lookup
26
-
27
- from taxonomy_mapping.species_lookup import (
28
- initialize_taxonomy_lookup,
29
- get_preferred_taxonomic_match)
30
-
31
- # from taxonomy_mapping.species_lookup import (
32
- # get_taxonomic_info, print_taxonomy_matche)
33
-
34
- initialize_taxonomy_lookup(force_init=False)
35
-
36
-
37
- #%% Read the list of datasets
38
-
39
- with open(input_lila_category_list_file,'r') as f:
40
- input_lila_categories = json.load(f)
41
-
42
- lila_datasets = set()
43
-
44
- for dataset_name in input_lila_categories.keys():
45
- # The script that generates this dictionary creates a separate entry for bounding box
46
- # metadata files, but those don't represent new dataset names
47
- lila_datasets.add(dataset_name.replace('_bbox',''))
48
-
49
- for s in datasets_to_map:
50
- assert s in lila_datasets
51
-
52
-
53
- #%% Find all categories
54
-
55
- category_mappings = []
56
-
57
- # dataset_name = datasets_to_map[0]
58
- for dataset_name in datasets_to_map:
59
-
60
- ds_categories = input_lila_categories[dataset_name]
61
- for category in ds_categories:
62
- category_name = category['name']
63
- assert ':' not in category_name
64
- mapping_name = dataset_name + ':' + category_name
65
- category_mappings.append(mapping_name)
66
-
67
- print('Need to create {} mappings'.format(len(category_mappings)))
68
-
69
-
70
- #%% Match every query against our taxonomies
71
-
72
- output_rows = []
73
-
74
- taxonomy_preference = 'inat'
75
-
76
- allow_non_preferred_matches = True
77
-
78
- # mapping_string = category_mappings[1]; print(mapping_string)
79
- for mapping_string in category_mappings:
80
-
81
- tokens = mapping_string.split(':')
82
- assert len(tokens) == 2
83
-
84
- dataset_name = tokens[0]
85
- query = tokens[1]
86
-
87
- taxonomic_match = get_preferred_taxonomic_match(query,taxonomy_preference=taxonomy_preference)
88
-
89
- if (taxonomic_match.source == taxonomy_preference) or allow_non_preferred_matches:
90
-
91
- output_row = {
92
- 'dataset_name': dataset_name,
93
- 'query': query,
94
- 'source': taxonomic_match.source,
95
- 'taxonomy_level': taxonomic_match.taxonomic_level,
96
- 'scientific_name': taxonomic_match.scientific_name,
97
- 'common_name': taxonomic_match.common_name,
98
- 'taxonomy_string': taxonomic_match.taxonomy_string
99
- }
100
-
101
- else:
102
-
103
- output_row = {
104
- 'dataset_name': dataset_name,
105
- 'query': query,
106
- 'source': '',
107
- 'taxonomy_level': '',
108
- 'scientific_name': '',
109
- 'common_name': '',
110
- 'taxonomy_string': ''
111
- }
112
-
113
- output_rows.append(output_row)
114
-
115
- # ...for each mapping
116
-
117
-
118
- #%% Write output rows
119
-
120
- import os
121
- import pandas as pd
122
-
123
- assert not os.path.isfile(output_file), 'Delete the output file before re-generating'
124
-
125
- output_df = pd.DataFrame(data=output_rows, columns=[
126
- 'dataset_name', 'query', 'source', 'taxonomy_level',
127
- 'scientific_name', 'common_name', 'taxonomy_string'])
128
- output_df.to_csv(output_file, index=None, header=True)
129
-
130
-
131
- #%% Manual lookup
132
-
133
- if False:
134
-
135
- #%%
136
-
137
- # q = 'white-throated monkey'
138
- # q = 'cingulata'
139
- # q = 'notamacropus'
140
- q = 'porzana'
141
- taxonomy_preference = 'inat'
142
- m = get_preferred_taxonomic_match(q,taxonomy_preference)
143
- # print(m.scientific_name); import clipboard; clipboard.copy(m.scientific_name)
144
-
145
- if m is None:
146
- print('No match')
147
- else:
148
- if m.source != taxonomy_preference:
149
- print('\n*** non-preferred match ***\n')
150
- # raise ValueError('')
151
- print(m.source)
152
- print(m.taxonomy_string)
153
- # print(m.scientific_name); import clipboard; clipboard.copy(m.scientific_name)
154
- import clipboard; clipboard.copy(m.taxonomy_string)