megadetector 5.0.8__py3-none-any.whl → 5.0.10__py3-none-any.whl

This diff shows the content of publicly available package versions as they appear in their respective public registries. It is provided for informational purposes only.

Potentially problematic release: this version of megadetector might be problematic.

Files changed (190)
  1. api/__init__.py +0 -0
  2. api/batch_processing/__init__.py +0 -0
  3. api/batch_processing/api_core/__init__.py +0 -0
  4. api/batch_processing/api_core/batch_service/__init__.py +0 -0
  5. api/batch_processing/api_core/batch_service/score.py +0 -1
  6. api/batch_processing/api_core/server_job_status_table.py +0 -1
  7. api/batch_processing/api_core_support/__init__.py +0 -0
  8. api/batch_processing/api_core_support/aggregate_results_manually.py +0 -1
  9. api/batch_processing/api_support/__init__.py +0 -0
  10. api/batch_processing/api_support/summarize_daily_activity.py +0 -1
  11. api/batch_processing/data_preparation/__init__.py +0 -0
  12. api/batch_processing/data_preparation/manage_local_batch.py +65 -65
  13. api/batch_processing/data_preparation/manage_video_batch.py +8 -8
  14. api/batch_processing/integration/digiKam/xmp_integration.py +0 -1
  15. api/batch_processing/integration/eMammal/test_scripts/push_annotations_to_emammal.py +0 -1
  16. api/batch_processing/postprocessing/__init__.py +0 -0
  17. api/batch_processing/postprocessing/add_max_conf.py +12 -12
  18. api/batch_processing/postprocessing/categorize_detections_by_size.py +32 -14
  19. api/batch_processing/postprocessing/combine_api_outputs.py +68 -54
  20. api/batch_processing/postprocessing/compare_batch_results.py +113 -43
  21. api/batch_processing/postprocessing/convert_output_format.py +41 -16
  22. api/batch_processing/postprocessing/load_api_results.py +16 -17
  23. api/batch_processing/postprocessing/md_to_coco.py +31 -21
  24. api/batch_processing/postprocessing/md_to_labelme.py +52 -22
  25. api/batch_processing/postprocessing/merge_detections.py +14 -14
  26. api/batch_processing/postprocessing/postprocess_batch_results.py +246 -174
  27. api/batch_processing/postprocessing/remap_detection_categories.py +32 -25
  28. api/batch_processing/postprocessing/render_detection_confusion_matrix.py +60 -27
  29. api/batch_processing/postprocessing/repeat_detection_elimination/find_repeat_detections.py +53 -44
  30. api/batch_processing/postprocessing/repeat_detection_elimination/remove_repeat_detections.py +25 -14
  31. api/batch_processing/postprocessing/repeat_detection_elimination/repeat_detections_core.py +242 -158
  32. api/batch_processing/postprocessing/separate_detections_into_folders.py +159 -114
  33. api/batch_processing/postprocessing/subset_json_detector_output.py +146 -169
  34. api/batch_processing/postprocessing/top_folders_to_bottom.py +77 -43
  35. api/synchronous/__init__.py +0 -0
  36. api/synchronous/api_core/animal_detection_api/__init__.py +0 -0
  37. api/synchronous/api_core/animal_detection_api/api_backend.py +0 -2
  38. api/synchronous/api_core/animal_detection_api/api_frontend.py +266 -268
  39. api/synchronous/api_core/animal_detection_api/config.py +35 -35
  40. api/synchronous/api_core/tests/__init__.py +0 -0
  41. api/synchronous/api_core/tests/load_test.py +109 -109
  42. classification/__init__.py +0 -0
  43. classification/aggregate_classifier_probs.py +21 -24
  44. classification/analyze_failed_images.py +11 -13
  45. classification/cache_batchapi_outputs.py +51 -51
  46. classification/create_classification_dataset.py +69 -68
  47. classification/crop_detections.py +54 -53
  48. classification/csv_to_json.py +97 -100
  49. classification/detect_and_crop.py +105 -105
  50. classification/evaluate_model.py +43 -42
  51. classification/identify_mislabeled_candidates.py +47 -46
  52. classification/json_to_azcopy_list.py +10 -10
  53. classification/json_validator.py +72 -71
  54. classification/map_classification_categories.py +44 -43
  55. classification/merge_classification_detection_output.py +68 -68
  56. classification/prepare_classification_script.py +157 -154
  57. classification/prepare_classification_script_mc.py +228 -228
  58. classification/run_classifier.py +27 -26
  59. classification/save_mislabeled.py +30 -30
  60. classification/train_classifier.py +20 -20
  61. classification/train_classifier_tf.py +21 -22
  62. classification/train_utils.py +10 -10
  63. data_management/__init__.py +0 -0
  64. data_management/annotations/__init__.py +0 -0
  65. data_management/annotations/annotation_constants.py +18 -31
  66. data_management/camtrap_dp_to_coco.py +238 -0
  67. data_management/cct_json_utils.py +102 -59
  68. data_management/cct_to_md.py +176 -158
  69. data_management/cct_to_wi.py +247 -219
  70. data_management/coco_to_labelme.py +272 -263
  71. data_management/coco_to_yolo.py +79 -58
  72. data_management/databases/__init__.py +0 -0
  73. data_management/databases/add_width_and_height_to_db.py +20 -16
  74. data_management/databases/combine_coco_camera_traps_files.py +35 -31
  75. data_management/databases/integrity_check_json_db.py +62 -24
  76. data_management/databases/subset_json_db.py +24 -15
  77. data_management/generate_crops_from_cct.py +27 -45
  78. data_management/get_image_sizes.py +188 -162
  79. data_management/importers/add_nacti_sizes.py +8 -8
  80. data_management/importers/add_timestamps_to_icct.py +78 -78
  81. data_management/importers/animl_results_to_md_results.py +158 -158
  82. data_management/importers/auckland_doc_test_to_json.py +9 -9
  83. data_management/importers/auckland_doc_to_json.py +8 -8
  84. data_management/importers/awc_to_json.py +7 -7
  85. data_management/importers/bellevue_to_json.py +15 -15
  86. data_management/importers/cacophony-thermal-importer.py +13 -13
  87. data_management/importers/carrizo_shrubfree_2018.py +8 -8
  88. data_management/importers/carrizo_trail_cam_2017.py +8 -8
  89. data_management/importers/cct_field_adjustments.py +9 -9
  90. data_management/importers/channel_islands_to_cct.py +10 -10
  91. data_management/importers/eMammal/copy_and_unzip_emammal.py +1 -0
  92. data_management/importers/ena24_to_json.py +7 -7
  93. data_management/importers/filenames_to_json.py +8 -8
  94. data_management/importers/helena_to_cct.py +7 -7
  95. data_management/importers/idaho-camera-traps.py +7 -7
  96. data_management/importers/idfg_iwildcam_lila_prep.py +10 -10
  97. data_management/importers/jb_csv_to_json.py +9 -9
  98. data_management/importers/mcgill_to_json.py +8 -8
  99. data_management/importers/missouri_to_json.py +18 -18
  100. data_management/importers/nacti_fieldname_adjustments.py +10 -10
  101. data_management/importers/noaa_seals_2019.py +7 -7
  102. data_management/importers/pc_to_json.py +7 -7
  103. data_management/importers/plot_wni_giraffes.py +7 -7
  104. data_management/importers/prepare-noaa-fish-data-for-lila.py +359 -359
  105. data_management/importers/prepare_zsl_imerit.py +7 -7
  106. data_management/importers/rspb_to_json.py +8 -8
  107. data_management/importers/save_the_elephants_survey_A.py +8 -8
  108. data_management/importers/save_the_elephants_survey_B.py +9 -9
  109. data_management/importers/snapshot_safari_importer.py +26 -26
  110. data_management/importers/snapshot_safari_importer_reprise.py +665 -665
  111. data_management/importers/snapshot_serengeti_lila.py +14 -14
  112. data_management/importers/sulross_get_exif.py +8 -9
  113. data_management/importers/timelapse_csv_set_to_json.py +11 -11
  114. data_management/importers/ubc_to_json.py +13 -13
  115. data_management/importers/umn_to_json.py +7 -7
  116. data_management/importers/wellington_to_json.py +8 -8
  117. data_management/importers/wi_to_json.py +9 -9
  118. data_management/importers/zamba_results_to_md_results.py +181 -181
  119. data_management/labelme_to_coco.py +65 -24
  120. data_management/labelme_to_yolo.py +8 -8
  121. data_management/lila/__init__.py +0 -0
  122. data_management/lila/add_locations_to_island_camera_traps.py +9 -9
  123. data_management/lila/add_locations_to_nacti.py +147 -147
  124. data_management/lila/create_lila_blank_set.py +13 -13
  125. data_management/lila/create_lila_test_set.py +8 -8
  126. data_management/lila/create_links_to_md_results_files.py +106 -106
  127. data_management/lila/download_lila_subset.py +44 -110
  128. data_management/lila/generate_lila_per_image_labels.py +55 -42
  129. data_management/lila/get_lila_annotation_counts.py +18 -15
  130. data_management/lila/get_lila_image_counts.py +11 -11
  131. data_management/lila/lila_common.py +96 -33
  132. data_management/lila/test_lila_metadata_urls.py +132 -116
  133. data_management/ocr_tools.py +173 -128
  134. data_management/read_exif.py +110 -97
  135. data_management/remap_coco_categories.py +83 -83
  136. data_management/remove_exif.py +58 -62
  137. data_management/resize_coco_dataset.py +30 -23
  138. data_management/wi_download_csv_to_coco.py +246 -239
  139. data_management/yolo_output_to_md_output.py +86 -73
  140. data_management/yolo_to_coco.py +300 -60
  141. detection/__init__.py +0 -0
  142. detection/detector_training/__init__.py +0 -0
  143. detection/process_video.py +85 -33
  144. detection/pytorch_detector.py +43 -25
  145. detection/run_detector.py +157 -72
  146. detection/run_detector_batch.py +179 -113
  147. detection/run_inference_with_yolov5_val.py +108 -48
  148. detection/run_tiled_inference.py +111 -40
  149. detection/tf_detector.py +51 -29
  150. detection/video_utils.py +606 -521
  151. docs/source/conf.py +43 -0
  152. md_utils/__init__.py +0 -0
  153. md_utils/azure_utils.py +9 -9
  154. md_utils/ct_utils.py +228 -68
  155. md_utils/directory_listing.py +59 -64
  156. md_utils/md_tests.py +968 -871
  157. md_utils/path_utils.py +460 -134
  158. md_utils/process_utils.py +157 -133
  159. md_utils/sas_blob_utils.py +20 -20
  160. md_utils/split_locations_into_train_val.py +45 -32
  161. md_utils/string_utils.py +33 -10
  162. md_utils/url_utils.py +176 -60
  163. md_utils/write_html_image_list.py +40 -33
  164. md_visualization/__init__.py +0 -0
  165. md_visualization/plot_utils.py +102 -109
  166. md_visualization/render_images_with_thumbnails.py +34 -34
  167. md_visualization/visualization_utils.py +597 -291
  168. md_visualization/visualize_db.py +76 -48
  169. md_visualization/visualize_detector_output.py +61 -42
  170. {megadetector-5.0.8.dist-info → megadetector-5.0.10.dist-info}/METADATA +13 -7
  171. megadetector-5.0.10.dist-info/RECORD +224 -0
  172. {megadetector-5.0.8.dist-info → megadetector-5.0.10.dist-info}/top_level.txt +1 -0
  173. taxonomy_mapping/__init__.py +0 -0
  174. taxonomy_mapping/map_lila_taxonomy_to_wi_taxonomy.py +342 -335
  175. taxonomy_mapping/map_new_lila_datasets.py +154 -154
  176. taxonomy_mapping/prepare_lila_taxonomy_release.py +142 -134
  177. taxonomy_mapping/preview_lila_taxonomy.py +591 -591
  178. taxonomy_mapping/retrieve_sample_image.py +12 -12
  179. taxonomy_mapping/simple_image_download.py +11 -11
  180. taxonomy_mapping/species_lookup.py +10 -10
  181. taxonomy_mapping/taxonomy_csv_checker.py +18 -18
  182. taxonomy_mapping/taxonomy_graph.py +47 -47
  183. taxonomy_mapping/validate_lila_category_mappings.py +83 -76
  184. data_management/cct_json_to_filename_json.py +0 -89
  185. data_management/cct_to_csv.py +0 -140
  186. data_management/databases/remove_corrupted_images_from_db.py +0 -191
  187. detection/detector_training/copy_checkpoints.py +0 -43
  188. megadetector-5.0.8.dist-info/RECORD +0 -205
  189. {megadetector-5.0.8.dist-info → megadetector-5.0.10.dist-info}/LICENSE +0 -0
  190. {megadetector-5.0.8.dist-info → megadetector-5.0.10.dist-info}/WHEEL +0 -0
data_management/importers/animl_results_to_md_results.py
@@ -1,158 +1,158 @@
- ########
- #
- # animl_results_to_md_results.py
- #
- # Convert a .csv file produced by the Animl package:
- #
- # https://github.com/conservationtechlab/animl-py
- #
- # ...to a MD results file suitable for import into Timelapse.
- #
- # Columns are expected to be:
- #
- # file
- # category (MD category identifies: 1==animal, 2==person, 3==vehicle)
- # detection_conf
- # bbox1,bbox2,bbox3,bbox4
- # class
- # classification_conf
- #
- ########
-
- #%% Imports and constants
-
- import pandas as pd
- import json
-
- # It's a little icky to hard-code this here rather than importing from elsewhere
- # in the MD repo, but it seemed silly to take a dependency on lots of MD code
- # just for this, so, hard-coding.
- detection_category_id_to_name = {'1':'animal','2':'person','3':'vehicle'}
-
-
- #%% Main function
-
- def animl_results_to_md_results(input_file,output_file=None):
-     """
-     Converts the .csv file [input_file] to the MD-formatted .json file [output_file].
-
-     If [output_file] is None, '.json' will be appended to the input file.
-     """
-
-     if output_file is None:
-         output_file = input_file + '.json'
-
-     df = pd.read_csv(input_file)
-
-     expected_columns = ('file','category','detection_conf',
-                         'bbox1','bbox2','bbox3','bbox4','class','classification_conf')
-
-     for s in expected_columns:
-         assert s in df.columns,\
-             'Expected column {} not found'.format(s)
-
-     classification_category_name_to_id = {}
-     filename_to_results = {}
-
-     # i_row = 0; row = df.iloc[i_row]
-     for i_row,row in df.iterrows():
-
-         # Is this the first detection we've seen for this file?
-         if row['file'] in filename_to_results:
-             im = filename_to_results[row['file']]
-         else:
-             im = {}
-             im['detections'] = []
-             im['file'] = row['file']
-             filename_to_results[im['file']] = im
-
-         assert isinstance(row['category'],int),'Invalid category identifier in row {}'.format(im['file'])
-         detection_category_id = str(row['category'])
-         assert detection_category_id in detection_category_id_to_name,\
-             'Unrecognized detection category ID {}'.format(detection_category_id)
-
-         detection = {}
-         detection['category'] = detection_category_id
-         detection['conf'] = row['detection_conf']
-         bbox = [row['bbox1'],row['bbox2'],row['bbox3'],row['bbox4']]
-         detection['bbox'] = bbox
-         classification_category_name = row['class']
-
-         # Have we seen this classification category before?
-         if classification_category_name in classification_category_name_to_id:
-             classification_category_id = \
-                 classification_category_name_to_id[classification_category_name]
-         else:
-             classification_category_id = str(len(classification_category_name_to_id))
-             classification_category_name_to_id[classification_category_name] = \
-                 classification_category_id
-
-         classifications = [[classification_category_id,row['classification_conf']]]
-         detection['classifications'] = classifications
-
-         im['detections'].append(detection)
-
-     # ...for each row
-
-     info = {}
-     info['format_version'] = '1.3'
-     info['detector'] = 'Animl'
-     info['classifier'] = 'Animl'
-
-     results = {}
-     results['info'] = info
-     results['detection_categories'] = detection_category_id_to_name
-     results['classification_categories'] = \
-         {v: k for k, v in classification_category_name_to_id.items()}
-     results['images'] = list(filename_to_results.values())
-
-     with open(output_file,'w') as f:
-         json.dump(results,f,indent=1)
-
- # ...animl_results_to_md_results(...)
-
-
- #%% Interactive driver
-
- if False:
-
-     pass
-
-     #%%
-
-     input_file = r"G:\temp\animl-runs\animl-runs\Coati_v2\manifest.csv"
-     output_file = None
-     animl_results_to_md_results(input_file,output_file)
-
-
- #%% Command-line driver
-
- import sys,argparse
-
- def main():
-
-     parser = argparse.ArgumentParser(
-         description='Convert an Animl-formatted .csv results file to MD-formatted .json results file')
-
-     parser.add_argument(
-         'input_file',
-         type=str,
-         help='input .csv file')
-
-     parser.add_argument(
-         '--output_file',
-         type=str,
-         default=None,
-         help='output .json file (defaults to input file appended with ".json")')
-
-     if len(sys.argv[1:]) == 0:
-         parser.print_help()
-         parser.exit()
-
-     args = parser.parse_args()
-
-     animl_results_to_md_results(args.input_file,args.output_file)
-
- if __name__ == '__main__':
-     main()
-
+ """
+
+ animl_results_to_md_results.py
+
+ Convert a .csv file produced by the Animl package:
+
+ https://github.com/conservationtechlab/animl-py
+
+ ...to a MD results file suitable for import into Timelapse.
+
+ Columns are expected to be:
+
+ file
+ category (MD category identifies: 1==animal, 2==person, 3==vehicle)
+ detection_conf
+ bbox1,bbox2,bbox3,bbox4
+ class
+ classification_conf
+
+ """
+
+ #%% Imports and constants
+
+ import pandas as pd
+ import json
+
+ # It's a little icky to hard-code this here rather than importing from elsewhere
+ # in the MD repo, but it seemed silly to take a dependency on lots of MD code
+ # just for this, so, hard-coding.
+ detection_category_id_to_name = {'1':'animal','2':'person','3':'vehicle'}
+
+
+ #%% Main function
+
+ def animl_results_to_md_results(input_file,output_file=None):
+     """
+     Converts the .csv file [input_file] to the MD-formatted .json file [output_file].
+
+     If [output_file] is None, '.json' will be appended to the input file.
+     """
+
+     if output_file is None:
+         output_file = input_file + '.json'
+
+     df = pd.read_csv(input_file)
+
+     expected_columns = ('file','category','detection_conf',
+                         'bbox1','bbox2','bbox3','bbox4','class','classification_conf')
+
+     for s in expected_columns:
+         assert s in df.columns,\
+             'Expected column {} not found'.format(s)
+
+     classification_category_name_to_id = {}
+     filename_to_results = {}
+
+     # i_row = 0; row = df.iloc[i_row]
+     for i_row,row in df.iterrows():
+
+         # Is this the first detection we've seen for this file?
+         if row['file'] in filename_to_results:
+             im = filename_to_results[row['file']]
+         else:
+             im = {}
+             im['detections'] = []
+             im['file'] = row['file']
+             filename_to_results[im['file']] = im
+
+         assert isinstance(row['category'],int),'Invalid category identifier in row {}'.format(im['file'])
+         detection_category_id = str(row['category'])
+         assert detection_category_id in detection_category_id_to_name,\
+             'Unrecognized detection category ID {}'.format(detection_category_id)
+
+         detection = {}
+         detection['category'] = detection_category_id
+         detection['conf'] = row['detection_conf']
+         bbox = [row['bbox1'],row['bbox2'],row['bbox3'],row['bbox4']]
+         detection['bbox'] = bbox
+         classification_category_name = row['class']
+
+         # Have we seen this classification category before?
+         if classification_category_name in classification_category_name_to_id:
+             classification_category_id = \
+                 classification_category_name_to_id[classification_category_name]
+         else:
+             classification_category_id = str(len(classification_category_name_to_id))
+             classification_category_name_to_id[classification_category_name] = \
+                 classification_category_id
+
+         classifications = [[classification_category_id,row['classification_conf']]]
+         detection['classifications'] = classifications
+
+         im['detections'].append(detection)
+
+     # ...for each row
+
+     info = {}
+     info['format_version'] = '1.3'
+     info['detector'] = 'Animl'
+     info['classifier'] = 'Animl'
+
+     results = {}
+     results['info'] = info
+     results['detection_categories'] = detection_category_id_to_name
+     results['classification_categories'] = \
+         {v: k for k, v in classification_category_name_to_id.items()}
+     results['images'] = list(filename_to_results.values())
+
+     with open(output_file,'w') as f:
+         json.dump(results,f,indent=1)
+
+ # ...animl_results_to_md_results(...)
+
+
+ #%% Interactive driver
+
+ if False:
+
+     pass
+
+     #%%
+
+     input_file = r"G:\temp\animl-runs\animl-runs\Coati_v2\manifest.csv"
+     output_file = None
+     animl_results_to_md_results(input_file,output_file)
+
+
+ #%% Command-line driver
+
+ import sys,argparse
+
+ def main():
+
+     parser = argparse.ArgumentParser(
+         description='Convert an Animl-formatted .csv results file to MD-formatted .json results file')
+
+     parser.add_argument(
+         'input_file',
+         type=str,
+         help='input .csv file')
+
+     parser.add_argument(
+         '--output_file',
+         type=str,
+         default=None,
+         help='output .json file (defaults to input file appended with ".json")')
+
+     if len(sys.argv[1:]) == 0:
+         parser.print_help()
+         parser.exit()
+
+     args = parser.parse_args()
+
+     animl_results_to_md_results(args.input_file,args.output_file)
+
+ if __name__ == '__main__':
+     main()
+
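For orientation, the file above ships with the command-line driver shown at the end of the hunk. The sketch below is a hypothetical invocation, not part of the diff: the .csv and .json paths are placeholders, and it assumes the script is run from the directory that contains it. The printed mapping comes from the hard-coded detection_category_id_to_name dictionary visible above.

import json
import subprocess

# Run the converter's command-line driver on an Animl .csv (placeholder paths).
# --output_file defaults to '<input>.json' when omitted.
subprocess.run(
    ['python', 'animl_results_to_md_results.py',
     'animl_manifest.csv', '--output_file', 'animl_results_md.json'],
    check=True)

# The output is an MD-format results file with 'info', 'detection_categories',
# 'classification_categories', and per-image 'detections'.
with open('animl_results_md.json') as f:
    results = json.load(f)
print(results['detection_categories'])  # {'1': 'animal', '2': 'person', '3': 'vehicle'}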
data_management/importers/auckland_doc_test_to_json.py
@@ -1,12 +1,12 @@
- ########
- #
- # auckland_doc_test_to_json.py
- #
- # Convert Auckland DOC data set to COCO camera traps format. This was
- # for a testing data set where a .csv file was provided with class
- # information.
- #
- ########
+ """
+
+ auckland_doc_test_to_json.py
+
+ Convert Auckland DOC data set to COCO camera traps format. This was
+ for a testing data set where a .csv file was provided with class
+ information.
+
+ """

  #%% Constants and imports

data_management/importers/auckland_doc_to_json.py
@@ -1,11 +1,11 @@
- ########
- #
- # auckland_doc_to_json.py
- #
- # Convert Auckland DOC data set to COCO camera traps format. This was
- # for a training data set where class names were encoded in path names.
- #
- ########
+ """
+
+ auckland_doc_to_json.py
+
+ Convert Auckland DOC data set to COCO camera traps format. This was
+ for a training data set where class names were encoded in path names.
+
+ """

  #%% Constants and imports

data_management/importers/awc_to_json.py
@@ -1,10 +1,10 @@
- ########
- #
- # awc_to_json.py
- #
- # Convert a particular .csv file from Australian Wildlife Conservancy to CCT format.
- #
- ########
+ """
+
+ awc_to_json.py
+
+ Convert a particular .csv file from Australian Wildlife Conservancy to CCT format.
+
+ """

  #%% Constants and environment

data_management/importers/bellevue_to_json.py
@@ -1,18 +1,18 @@
- ########
- #
- # bellevue_to_json.py
- #
- # "Bellevue Camera Traps" is the rather unremarkable camera trap data set
- # used by one of the repo's maintainers for testing. It's organized as:
- #
- # approximate_date/[loose_camera_specifier/]/species
- #
- # E.g.:
- #
- # "2018.03.30\coyote\DSCF0091.JPG"
- # "2018.07.18\oldcam\empty\DSCF0001.JPG"
- #
- ########
+ """
+
+ bellevue_to_json.py
+
+ "Bellevue Camera Traps" is the rather unremarkable camera trap data set
+ used by one of the repo's maintainers for testing. It's organized as:
+
+ approximate_date/[loose_camera_specifier/]/species
+
+ E.g.:
+
+ "2018.03.30\coyote\DSCF0091.JPG"
+ "2018.07.18\oldcam\empty\DSCF0001.JPG"
+
+ """

  #%% Constants and imports

data_management/importers/cacophony-thermal-importer.py
@@ -1,16 +1,16 @@
- ########
- #
- # cacophony-thermal-importer.py
- #
- # Create data and metadata for LILA from the Cacophony thermal dataset. Takes a folder
- # of HDF files, and produces .json metadata, along with compressed/normalized videos for
- # each HDF file.
- #
- # Source format notes for this dataset:
- #
- # https://docs.google.com/document/d/12sw5JtwdMf9MiXuNCBcvhvZ04Jwa1TH2Lf6LnJmF8Bk/edit
- #
- ########
+ """
+
+ cacophony-thermal-importer.py
+
+ Create data and metadata for LILA from the Cacophony thermal dataset. Takes a folder
+ of HDF files, and produces .json metadata, along with compressed/normalized videos for
+ each HDF file.
+
+ Source format notes for this dataset:
+
+ https://docs.google.com/document/d/12sw5JtwdMf9MiXuNCBcvhvZ04Jwa1TH2Lf6LnJmF8Bk/edit
+
+ """

  #%% Imports and constants

data_management/importers/carrizo_shrubfree_2018.py
@@ -1,11 +1,11 @@
- ########
- #
- # carrizo_shrubfree_2018.py
- #
- # Convert the .csv file provided for the Carrizo Mojave data set to a
- # COCO-camera-traps .json file
- #
- ########
+ """
+
+ carrizo_shrubfree_2018.py
+
+ Convert the .csv file provided for the Carrizo Mojave data set to a
+ COCO-camera-traps .json file
+
+ """

  #%% Constants and environment

data_management/importers/carrizo_trail_cam_2017.py
@@ -1,11 +1,11 @@
- ########
- #
- # carrizo_trail_cam_2017.py
- #
- # Convert the .csv files provided for the "Trail Cam Carrizo" 2017 data set to
- # a COCO-camera-traps .json file.
- #
- ########
+ """
+
+ carrizo_trail_cam_2017.py
+
+ Convert the .csv files provided for the "Trail Cam Carrizo" 2017 data set to
+ a COCO-camera-traps .json file.
+
+ """

  #%% Constants and environment

data_management/importers/cct_field_adjustments.py
@@ -1,12 +1,12 @@
- ########
- #
- # cct_field_adjustments.py
- #
- # CCT metadata was posted with int locations instead of strings.
- #
- # This script fixes those issues and rev's the version number.
- #
- ########
+ """
+
+ cct_field_adjustments.py
+
+ CCT metadata was posted with int locations instead of strings.
+
+ This script fixes those issues and rev's the version number.
+
+ """

  #%% Constants and environment

data_management/importers/channel_islands_to_cct.py
@@ -1,13 +1,13 @@
- ########
- #
- # channel_islands_to_cct.py
- #
- # Convert the Channel Islands data set to a COCO-camera-traps .json file
- #
- # Uses the command-line tool ExifTool (exiftool.org) to pull EXIF tags from images,
- # because every Python package we tried failed to pull the "Maker Notes" field properly.
- #
- ########
+ """
+
+ channel_islands_to_cct.py
+
+ Convert the Channel Islands data set to a COCO-camera-traps .json file
+
+ Uses the command-line tool ExifTool (exiftool.org) to pull EXIF tags from images,
+ because every Python package we tried failed to pull the "Maker Notes" field properly.
+
+ """

  #%% Imports, constants, paths

data_management/importers/eMammal/copy_and_unzip_emammal.py
@@ -155,6 +155,7 @@ def download_from_container(dest_folder: str,
  #%% Command-line driver

  if __name__ == '__main__':
+
      if origin == 'cloud':
          container = 'wpz'
          desired_blob_prefix = 'emammal/0Robert Long/'
data_management/importers/ena24_to_json.py
@@ -1,10 +1,10 @@
- ########
- #
- # ena24_to_json_2017.py
- #
- # Convert the ENA24 data set to a COCO-camera-traps .json file
- #
- ########
+ """
+
+ ena24_to_json_2017.py
+
+ Convert the ENA24 data set to a COCO-camera-traps .json file
+
+ """

  #%% Constants and environment

data_management/importers/filenames_to_json.py
@@ -1,11 +1,11 @@
- ########
- #
- # filenames_to_json.py
- #
- # Take a directory of images in which species labels are encoded by folder
- # names, and produces a COCO-style .json file
- #
- ########
+ """
+
+ filenames_to_json.py
+
+ Take a directory of images in which species labels are encoded by folder
+ names, and produces a COCO-style .json file
+
+ """

  #%% Constants and imports

data_management/importers/helena_to_cct.py
@@ -1,10 +1,10 @@
- ########
- #
- # helena_to_cct.py
- #
- # Convert the Helena Detections data set to a COCO-camera-traps .json file
- #
- ########
+ """
+
+ helena_to_cct.py
+
+ Convert the Helena Detections data set to a COCO-camera-traps .json file
+
+ """

  #%% Constants and environment

data_management/importers/idaho-camera-traps.py
@@ -1,10 +1,10 @@
- ########
- #
- # idaho-camera-traps.py
- #
- # Prepare the Idaho Camera Traps dataset for release on LILA.
- #
- ########
+ """
+
+ idaho-camera-traps.py
+
+ Prepare the Idaho Camera Traps dataset for release on LILA.
+
+ """

  #%% Imports and constants

data_management/importers/idfg_iwildcam_lila_prep.py
@@ -1,13 +1,13 @@
- ########
- #
- # idfg_iwildcam_lila_prep.py
- #
- # Adding class labels (from the private test .csv) to the iWildCam 2019 IDFG
- # test set, in preparation for release on LILA.
- #
- # This version works with the public iWildCam release images.
- #
- ########
+ """
+
+ idfg_iwildcam_lila_prep.py
+
+ Adding class labels (from the private test .csv) to the iWildCam 2019 IDFG
+ test set, in preparation for release on LILA.
+
+ This version works with the public iWildCam release images.
+
+ """

  #%% ############ Take one, from iWildCam .json files ############

data_management/importers/jb_csv_to_json.py
@@ -1,12 +1,12 @@
- ########
- #
- # jb_csv_to_json.py
- #
- # Convert a particular .csv file to CCT format. Images were not available at
- # the time I wrote this script, so this is much shorter than other scripts
- # in this folder.
- #
- ########
+ """
+
+ jb_csv_to_json.py
+
+ Convert a particular .csv file to CCT format. Images were not available at
+ the time I wrote this script, so this is much shorter than other scripts
+ in this folder.
+
+ """

  #%% Constants and environment
