megadetector 5.0.10-py3-none-any.whl → 5.0.11-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of megadetector might be problematic.

Files changed (226)
  1. {megadetector-5.0.10.dist-info → megadetector-5.0.11.dist-info}/LICENSE +0 -0
  2. {megadetector-5.0.10.dist-info → megadetector-5.0.11.dist-info}/METADATA +12 -11
  3. megadetector-5.0.11.dist-info/RECORD +5 -0
  4. megadetector-5.0.11.dist-info/top_level.txt +1 -0
  5. api/__init__.py +0 -0
  6. api/batch_processing/__init__.py +0 -0
  7. api/batch_processing/api_core/__init__.py +0 -0
  8. api/batch_processing/api_core/batch_service/__init__.py +0 -0
  9. api/batch_processing/api_core/batch_service/score.py +0 -439
  10. api/batch_processing/api_core/server.py +0 -294
  11. api/batch_processing/api_core/server_api_config.py +0 -98
  12. api/batch_processing/api_core/server_app_config.py +0 -55
  13. api/batch_processing/api_core/server_batch_job_manager.py +0 -220
  14. api/batch_processing/api_core/server_job_status_table.py +0 -152
  15. api/batch_processing/api_core/server_orchestration.py +0 -360
  16. api/batch_processing/api_core/server_utils.py +0 -92
  17. api/batch_processing/api_core_support/__init__.py +0 -0
  18. api/batch_processing/api_core_support/aggregate_results_manually.py +0 -46
  19. api/batch_processing/api_support/__init__.py +0 -0
  20. api/batch_processing/api_support/summarize_daily_activity.py +0 -152
  21. api/batch_processing/data_preparation/__init__.py +0 -0
  22. api/batch_processing/data_preparation/manage_local_batch.py +0 -2391
  23. api/batch_processing/data_preparation/manage_video_batch.py +0 -327
  24. api/batch_processing/integration/digiKam/setup.py +0 -6
  25. api/batch_processing/integration/digiKam/xmp_integration.py +0 -465
  26. api/batch_processing/integration/eMammal/test_scripts/config_template.py +0 -5
  27. api/batch_processing/integration/eMammal/test_scripts/push_annotations_to_emammal.py +0 -126
  28. api/batch_processing/integration/eMammal/test_scripts/select_images_for_testing.py +0 -55
  29. api/batch_processing/postprocessing/__init__.py +0 -0
  30. api/batch_processing/postprocessing/add_max_conf.py +0 -64
  31. api/batch_processing/postprocessing/categorize_detections_by_size.py +0 -163
  32. api/batch_processing/postprocessing/combine_api_outputs.py +0 -249
  33. api/batch_processing/postprocessing/compare_batch_results.py +0 -958
  34. api/batch_processing/postprocessing/convert_output_format.py +0 -397
  35. api/batch_processing/postprocessing/load_api_results.py +0 -195
  36. api/batch_processing/postprocessing/md_to_coco.py +0 -310
  37. api/batch_processing/postprocessing/md_to_labelme.py +0 -330
  38. api/batch_processing/postprocessing/merge_detections.py +0 -401
  39. api/batch_processing/postprocessing/postprocess_batch_results.py +0 -1904
  40. api/batch_processing/postprocessing/remap_detection_categories.py +0 -170
  41. api/batch_processing/postprocessing/render_detection_confusion_matrix.py +0 -661
  42. api/batch_processing/postprocessing/repeat_detection_elimination/find_repeat_detections.py +0 -211
  43. api/batch_processing/postprocessing/repeat_detection_elimination/remove_repeat_detections.py +0 -82
  44. api/batch_processing/postprocessing/repeat_detection_elimination/repeat_detections_core.py +0 -1631
  45. api/batch_processing/postprocessing/separate_detections_into_folders.py +0 -731
  46. api/batch_processing/postprocessing/subset_json_detector_output.py +0 -696
  47. api/batch_processing/postprocessing/top_folders_to_bottom.py +0 -223
  48. api/synchronous/__init__.py +0 -0
  49. api/synchronous/api_core/animal_detection_api/__init__.py +0 -0
  50. api/synchronous/api_core/animal_detection_api/api_backend.py +0 -152
  51. api/synchronous/api_core/animal_detection_api/api_frontend.py +0 -266
  52. api/synchronous/api_core/animal_detection_api/config.py +0 -35
  53. api/synchronous/api_core/animal_detection_api/data_management/annotations/annotation_constants.py +0 -47
  54. api/synchronous/api_core/animal_detection_api/detection/detector_training/copy_checkpoints.py +0 -43
  55. api/synchronous/api_core/animal_detection_api/detection/detector_training/model_main_tf2.py +0 -114
  56. api/synchronous/api_core/animal_detection_api/detection/process_video.py +0 -543
  57. api/synchronous/api_core/animal_detection_api/detection/pytorch_detector.py +0 -304
  58. api/synchronous/api_core/animal_detection_api/detection/run_detector.py +0 -627
  59. api/synchronous/api_core/animal_detection_api/detection/run_detector_batch.py +0 -1029
  60. api/synchronous/api_core/animal_detection_api/detection/run_inference_with_yolov5_val.py +0 -581
  61. api/synchronous/api_core/animal_detection_api/detection/run_tiled_inference.py +0 -754
  62. api/synchronous/api_core/animal_detection_api/detection/tf_detector.py +0 -165
  63. api/synchronous/api_core/animal_detection_api/detection/video_utils.py +0 -495
  64. api/synchronous/api_core/animal_detection_api/md_utils/azure_utils.py +0 -174
  65. api/synchronous/api_core/animal_detection_api/md_utils/ct_utils.py +0 -262
  66. api/synchronous/api_core/animal_detection_api/md_utils/directory_listing.py +0 -251
  67. api/synchronous/api_core/animal_detection_api/md_utils/matlab_porting_tools.py +0 -97
  68. api/synchronous/api_core/animal_detection_api/md_utils/path_utils.py +0 -416
  69. api/synchronous/api_core/animal_detection_api/md_utils/process_utils.py +0 -110
  70. api/synchronous/api_core/animal_detection_api/md_utils/sas_blob_utils.py +0 -509
  71. api/synchronous/api_core/animal_detection_api/md_utils/string_utils.py +0 -59
  72. api/synchronous/api_core/animal_detection_api/md_utils/url_utils.py +0 -144
  73. api/synchronous/api_core/animal_detection_api/md_utils/write_html_image_list.py +0 -226
  74. api/synchronous/api_core/animal_detection_api/md_visualization/visualization_utils.py +0 -841
  75. api/synchronous/api_core/tests/__init__.py +0 -0
  76. api/synchronous/api_core/tests/load_test.py +0 -110
  77. classification/__init__.py +0 -0
  78. classification/aggregate_classifier_probs.py +0 -108
  79. classification/analyze_failed_images.py +0 -227
  80. classification/cache_batchapi_outputs.py +0 -198
  81. classification/create_classification_dataset.py +0 -627
  82. classification/crop_detections.py +0 -516
  83. classification/csv_to_json.py +0 -226
  84. classification/detect_and_crop.py +0 -855
  85. classification/efficientnet/__init__.py +0 -9
  86. classification/efficientnet/model.py +0 -415
  87. classification/efficientnet/utils.py +0 -610
  88. classification/evaluate_model.py +0 -520
  89. classification/identify_mislabeled_candidates.py +0 -152
  90. classification/json_to_azcopy_list.py +0 -63
  91. classification/json_validator.py +0 -695
  92. classification/map_classification_categories.py +0 -276
  93. classification/merge_classification_detection_output.py +0 -506
  94. classification/prepare_classification_script.py +0 -194
  95. classification/prepare_classification_script_mc.py +0 -228
  96. classification/run_classifier.py +0 -286
  97. classification/save_mislabeled.py +0 -110
  98. classification/train_classifier.py +0 -825
  99. classification/train_classifier_tf.py +0 -724
  100. classification/train_utils.py +0 -322
  101. data_management/__init__.py +0 -0
  102. data_management/annotations/__init__.py +0 -0
  103. data_management/annotations/annotation_constants.py +0 -34
  104. data_management/camtrap_dp_to_coco.py +0 -238
  105. data_management/cct_json_utils.py +0 -395
  106. data_management/cct_to_md.py +0 -176
  107. data_management/cct_to_wi.py +0 -289
  108. data_management/coco_to_labelme.py +0 -272
  109. data_management/coco_to_yolo.py +0 -662
  110. data_management/databases/__init__.py +0 -0
  111. data_management/databases/add_width_and_height_to_db.py +0 -33
  112. data_management/databases/combine_coco_camera_traps_files.py +0 -206
  113. data_management/databases/integrity_check_json_db.py +0 -477
  114. data_management/databases/subset_json_db.py +0 -115
  115. data_management/generate_crops_from_cct.py +0 -149
  116. data_management/get_image_sizes.py +0 -188
  117. data_management/importers/add_nacti_sizes.py +0 -52
  118. data_management/importers/add_timestamps_to_icct.py +0 -79
  119. data_management/importers/animl_results_to_md_results.py +0 -158
  120. data_management/importers/auckland_doc_test_to_json.py +0 -372
  121. data_management/importers/auckland_doc_to_json.py +0 -200
  122. data_management/importers/awc_to_json.py +0 -189
  123. data_management/importers/bellevue_to_json.py +0 -273
  124. data_management/importers/cacophony-thermal-importer.py +0 -796
  125. data_management/importers/carrizo_shrubfree_2018.py +0 -268
  126. data_management/importers/carrizo_trail_cam_2017.py +0 -287
  127. data_management/importers/cct_field_adjustments.py +0 -57
  128. data_management/importers/channel_islands_to_cct.py +0 -913
  129. data_management/importers/eMammal/copy_and_unzip_emammal.py +0 -180
  130. data_management/importers/eMammal/eMammal_helpers.py +0 -249
  131. data_management/importers/eMammal/make_eMammal_json.py +0 -223
  132. data_management/importers/ena24_to_json.py +0 -275
  133. data_management/importers/filenames_to_json.py +0 -385
  134. data_management/importers/helena_to_cct.py +0 -282
  135. data_management/importers/idaho-camera-traps.py +0 -1407
  136. data_management/importers/idfg_iwildcam_lila_prep.py +0 -294
  137. data_management/importers/jb_csv_to_json.py +0 -150
  138. data_management/importers/mcgill_to_json.py +0 -250
  139. data_management/importers/missouri_to_json.py +0 -489
  140. data_management/importers/nacti_fieldname_adjustments.py +0 -79
  141. data_management/importers/noaa_seals_2019.py +0 -181
  142. data_management/importers/pc_to_json.py +0 -365
  143. data_management/importers/plot_wni_giraffes.py +0 -123
  144. data_management/importers/prepare-noaa-fish-data-for-lila.py +0 -359
  145. data_management/importers/prepare_zsl_imerit.py +0 -131
  146. data_management/importers/rspb_to_json.py +0 -356
  147. data_management/importers/save_the_elephants_survey_A.py +0 -320
  148. data_management/importers/save_the_elephants_survey_B.py +0 -332
  149. data_management/importers/snapshot_safari_importer.py +0 -758
  150. data_management/importers/snapshot_safari_importer_reprise.py +0 -665
  151. data_management/importers/snapshot_serengeti_lila.py +0 -1067
  152. data_management/importers/snapshotserengeti/make_full_SS_json.py +0 -150
  153. data_management/importers/snapshotserengeti/make_per_season_SS_json.py +0 -153
  154. data_management/importers/sulross_get_exif.py +0 -65
  155. data_management/importers/timelapse_csv_set_to_json.py +0 -490
  156. data_management/importers/ubc_to_json.py +0 -399
  157. data_management/importers/umn_to_json.py +0 -507
  158. data_management/importers/wellington_to_json.py +0 -263
  159. data_management/importers/wi_to_json.py +0 -441
  160. data_management/importers/zamba_results_to_md_results.py +0 -181
  161. data_management/labelme_to_coco.py +0 -548
  162. data_management/labelme_to_yolo.py +0 -272
  163. data_management/lila/__init__.py +0 -0
  164. data_management/lila/add_locations_to_island_camera_traps.py +0 -97
  165. data_management/lila/add_locations_to_nacti.py +0 -147
  166. data_management/lila/create_lila_blank_set.py +0 -557
  167. data_management/lila/create_lila_test_set.py +0 -151
  168. data_management/lila/create_links_to_md_results_files.py +0 -106
  169. data_management/lila/download_lila_subset.py +0 -177
  170. data_management/lila/generate_lila_per_image_labels.py +0 -515
  171. data_management/lila/get_lila_annotation_counts.py +0 -170
  172. data_management/lila/get_lila_image_counts.py +0 -111
  173. data_management/lila/lila_common.py +0 -300
  174. data_management/lila/test_lila_metadata_urls.py +0 -132
  175. data_management/ocr_tools.py +0 -874
  176. data_management/read_exif.py +0 -681
  177. data_management/remap_coco_categories.py +0 -84
  178. data_management/remove_exif.py +0 -66
  179. data_management/resize_coco_dataset.py +0 -189
  180. data_management/wi_download_csv_to_coco.py +0 -246
  181. data_management/yolo_output_to_md_output.py +0 -441
  182. data_management/yolo_to_coco.py +0 -676
  183. detection/__init__.py +0 -0
  184. detection/detector_training/__init__.py +0 -0
  185. detection/detector_training/model_main_tf2.py +0 -114
  186. detection/process_video.py +0 -703
  187. detection/pytorch_detector.py +0 -337
  188. detection/run_detector.py +0 -779
  189. detection/run_detector_batch.py +0 -1219
  190. detection/run_inference_with_yolov5_val.py +0 -917
  191. detection/run_tiled_inference.py +0 -935
  192. detection/tf_detector.py +0 -188
  193. detection/video_utils.py +0 -606
  194. docs/source/conf.py +0 -43
  195. md_utils/__init__.py +0 -0
  196. md_utils/azure_utils.py +0 -174
  197. md_utils/ct_utils.py +0 -612
  198. md_utils/directory_listing.py +0 -246
  199. md_utils/md_tests.py +0 -968
  200. md_utils/path_utils.py +0 -1044
  201. md_utils/process_utils.py +0 -157
  202. md_utils/sas_blob_utils.py +0 -509
  203. md_utils/split_locations_into_train_val.py +0 -228
  204. md_utils/string_utils.py +0 -92
  205. md_utils/url_utils.py +0 -323
  206. md_utils/write_html_image_list.py +0 -225
  207. md_visualization/__init__.py +0 -0
  208. md_visualization/plot_utils.py +0 -293
  209. md_visualization/render_images_with_thumbnails.py +0 -275
  210. md_visualization/visualization_utils.py +0 -1537
  211. md_visualization/visualize_db.py +0 -551
  212. md_visualization/visualize_detector_output.py +0 -406
  213. megadetector-5.0.10.dist-info/RECORD +0 -224
  214. megadetector-5.0.10.dist-info/top_level.txt +0 -8
  215. taxonomy_mapping/__init__.py +0 -0
  216. taxonomy_mapping/map_lila_taxonomy_to_wi_taxonomy.py +0 -491
  217. taxonomy_mapping/map_new_lila_datasets.py +0 -154
  218. taxonomy_mapping/prepare_lila_taxonomy_release.py +0 -142
  219. taxonomy_mapping/preview_lila_taxonomy.py +0 -591
  220. taxonomy_mapping/retrieve_sample_image.py +0 -71
  221. taxonomy_mapping/simple_image_download.py +0 -218
  222. taxonomy_mapping/species_lookup.py +0 -834
  223. taxonomy_mapping/taxonomy_csv_checker.py +0 -159
  224. taxonomy_mapping/taxonomy_graph.py +0 -346
  225. taxonomy_mapping/validate_lila_category_mappings.py +0 -83
  226. {megadetector-5.0.10.dist-info → megadetector-5.0.11.dist-info}/WHEEL +0 -0
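Most entries in the listing above are source modules whose lines are removed, and the new RECORD holds only a handful of entries, which suggests the 5.0.11 wheel ships little beyond its dist-info metadata. To confirm what a downloaded wheel actually contains, the archive can be listed directly; the snippet below is a minimal sketch using only the Python standard library, and the local filename is an assumption:

import zipfile

# A wheel is a zip archive; namelist() enumerates every packaged file,
# which can be compared against the RECORD entries shown in this diff.
with zipfile.ZipFile('megadetector-5.0.11-py3-none-any.whl') as whl:
    for name in whl.namelist():
        print(name)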
classification/train_classifier_tf.py
@@ -1,724 +0,0 @@
- """
-
- train_classifier_tf.py
-
- Train an EfficientNet classifier.
-
- Currently the implementation of multi-label multi-class classification is
- non-functional.
-
- During training, start tensorboard from within the classification/ directory:
-     tensorboard --logdir run --bind_all --samples_per_plugin scalars=0,images=0
-
- """
-
- #%% Imports and constants
-
- from __future__ import annotations
-
- import argparse
- from collections import defaultdict
- from collections.abc import Callable, Mapping, MutableMapping, Sequence
- from datetime import datetime
- import json
- import os
- from typing import Any, Optional
- import uuid
-
- import numpy as np
- import sklearn.metrics
- import tensorflow as tf
- from tensorboard.plugins.hparams import api as hp
- import tqdm
-
- from classification.train_utils import (
-     HeapItem, recall_from_confusion_matrix, add_to_heap, fig_to_img,
-     imgs_with_confidences, load_dataset_csv, prefix_all_keys)
- from md_visualization import plot_utils
-
- AUTOTUNE = tf.data.experimental.AUTOTUNE
-
- # match pytorch EfficientNet model names
- EFFICIENTNET_MODELS: Mapping[str, Mapping[str, Any]] = {
-     'efficientnet-b0': dict(cls='EfficientNetB0', img_size=224, dropout=0.2),
-     'efficientnet-b1': dict(cls='EfficientNetB1', img_size=240, dropout=0.2),
-     'efficientnet-b2': dict(cls='EfficientNetB2', img_size=260, dropout=0.3),
-     'efficientnet-b3': dict(cls='EfficientNetB3', img_size=300, dropout=0.3),
-     'efficientnet-b4': dict(cls='EfficientNetB4', img_size=380, dropout=0.4),
-     'efficientnet-b5': dict(cls='EfficientNetB5', img_size=456, dropout=0.4),
-     'efficientnet-b6': dict(cls='EfficientNetB6', img_size=528, dropout=0.5),
-     'efficientnet-b7': dict(cls='EfficientNetB7', img_size=600, dropout=0.5)
- }
-
-
- #%% Example usage
-
- """
- python train_classifier_tf.py run_idfg /ssd/crops_sq \
-     -m "efficientnet-b0" --pretrained --finetune --label-weighted \
-     --epochs 50 --batch-size 512 --lr 1e-4 \
-     --seed 123 \
-     --logdir run_idfg
- """
-
-
- #%% Support functions
-
- def create_dataset(
-         img_files: Sequence[str],
-         labels: Sequence[Any],
-         sample_weights: Optional[Sequence[float]] = None,
-         img_base_dir: str = '',
-         transform: Optional[Callable[[tf.Tensor], Any]] = None,
-         target_transform: Optional[Callable[[Any], Any]] = None,
-         cache: bool | str = False
-         ) -> tf.data.Dataset:
-     """
-     Create a tf.data.Dataset.
-
-     The dataset returns elements (img, label, img_file, sample_weight) if
-     sample_weights is not None, or (img, label, img_file) if
-     sample_weights=None.
-         img: tf.Tensor, shape [H, W, 3], type uint8
-         label: tf.Tensor
-         img_file: tf.Tensor, scalar, type str
-         sample_weight: tf.Tensor, scalar, type float32
-
-     Args:
-         img_files: list of str, relative paths from img_base_dir
-         labels: list of int if multilabel=False
-         sample_weights: optional list of float
-         img_base_dir: str, base directory for images
-         transform: optional transform to apply to a single uint8 JPEG image
-         target_transform: optional transform to apply to a single label
-         cache: bool or str, cache images in memory if True, cache images to
-             a file on disk if a str
-
-     Returns: tf.data.Dataset
-     """
-
-     # images dataset
-     img_ds = tf.data.Dataset.from_tensor_slices(img_files)
-     img_ds = img_ds.map(lambda p: tf.io.read_file(img_base_dir + os.sep + p),
-                         num_parallel_calls=AUTOTUNE)
-
-     # for smaller disk / memory usage, we cache the raw JPEG bytes instead
-     # of the decoded Tensor
-     if isinstance(cache, str):
-         img_ds = img_ds.cache(cache)
-     elif cache:
-         img_ds = img_ds.cache()
-
-     # convert JPEG bytes to a 3D uint8 Tensor
-     # keras EfficientNet already includes normalization from [0, 255] to [0, 1],
-     # so we don't need to do that here
-     img_ds = img_ds.map(lambda img: tf.io.decode_jpeg(img, channels=3))
-
-     if transform:
-         img_ds = img_ds.map(transform, num_parallel_calls=AUTOTUNE)
-
-     # labels dataset
-     labels_ds = tf.data.Dataset.from_tensor_slices(labels)
-     if target_transform:
-         labels_ds = labels_ds.map(target_transform, num_parallel_calls=AUTOTUNE)
-
-     # img_files dataset
-     img_files_ds = tf.data.Dataset.from_tensor_slices(img_files)
-
-     if sample_weights is None:
-         return tf.data.Dataset.zip((img_ds, labels_ds, img_files_ds))
-
-     # weights dataset
-     weights_ds = tf.data.Dataset.from_tensor_slices(sample_weights)
-     return tf.data.Dataset.zip((img_ds, labels_ds, img_files_ds, weights_ds))
-
-
- def create_dataloaders(
-         dataset_csv_path: str,
-         label_index_json_path: str,
-         splits_json_path: str,
-         cropped_images_dir: str,
-         img_size: int,
-         multilabel: bool,
-         label_weighted: bool,
-         weight_by_detection_conf: bool | str,
-         batch_size: int,
-         augment_train: bool,
-         cache_splits: Sequence[str]
-         ) -> tuple[dict[str, tf.data.Dataset], list[str]]:
-     """
-     Args:
-         dataset_csv_path: str, path to CSV file with columns
-             ['dataset', 'location', 'label'], where label is a comma-delimited
-             list of labels
-         splits_json_path: str, path to JSON file
-         augment_train: bool, whether to shuffle/augment the training set
-         cache_splits: list of str, splits to cache
-             training set is cached at /mnt/tempds/random_file_name
-             validation and test sets are cached in memory
-
-     Returns:
-         datasets: dict, maps split to DataLoader
-         label_names: list of str, label names in order of label id
-     """
-
-     df, label_names, split_to_locs = load_dataset_csv(
-         dataset_csv_path, label_index_json_path, splits_json_path,
-         multilabel=multilabel, label_weighted=label_weighted,
-         weight_by_detection_conf=weight_by_detection_conf)
-
-     # define the transforms
-
-     # efficientnet data preprocessing:
-     # - train:
-     #   1) random crop: aspect_ratio_range=(0.75, 1.33), area_range=(0.08, 1.0)
-     #   2) bicubic resize to img_size
-     #   3) random horizontal flip
-     # - test:
-     #   1) center crop
-     #   2) bicubic resize to img_size
-
-     @tf.function
-     def train_transform(img: tf.Tensor) -> tf.Tensor:
-         """Returns: tf.Tensor, shape [img_size, img_size, C], type float32"""
-         img = tf.image.resize_with_pad(img, img_size, img_size,
-                                        method=tf.image.ResizeMethod.BICUBIC)
-         img = tf.image.random_flip_left_right(img)
-         img = tf.image.random_brightness(img, max_delta=0.25)
-         img = tf.image.random_contrast(img, lower=0.75, upper=1.25)
-         img = tf.image.random_saturation(img, lower=0.75, upper=1.25)
-         return img
-
-     @tf.function
-     def test_transform(img: tf.Tensor) -> tf.Tensor:
-         """Returns: tf.Tensor, shape [img_size, img_size, C], type float32"""
-         img = tf.image.resize_with_pad(img, img_size, img_size,
-                                        method=tf.image.ResizeMethod.BICUBIC)
-         return img
-
-     dataloaders = {}
-     for split, locs in split_to_locs.items():
-         is_train = (split == 'train') and augment_train
-         split_df = df[df['dataset_location'].isin(locs)]
-
-         weights = None
-         if label_weighted or weight_by_detection_conf:
-             # weights sums to:
-             # - if weight_by_detection_conf: (# images in split - conf delta)
-             # - otherwise: (# images in split)
-             weights = split_df['weights'].tolist()
-             if not weight_by_detection_conf:
-                 assert np.isclose(sum(weights), len(split_df))
-
-         cache: bool | str = (split in cache_splits)
-         if split == 'train' and 'train' in cache_splits:
-             unique_filename = str(uuid.uuid4())
-             os.makedirs('/mnt/tempds/', exist_ok=True)
-             cache = f'/mnt/tempds/{unique_filename}'
-
-         ds = create_dataset(
-             img_files=split_df['path'].tolist(),
-             labels=split_df['label_index'].tolist(),
-             sample_weights=weights,
-             img_base_dir=cropped_images_dir,
-             transform=train_transform if is_train else test_transform,
-             target_transform=None,
-             cache=cache)
-         if is_train:
-             ds = ds.shuffle(1000, reshuffle_each_iteration=True)
-         ds = ds.batch(batch_size).prefetch(buffer_size=AUTOTUNE)
-         dataloaders[split] = ds
-
-     return dataloaders, label_names
-
-
- def build_model(model_name: str, num_classes: int, img_size: int,
-                 pretrained: bool, finetune: bool) -> tf.keras.Model:
-     """
-     Creates a model with an EfficientNet base.
-     """
-
-     class_name = EFFICIENTNET_MODELS[model_name]['cls']
-     dropout = EFFICIENTNET_MODELS[model_name]['dropout']
-
-     model_class = tf.keras.applications.__dict__[class_name]
-     weights = 'imagenet' if pretrained else None
-     inputs = tf.keras.layers.Input(shape=(img_size, img_size, 3))
-     base_model = model_class(
-         input_tensor=inputs, weights=weights, include_top=False, pooling='avg')
-
-     if finetune:
-         # freeze the base model's weights, including BatchNorm statistics
-         # https://www.tensorflow.org/guide/keras/transfer_learning#fine-tuning
-         base_model.trainable = False
-
-     # rebuild output
-     x = tf.keras.layers.Dropout(dropout, name='top_dropout')(base_model.output)
-     outputs = tf.keras.layers.Dense(
-         num_classes,
-         kernel_initializer=tf.keras.initializers.VarianceScaling(
-             scale=1. / 3., mode='fan_out', distribution='uniform'),
-         name='logits')(x)
-     model = tf.keras.Model(inputs, outputs, name='complete_model')
-     model.base_model = base_model  # cache this so that we can turn off finetune
-     return model
-
-
- def log_images_with_confidence(
-         heap_dict: Mapping[int, list[HeapItem]],
-         label_names: Sequence[str],
-         epoch: int,
-         tag: str) -> None:
-     """
-     Args:
-         heap_dict: dict, maps label_id to list of HeapItem, where each HeapItem
-             data is a list [img, target, top3_conf, top3_preds, img_file],
-             and img is a tf.Tensor of shape [H, W, 3]
-         label_names: list of str, label names in order of label id
-         epoch: int
-         tag: str
-     """
-
-     for label_id, heap in heap_dict.items():
-         label_name = label_names[label_id]
-
-         sorted_heap = sorted(heap, reverse=True)  # sort largest to smallest
-         imgs_list = [item.data for item in sorted_heap]
-         fig, img_files = imgs_with_confidences(imgs_list, label_names)
-
-         # tf.summary.image requires input of shape [N, H, W, C]
-         fig_img = tf.convert_to_tensor(fig_to_img(fig)[np.newaxis, ...])
-         tf.summary.image(f'{label_name}/{tag}', fig_img, step=epoch)
-         tf.summary.text(f'{label_name}/{tag}_files', '\n\n'.join(img_files),
-                         step=epoch)
-
-
- def track_extreme_examples(tp_heaps: dict[int, list[HeapItem]],
-                            fp_heaps: dict[int, list[HeapItem]],
-                            fn_heaps: dict[int, list[HeapItem]],
-                            inputs: tf.Tensor,
-                            labels: tf.Tensor,
-                            img_files: tf.Tensor,
-                            logits: tf.Tensor) -> None:
-     """
-     Updates the 5 most extreme true-positive (tp), false-positive (fp), and
-     false-negative (fn) examples with examples from this batch.
-
-     Each HeapItem's data attribute is a tuple with:
-     - img: np.ndarray, shape [H, W, 3], type uint8
-     - label: int
-     - top3_conf: list of float
-     - top3_preds: list of float
-     - img_file: str
-
-     Args:
-         *_heaps: dict, maps label_id (int) to heap of HeapItems
-         inputs: tf.Tensor, shape [batch_size, H, W, 3], type float32
-         labels: tf.Tensor, shape [batch_size]
-         img_files: tf.Tensor, shape [batch_size], type tf.string
-         logits: tf.Tensor, shape [batch_size, num_classes]
-     """
-
-     labels = labels.numpy().tolist()
-     inputs = inputs.numpy().astype(np.uint8)
-     img_files = img_files.numpy().astype(str).tolist()
-     batch_probs = tf.nn.softmax(logits, axis=1)
-     iterable = zip(labels, inputs, img_files, batch_probs)
-     for label, img, img_file, confs in iterable:
-         label_conf = confs[label].numpy().item()
-
-         top3_conf, top3_preds = tf.math.top_k(confs, k=3, sorted=True)
-         top3_conf = top3_conf.numpy().tolist()
-         top3_preds = top3_preds.numpy().tolist()
-
-         data = (img, label, top3_conf, top3_preds, img_file)
-         if top3_preds[0] == label:  # true positive
-             item = HeapItem(priority=label_conf - top3_conf[1], data=data)
-             add_to_heap(tp_heaps[label], item, k=5)
-         else:
-             # false positive for top3_pred[0]
-             # false negative for label
-             item = HeapItem(priority=top3_conf[0] - label_conf, data=data)
-             add_to_heap(fp_heaps[top3_preds[0]], item, k=5)
-             add_to_heap(fn_heaps[label], item, k=5)
-
-
- def run_epoch(model: tf.keras.Model,
-               loader: tf.data.Dataset,
-               weighted: bool,
-               top: Sequence[int] = (1, 3),
-               loss_fn: Optional[tf.keras.losses.Loss] = None,
-               weight_decay: float = 0,
-               finetune: bool = False,
-               optimizer: Optional[tf.keras.optimizers.Optimizer] = None,
-               return_extreme_images: bool = False
-               ) -> tuple[
-                   dict[str, float],
-                   dict[str, dict[int, list[HeapItem]]],
-                   np.ndarray
-               ]:
-     """
-     Runs for 1 epoch.
-
-     Args:
-         model: tf.keras.Model
-         loader: tf.data.Dataset
-         weighted: bool, whether to use sample weights in calculating loss and
-             accuracy
-         top: tuple of int, list of values of k for calculating top-K accuracy
-         loss_fn: optional loss function, calculates the mean loss over a batch
-         weight_decay: float, L2-regularization constant
-         finetune: bool, if true sets model's dropout and BN layers to eval mode
-         optimizer: optional optimizer
-
-     Returns:
-         metrics: dict, metrics from epoch, contains keys:
-             'loss': float, mean per-example loss over entire epoch,
-                 only included if loss_fn is not None
-             'acc_top{k}': float, accuracy@k over the entire epoch
-         heaps: dict, keys are ['tp', 'fp', 'fn'], values are heap_dicts,
-             each heap_dict maps label_id (int) to a heap of <= 5 HeapItems with
-             data attribute (img, target, top3_conf, top3_preds, img_file)
-             - 'tp': priority is the difference between target confidence and
-                 2nd highest confidence
-             - 'fp': priority is the difference between highest confidence and
-                 target confidence
-             - 'fn': same as 'fp'
-         confusion_matrix: np.ndarray, shape [num_classes, num_classes],
-             C[i, j] = # of samples with true label i, predicted as label j
-     """
-     # if evaluating or finetuning, set dropout & BN layers to eval mode
-     is_train = False
-     train_dropout_and_bn = False
-
-     if optimizer is not None:
-         assert loss_fn is not None
-         is_train = True
-
-         if not finetune:
-             train_dropout_and_bn = True
-             reg_vars = [
-                 v for v in model.trainable_variables if 'kernel' in v.name]
-
-     if loss_fn is not None:
-         losses = tf.keras.metrics.Mean()
-     accuracies_topk = {
-         k: tf.keras.metrics.SparseTopKCategoricalAccuracy(k) for k in top
-     }
-
-     # for each label, track 5 most-confident and least-confident examples
-     tp_heaps: dict[int, list[HeapItem]] = defaultdict(list)
-     fp_heaps: dict[int, list[HeapItem]] = defaultdict(list)
-     fn_heaps: dict[int, list[HeapItem]] = defaultdict(list)
-
-     all_labels = []
-     all_preds = []
-
-     tqdm_loader = tqdm.tqdm(loader)
-     for batch in tqdm_loader:
-         if weighted:
-             inputs, labels, img_files, weights = batch
-         else:
-             # even if batch contains sample weights, don't use them
-             inputs, labels, img_files = batch[0:3]
-             weights = None
-
-         all_labels.append(labels.numpy())
-         desc = []
-         with tf.GradientTape(watch_accessed_variables=is_train) as tape:
-             outputs = model(inputs, training=train_dropout_and_bn)
-             if loss_fn is not None:
-                 loss = loss_fn(labels, outputs)
-                 if weights is not None:
-                     loss *= weights
-                 # we do not track L2-regularization loss in the loss metric
-                 losses.update_state(loss, sample_weight=weights)
-                 desc.append(f'Loss {losses.result().numpy():.4f}')
-
-             if optimizer is not None:
-                 loss = tf.math.reduce_mean(loss)
-                 if not finetune:  # only regularize layers before the final FC
-                     loss += weight_decay * tf.add_n(
-                         tf.nn.l2_loss(v) for v in reg_vars)
-
-         all_preds.append(tf.math.argmax(outputs, axis=1).numpy())
-
-         if optimizer is not None:
-             gradients = tape.gradient(loss, model.trainable_variables)
-             optimizer.apply_gradients(zip(gradients, model.trainable_variables))
-
-         for k, acc in accuracies_topk.items():
-             acc.update_state(labels, outputs, sample_weight=weights)
-             desc.append(f'Acc@{k} {acc.result().numpy() * 100:.3f}')
-         tqdm_loader.set_description(' '.join(desc))
-
-         if return_extreme_images:
-             track_extreme_examples(tp_heaps, fp_heaps, fn_heaps, inputs,
-                                    labels, img_files, outputs)
-
-     confusion_matrix = sklearn.metrics.confusion_matrix(
-         y_true=np.concatenate(all_labels), y_pred=np.concatenate(all_preds))
-
-     metrics = {}
-     if loss_fn is not None:
-         metrics['loss'] = losses.result().numpy().item()
-     for k, acc in accuracies_topk.items():
-         metrics[f'acc_top{k}'] = acc.result().numpy().item() * 100
-     heaps = {'tp': tp_heaps, 'fp': fp_heaps, 'fn': fn_heaps}
-     return metrics, heaps, confusion_matrix
-
-
- def log_run(split: str, epoch: int, writer: tf.summary.SummaryWriter,
-             label_names: Sequence[str], metrics: MutableMapping[str, float],
-             heaps: Mapping[str, Mapping[int, list[HeapItem]]], cm: np.ndarray
-             ) -> None:
-     """
-     Logs the outputs (metrics, confusion matrix, tp/fp/fn images) from a
-     single epoch run to Tensorboard.
-
-     Args:
-         metrics: dict, keys already prefixed with {split}/
-     """
-
-     per_class_recall = recall_from_confusion_matrix(cm, label_names)
-     metrics.update(prefix_all_keys(per_class_recall, f'{split}/label_recall/'))
-
-     # log metrics
-     for metric, value in metrics.items():
-         tf.summary.scalar(metric, value, epoch)
-
-     # log confusion matrix
-     cm_fig = plot_utils.plot_confusion_matrix(cm, classes=label_names,
-                                               normalize=True)
-     cm_fig_img = tf.convert_to_tensor(fig_to_img(cm_fig)[np.newaxis, ...])
-     tf.summary.image(f'confusion_matrix/{split}', cm_fig_img, step=epoch)
-
-     # log tp/fp/fn images
-     for heap_type, heap_dict in heaps.items():
-         log_images_with_confidence(heap_dict, label_names, epoch=epoch,
-                                    tag=f'{split}/{heap_type}')
-     writer.flush()
-
-
- #%% Main function
-
- def main(dataset_dir: str,
-          cropped_images_dir: str,
-          multilabel: bool,
-          model_name: str,
-          pretrained: bool,
-          finetune: int,
-          label_weighted: bool,
-          weight_by_detection_conf: bool | str,
-          epochs: int,
-          batch_size: int,
-          lr: float,
-          weight_decay: float,
-          seed: Optional[int] = None,
-          logdir: str = '',
-          cache_splits: Sequence[str] = ()) -> None:
-
-     # input validation
-     assert os.path.exists(dataset_dir)
-     assert os.path.exists(cropped_images_dir)
-     if isinstance(weight_by_detection_conf, str):
-         assert os.path.exists(weight_by_detection_conf)
-
-     # set seed
-     seed = np.random.randint(10_000) if seed is None else seed
-     np.random.seed(seed)
-     tf.random.set_seed(seed)
-
-     # create logdir and save params
-     params = dict(locals())  # make a copy
-     timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')  # '20200722_110816'
-     logdir = os.path.join(logdir, timestamp)
-     os.makedirs(logdir, exist_ok=True)
-     print('Created logdir:', logdir)
-     with open(os.path.join(logdir, 'params.json'), 'w') as f:
-         json.dump(params, f, indent=1)
-
-     gpus = tf.config.experimental.list_physical_devices('GPU')
-     for gpu in gpus:
-         tf.config.experimental.set_memory_growth(gpu, True)
-
-     img_size = EFFICIENTNET_MODELS[model_name]['img_size']
-
-     # create dataloaders and log the index_to_label mapping
-     loaders, label_names = create_dataloaders(
-         dataset_csv_path=os.path.join(dataset_dir, 'classification_ds.csv'),
-         label_index_json_path=os.path.join(dataset_dir, 'label_index.json'),
-         splits_json_path=os.path.join(dataset_dir, 'splits.json'),
-         cropped_images_dir=cropped_images_dir,
-         img_size=img_size,
-         multilabel=multilabel,
-         label_weighted=label_weighted,
-         weight_by_detection_conf=weight_by_detection_conf,
-         batch_size=batch_size,
-         augment_train=True,
-         cache_splits=cache_splits)
-
-     writer = tf.summary.create_file_writer(logdir)
-     writer.set_as_default()
-
-     model = build_model(
-         model_name, num_classes=len(label_names), img_size=img_size,
-         pretrained=pretrained, finetune=finetune > 0)
-
-     # define loss function and optimizer
-     loss_fn: tf.keras.losses.Loss
-     if multilabel:
-         loss_fn = tf.keras.losses.BinaryCrossentropy(
-             from_logits=True, reduction=tf.keras.losses.Reduction.NONE)
-     else:
-         loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(
-             from_logits=True, reduction=tf.keras.losses.Reduction.NONE)
-
-     # using EfficientNet training defaults
-     # - batch norm momentum: 0.99
-     # - optimizer: RMSProp, decay 0.9 and momentum 0.9
-     # - epochs: 350
-     # - learning rate: 0.256, decays by 0.97 every 2.4 epochs
-     # - weight decay: 1e-5
-     lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
-         lr, decay_steps=1, decay_rate=0.97, staircase=True)
-     optimizer = tf.keras.optimizers.RMSprop(
-         learning_rate=lr, rho=0.9, momentum=0.9)
-
-     best_epoch_metrics: dict[str, float] = {}
-     for epoch in range(epochs):
-         print(f'Epoch: {epoch}')
-         optimizer.learning_rate = lr_schedule(epoch)
-         tf.summary.scalar('lr', optimizer.learning_rate, epoch)
-
-         if epoch > 0 and finetune == epoch:
-             print('Turning off fine-tune!')
-             model.base_model.trainable = True
-
-         print('- train:')
-
-         train_metrics, train_heaps, train_cm = run_epoch(
-             model, loader=loaders['train'], weighted=label_weighted,
-             loss_fn=loss_fn, weight_decay=weight_decay, optimizer=optimizer,
-             finetune=finetune > epoch, return_extreme_images=True)
-         train_metrics = prefix_all_keys(train_metrics, prefix='train/')
-         log_run('train', epoch, writer, label_names,
-                 metrics=train_metrics, heaps=train_heaps, cm=train_cm)
-
-         print('- val:')
-         val_metrics, val_heaps, val_cm = run_epoch(
-             model, loader=loaders['val'], weighted=label_weighted,
-             loss_fn=loss_fn, return_extreme_images=True)
-         val_metrics = prefix_all_keys(val_metrics, prefix='val/')
-         log_run('val', epoch, writer, label_names,
-                 metrics=val_metrics, heaps=val_heaps, cm=val_cm)
-
-         if val_metrics['val/acc_top1'] > best_epoch_metrics.get('val/acc_top1', 0):  # pylint: disable=line-too-long
-             filename = os.path.join(logdir, f'ckpt_{epoch}.h5')
-             print(f'New best model! Saving checkpoint to {filename}')
-             model.save(filename)
-             best_epoch_metrics.update(train_metrics)
-             best_epoch_metrics.update(val_metrics)
-             best_epoch_metrics['epoch'] = epoch
-
-             print('- test:')
-             test_metrics, test_heaps, test_cm = run_epoch(
-                 model, loader=loaders['test'], weighted=label_weighted,
-                 loss_fn=loss_fn, return_extreme_images=True)
-             test_metrics = prefix_all_keys(test_metrics, prefix='test/')
-             log_run('test', epoch, writer, label_names,
-                     metrics=test_metrics, heaps=test_heaps, cm=test_cm)
-
-         # stop training after 8 epochs without improvement
-         if epoch >= best_epoch_metrics['epoch'] + 8:
-             break
-
-     hparams_dict = {
-         'model_name': model_name,
-         'multilabel': multilabel,
-         'finetune': finetune,
-         'batch_size': batch_size,
-         'epochs': epochs
-     }
-     hp.hparams(hparams_dict)
-     writer.close()
-
-
- #%% Command-line driver
-
- def _parse_args() -> argparse.Namespace:
-     """Parses arguments."""
-     parser = argparse.ArgumentParser(
-         formatter_class=argparse.ArgumentDefaultsHelpFormatter,
-         description='Trains classifier.')
-     parser.add_argument(
-         'dataset_dir',
-         help='path to directory containing: 1) classification dataset CSV, '
-              '2) label index JSON, 3) splits JSON')
-     parser.add_argument(
-         'cropped_images_dir',
-         help='path to local directory where image crops are saved')
-     parser.add_argument(
-         '--multilabel', action='store_true',
-         help='for multi-label, multi-class classification')
-     parser.add_argument(
-         '-m', '--model-name', default='efficientnet-b0',
-         choices=list(EFFICIENTNET_MODELS.keys()),
-         help='which EfficientNet model')
-     parser.add_argument(
-         '--pretrained', action='store_true',
-         help='start with pretrained model')
-     parser.add_argument(
-         '--finetune', type=int, default=0,
-         help='only fine tune the final fully-connected layer for the first '
-              '<finetune> epochs')
-     parser.add_argument(
-         '--label-weighted', action='store_true',
-         help='weight training samples to balance labels')
-     parser.add_argument(
-         '--weight-by-detection-conf', nargs='?', const=True, default=False,
-         help='weight training examples by detection confidence. '
-              'Optionally takes a .npz file for isotonic calibration.')
-     parser.add_argument(
-         '--epochs', type=int, default=0,
-         help='number of epochs for training, 0 for eval-only')
-     parser.add_argument(
-         '--batch-size', type=int, default=256,
-         help='batch size for both training and eval')
-     parser.add_argument(
-         '--lr', type=float, default=None,
-         help='initial learning rate, defaults to (0.016 * batch_size / 256)')
-     parser.add_argument(
-         '--weight-decay', type=float, default=1e-5,
-         help='weight decay')
-     parser.add_argument(
-         '--seed', type=int,
-         help='random seed')
-     parser.add_argument(
-         '--logdir', default='.',
-         help='directory where TensorBoard logs and a params file are saved')
-     parser.add_argument(
-         '--cache', nargs='*', choices=['train', 'val', 'test'], default=(),
-         help='which splits of the dataset to cache')
-     return parser.parse_args()
-
-
- if __name__ == '__main__':
-     args = _parse_args()
-     if args.lr is None:
-         args.lr = 0.016 * args.batch_size / 256  # based on TF models repo
-     main(dataset_dir=args.dataset_dir,
-          cropped_images_dir=args.cropped_images_dir,
-          multilabel=args.multilabel,
-          model_name=args.model_name,
-          pretrained=args.pretrained,
-          finetune=args.finetune,
-          label_weighted=args.label_weighted,
-          weight_by_detection_conf=args.weight_by_detection_conf,
-          epochs=args.epochs,
-          batch_size=args.batch_size,
-          lr=args.lr,
-          weight_decay=args.weight_decay,
-          seed=args.seed,
-          logdir=args.logdir,
-          cache_splits=args.cache)