megadetector 5.0.9__py3-none-any.whl → 5.0.11__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of megadetector might be problematic. See the package registry's release page for more details.

Files changed (226)
  1. {megadetector-5.0.9.dist-info → megadetector-5.0.11.dist-info}/LICENSE +0 -0
  2. {megadetector-5.0.9.dist-info → megadetector-5.0.11.dist-info}/METADATA +12 -11
  3. megadetector-5.0.11.dist-info/RECORD +5 -0
  4. megadetector-5.0.11.dist-info/top_level.txt +1 -0
  5. api/__init__.py +0 -0
  6. api/batch_processing/__init__.py +0 -0
  7. api/batch_processing/api_core/__init__.py +0 -0
  8. api/batch_processing/api_core/batch_service/__init__.py +0 -0
  9. api/batch_processing/api_core/batch_service/score.py +0 -439
  10. api/batch_processing/api_core/server.py +0 -294
  11. api/batch_processing/api_core/server_api_config.py +0 -98
  12. api/batch_processing/api_core/server_app_config.py +0 -55
  13. api/batch_processing/api_core/server_batch_job_manager.py +0 -220
  14. api/batch_processing/api_core/server_job_status_table.py +0 -152
  15. api/batch_processing/api_core/server_orchestration.py +0 -360
  16. api/batch_processing/api_core/server_utils.py +0 -92
  17. api/batch_processing/api_core_support/__init__.py +0 -0
  18. api/batch_processing/api_core_support/aggregate_results_manually.py +0 -46
  19. api/batch_processing/api_support/__init__.py +0 -0
  20. api/batch_processing/api_support/summarize_daily_activity.py +0 -152
  21. api/batch_processing/data_preparation/__init__.py +0 -0
  22. api/batch_processing/data_preparation/manage_local_batch.py +0 -2391
  23. api/batch_processing/data_preparation/manage_video_batch.py +0 -327
  24. api/batch_processing/integration/digiKam/setup.py +0 -6
  25. api/batch_processing/integration/digiKam/xmp_integration.py +0 -465
  26. api/batch_processing/integration/eMammal/test_scripts/config_template.py +0 -5
  27. api/batch_processing/integration/eMammal/test_scripts/push_annotations_to_emammal.py +0 -126
  28. api/batch_processing/integration/eMammal/test_scripts/select_images_for_testing.py +0 -55
  29. api/batch_processing/postprocessing/__init__.py +0 -0
  30. api/batch_processing/postprocessing/add_max_conf.py +0 -64
  31. api/batch_processing/postprocessing/categorize_detections_by_size.py +0 -163
  32. api/batch_processing/postprocessing/combine_api_outputs.py +0 -249
  33. api/batch_processing/postprocessing/compare_batch_results.py +0 -958
  34. api/batch_processing/postprocessing/convert_output_format.py +0 -397
  35. api/batch_processing/postprocessing/load_api_results.py +0 -195
  36. api/batch_processing/postprocessing/md_to_coco.py +0 -310
  37. api/batch_processing/postprocessing/md_to_labelme.py +0 -330
  38. api/batch_processing/postprocessing/merge_detections.py +0 -401
  39. api/batch_processing/postprocessing/postprocess_batch_results.py +0 -1904
  40. api/batch_processing/postprocessing/remap_detection_categories.py +0 -170
  41. api/batch_processing/postprocessing/render_detection_confusion_matrix.py +0 -661
  42. api/batch_processing/postprocessing/repeat_detection_elimination/find_repeat_detections.py +0 -211
  43. api/batch_processing/postprocessing/repeat_detection_elimination/remove_repeat_detections.py +0 -82
  44. api/batch_processing/postprocessing/repeat_detection_elimination/repeat_detections_core.py +0 -1631
  45. api/batch_processing/postprocessing/separate_detections_into_folders.py +0 -731
  46. api/batch_processing/postprocessing/subset_json_detector_output.py +0 -696
  47. api/batch_processing/postprocessing/top_folders_to_bottom.py +0 -223
  48. api/synchronous/__init__.py +0 -0
  49. api/synchronous/api_core/animal_detection_api/__init__.py +0 -0
  50. api/synchronous/api_core/animal_detection_api/api_backend.py +0 -152
  51. api/synchronous/api_core/animal_detection_api/api_frontend.py +0 -266
  52. api/synchronous/api_core/animal_detection_api/config.py +0 -35
  53. api/synchronous/api_core/animal_detection_api/data_management/annotations/annotation_constants.py +0 -47
  54. api/synchronous/api_core/animal_detection_api/detection/detector_training/copy_checkpoints.py +0 -43
  55. api/synchronous/api_core/animal_detection_api/detection/detector_training/model_main_tf2.py +0 -114
  56. api/synchronous/api_core/animal_detection_api/detection/process_video.py +0 -543
  57. api/synchronous/api_core/animal_detection_api/detection/pytorch_detector.py +0 -304
  58. api/synchronous/api_core/animal_detection_api/detection/run_detector.py +0 -627
  59. api/synchronous/api_core/animal_detection_api/detection/run_detector_batch.py +0 -1029
  60. api/synchronous/api_core/animal_detection_api/detection/run_inference_with_yolov5_val.py +0 -581
  61. api/synchronous/api_core/animal_detection_api/detection/run_tiled_inference.py +0 -754
  62. api/synchronous/api_core/animal_detection_api/detection/tf_detector.py +0 -165
  63. api/synchronous/api_core/animal_detection_api/detection/video_utils.py +0 -495
  64. api/synchronous/api_core/animal_detection_api/md_utils/azure_utils.py +0 -174
  65. api/synchronous/api_core/animal_detection_api/md_utils/ct_utils.py +0 -262
  66. api/synchronous/api_core/animal_detection_api/md_utils/directory_listing.py +0 -251
  67. api/synchronous/api_core/animal_detection_api/md_utils/matlab_porting_tools.py +0 -97
  68. api/synchronous/api_core/animal_detection_api/md_utils/path_utils.py +0 -416
  69. api/synchronous/api_core/animal_detection_api/md_utils/process_utils.py +0 -110
  70. api/synchronous/api_core/animal_detection_api/md_utils/sas_blob_utils.py +0 -509
  71. api/synchronous/api_core/animal_detection_api/md_utils/string_utils.py +0 -59
  72. api/synchronous/api_core/animal_detection_api/md_utils/url_utils.py +0 -144
  73. api/synchronous/api_core/animal_detection_api/md_utils/write_html_image_list.py +0 -226
  74. api/synchronous/api_core/animal_detection_api/md_visualization/visualization_utils.py +0 -841
  75. api/synchronous/api_core/tests/__init__.py +0 -0
  76. api/synchronous/api_core/tests/load_test.py +0 -110
  77. classification/__init__.py +0 -0
  78. classification/aggregate_classifier_probs.py +0 -108
  79. classification/analyze_failed_images.py +0 -227
  80. classification/cache_batchapi_outputs.py +0 -198
  81. classification/create_classification_dataset.py +0 -627
  82. classification/crop_detections.py +0 -516
  83. classification/csv_to_json.py +0 -226
  84. classification/detect_and_crop.py +0 -855
  85. classification/efficientnet/__init__.py +0 -9
  86. classification/efficientnet/model.py +0 -415
  87. classification/efficientnet/utils.py +0 -610
  88. classification/evaluate_model.py +0 -520
  89. classification/identify_mislabeled_candidates.py +0 -152
  90. classification/json_to_azcopy_list.py +0 -63
  91. classification/json_validator.py +0 -695
  92. classification/map_classification_categories.py +0 -276
  93. classification/merge_classification_detection_output.py +0 -506
  94. classification/prepare_classification_script.py +0 -194
  95. classification/prepare_classification_script_mc.py +0 -228
  96. classification/run_classifier.py +0 -286
  97. classification/save_mislabeled.py +0 -110
  98. classification/train_classifier.py +0 -825
  99. classification/train_classifier_tf.py +0 -724
  100. classification/train_utils.py +0 -322
  101. data_management/__init__.py +0 -0
  102. data_management/annotations/__init__.py +0 -0
  103. data_management/annotations/annotation_constants.py +0 -34
  104. data_management/camtrap_dp_to_coco.py +0 -238
  105. data_management/cct_json_utils.py +0 -395
  106. data_management/cct_to_md.py +0 -176
  107. data_management/cct_to_wi.py +0 -289
  108. data_management/coco_to_labelme.py +0 -272
  109. data_management/coco_to_yolo.py +0 -662
  110. data_management/databases/__init__.py +0 -0
  111. data_management/databases/add_width_and_height_to_db.py +0 -33
  112. data_management/databases/combine_coco_camera_traps_files.py +0 -206
  113. data_management/databases/integrity_check_json_db.py +0 -477
  114. data_management/databases/subset_json_db.py +0 -115
  115. data_management/generate_crops_from_cct.py +0 -149
  116. data_management/get_image_sizes.py +0 -188
  117. data_management/importers/add_nacti_sizes.py +0 -52
  118. data_management/importers/add_timestamps_to_icct.py +0 -79
  119. data_management/importers/animl_results_to_md_results.py +0 -158
  120. data_management/importers/auckland_doc_test_to_json.py +0 -372
  121. data_management/importers/auckland_doc_to_json.py +0 -200
  122. data_management/importers/awc_to_json.py +0 -189
  123. data_management/importers/bellevue_to_json.py +0 -273
  124. data_management/importers/cacophony-thermal-importer.py +0 -796
  125. data_management/importers/carrizo_shrubfree_2018.py +0 -268
  126. data_management/importers/carrizo_trail_cam_2017.py +0 -287
  127. data_management/importers/cct_field_adjustments.py +0 -57
  128. data_management/importers/channel_islands_to_cct.py +0 -913
  129. data_management/importers/eMammal/copy_and_unzip_emammal.py +0 -180
  130. data_management/importers/eMammal/eMammal_helpers.py +0 -249
  131. data_management/importers/eMammal/make_eMammal_json.py +0 -223
  132. data_management/importers/ena24_to_json.py +0 -275
  133. data_management/importers/filenames_to_json.py +0 -385
  134. data_management/importers/helena_to_cct.py +0 -282
  135. data_management/importers/idaho-camera-traps.py +0 -1407
  136. data_management/importers/idfg_iwildcam_lila_prep.py +0 -294
  137. data_management/importers/jb_csv_to_json.py +0 -150
  138. data_management/importers/mcgill_to_json.py +0 -250
  139. data_management/importers/missouri_to_json.py +0 -489
  140. data_management/importers/nacti_fieldname_adjustments.py +0 -79
  141. data_management/importers/noaa_seals_2019.py +0 -181
  142. data_management/importers/pc_to_json.py +0 -365
  143. data_management/importers/plot_wni_giraffes.py +0 -123
  144. data_management/importers/prepare-noaa-fish-data-for-lila.py +0 -359
  145. data_management/importers/prepare_zsl_imerit.py +0 -131
  146. data_management/importers/rspb_to_json.py +0 -356
  147. data_management/importers/save_the_elephants_survey_A.py +0 -320
  148. data_management/importers/save_the_elephants_survey_B.py +0 -332
  149. data_management/importers/snapshot_safari_importer.py +0 -758
  150. data_management/importers/snapshot_safari_importer_reprise.py +0 -665
  151. data_management/importers/snapshot_serengeti_lila.py +0 -1067
  152. data_management/importers/snapshotserengeti/make_full_SS_json.py +0 -150
  153. data_management/importers/snapshotserengeti/make_per_season_SS_json.py +0 -153
  154. data_management/importers/sulross_get_exif.py +0 -65
  155. data_management/importers/timelapse_csv_set_to_json.py +0 -490
  156. data_management/importers/ubc_to_json.py +0 -399
  157. data_management/importers/umn_to_json.py +0 -507
  158. data_management/importers/wellington_to_json.py +0 -263
  159. data_management/importers/wi_to_json.py +0 -441
  160. data_management/importers/zamba_results_to_md_results.py +0 -181
  161. data_management/labelme_to_coco.py +0 -548
  162. data_management/labelme_to_yolo.py +0 -272
  163. data_management/lila/__init__.py +0 -0
  164. data_management/lila/add_locations_to_island_camera_traps.py +0 -97
  165. data_management/lila/add_locations_to_nacti.py +0 -147
  166. data_management/lila/create_lila_blank_set.py +0 -557
  167. data_management/lila/create_lila_test_set.py +0 -151
  168. data_management/lila/create_links_to_md_results_files.py +0 -106
  169. data_management/lila/download_lila_subset.py +0 -177
  170. data_management/lila/generate_lila_per_image_labels.py +0 -515
  171. data_management/lila/get_lila_annotation_counts.py +0 -170
  172. data_management/lila/get_lila_image_counts.py +0 -111
  173. data_management/lila/lila_common.py +0 -300
  174. data_management/lila/test_lila_metadata_urls.py +0 -132
  175. data_management/ocr_tools.py +0 -874
  176. data_management/read_exif.py +0 -681
  177. data_management/remap_coco_categories.py +0 -84
  178. data_management/remove_exif.py +0 -66
  179. data_management/resize_coco_dataset.py +0 -189
  180. data_management/wi_download_csv_to_coco.py +0 -246
  181. data_management/yolo_output_to_md_output.py +0 -441
  182. data_management/yolo_to_coco.py +0 -676
  183. detection/__init__.py +0 -0
  184. detection/detector_training/__init__.py +0 -0
  185. detection/detector_training/model_main_tf2.py +0 -114
  186. detection/process_video.py +0 -703
  187. detection/pytorch_detector.py +0 -337
  188. detection/run_detector.py +0 -779
  189. detection/run_detector_batch.py +0 -1219
  190. detection/run_inference_with_yolov5_val.py +0 -917
  191. detection/run_tiled_inference.py +0 -935
  192. detection/tf_detector.py +0 -188
  193. detection/video_utils.py +0 -606
  194. docs/source/conf.py +0 -43
  195. md_utils/__init__.py +0 -0
  196. md_utils/azure_utils.py +0 -174
  197. md_utils/ct_utils.py +0 -612
  198. md_utils/directory_listing.py +0 -246
  199. md_utils/md_tests.py +0 -968
  200. md_utils/path_utils.py +0 -1044
  201. md_utils/process_utils.py +0 -157
  202. md_utils/sas_blob_utils.py +0 -509
  203. md_utils/split_locations_into_train_val.py +0 -228
  204. md_utils/string_utils.py +0 -92
  205. md_utils/url_utils.py +0 -323
  206. md_utils/write_html_image_list.py +0 -225
  207. md_visualization/__init__.py +0 -0
  208. md_visualization/plot_utils.py +0 -293
  209. md_visualization/render_images_with_thumbnails.py +0 -275
  210. md_visualization/visualization_utils.py +0 -1537
  211. md_visualization/visualize_db.py +0 -551
  212. md_visualization/visualize_detector_output.py +0 -406
  213. megadetector-5.0.9.dist-info/RECORD +0 -224
  214. megadetector-5.0.9.dist-info/top_level.txt +0 -8
  215. taxonomy_mapping/__init__.py +0 -0
  216. taxonomy_mapping/map_lila_taxonomy_to_wi_taxonomy.py +0 -491
  217. taxonomy_mapping/map_new_lila_datasets.py +0 -154
  218. taxonomy_mapping/prepare_lila_taxonomy_release.py +0 -142
  219. taxonomy_mapping/preview_lila_taxonomy.py +0 -591
  220. taxonomy_mapping/retrieve_sample_image.py +0 -71
  221. taxonomy_mapping/simple_image_download.py +0 -218
  222. taxonomy_mapping/species_lookup.py +0 -834
  223. taxonomy_mapping/taxonomy_csv_checker.py +0 -159
  224. taxonomy_mapping/taxonomy_graph.py +0 -346
  225. taxonomy_mapping/validate_lila_category_mappings.py +0 -83
  226. {megadetector-5.0.9.dist-info → megadetector-5.0.11.dist-info}/WHEEL +0 -0
@@ -1,825 +0,0 @@
1
- """
2
-
3
- train_classifier.py
4
-
5
- Train a EfficientNet or ResNet classifier.
6
-
7
- Currently the implementation of multi-label multi-class classification is
8
- non-functional.
9
-
10
- During training, start tensorboard from within the classification/ directory:
11
- tensorboard --logdir run --bind_all --samples_per_plugin scalars=0,images=0
12
-
13
- """
14
-
15
- #%% Imports and constants
16
-
17
- from __future__ import annotations
18
-
19
- import argparse
20
- from collections import defaultdict
21
- from collections.abc import Callable, Mapping, MutableMapping, Sequence
22
- from datetime import datetime
23
- import json
24
- import os
25
- from typing import Any
26
-
27
- import numpy as np
28
- import PIL.Image
29
- import sklearn.metrics
30
- import torch
31
- from torch.utils import tensorboard
32
- import torchvision as tv
33
- from torchvision.datasets.folder import default_loader
34
- import tqdm
35
-
36
- from classification import efficientnet, evaluate_model
37
- from classification.train_utils import (
38
- HeapItem, recall_from_confusion_matrix, add_to_heap, fig_to_img,
39
- imgs_with_confidences, load_dataset_csv, prefix_all_keys)
40
- from md_visualization import plot_utils
41
-
42
-
43
- #%% Example usage
44
-
45
- """
46
- python train_classifier.py run_idfg /ssd/crops_sq \
47
- -m "efficientnet-b0" --pretrained --finetune --label-weighted \
48
- --epochs 50 --batch-size 512 --lr 1e-4 \
49
- --num-workers 12 --seed 123 \
50
- --logdir run_idfg
51
- """
52
-
53
- # mean/std values from https://pytorch.org/docs/stable/torchvision/models.html
54
- MEANS = np.asarray([0.485, 0.456, 0.406])
55
- STDS = np.asarray([0.229, 0.224, 0.225])
56
-
57
- VALID_MODELS = sorted(
58
- set(efficientnet.VALID_MODELS) |
59
- {'resnet101', 'resnet152', 'resnet18', 'resnet34', 'resnet50'})
60
-
61
-
62
- class AverageMeter:
63
- """Computes and stores the average and current value"""
64
- def __init__(self):
65
- self.reset()
66
-
67
- def reset(self) -> None:
68
- self.val = 0.0
69
- self.avg = 0.0
70
- self.sum = 0.0
71
- self.count = 0
72
-
73
- def update(self, val: float, n: int = 1) -> None:
74
- self.val = val
75
- self.sum += val * n
76
- self.count += n
77
- self.avg = self.sum / self.count
78
-
79
-
80
- class SimpleDataset(torch.utils.data.Dataset):
81
- """A simple dataset that simply returns images and labels."""
82
-
83
- def __init__(self,
84
- img_files: Sequence[str],
85
- labels: Sequence[Any],
86
- sample_weights: Sequence[float] | None = None,
87
- img_base_dir: str = '',
88
- transform: Callable[[PIL.Image.Image], Any] | None = None,
89
- target_transform: Callable[[Any], Any] | None = None):
90
- """Creates a SimpleDataset."""
91
- self.img_files = img_files
92
- self.labels = labels
93
- self.sample_weights = sample_weights
94
- self.img_base_dir = img_base_dir
95
- self.transform = transform
96
- self.target_transform = target_transform
97
-
98
- self.len = len(img_files)
99
- assert len(labels) == self.len
100
- if sample_weights is not None:
101
- assert len(sample_weights) == self.len
102
-
103
- def __getitem__(self, index: int) -> tuple[Any, ...]:
104
- """
105
- Args:
106
- index: int
107
-
108
- Returns: tuple, (sample, target) or (sample, target, sample_weight)
109
- """
110
- img_file = self.img_files[index]
111
- img = default_loader(os.path.join(self.img_base_dir, img_file))
112
- if self.transform is not None:
113
- img = self.transform(img)
114
- target = self.labels[index]
115
- if self.target_transform is not None:
116
- target = self.target_transform(target)
117
- if self.sample_weights is not None:
118
- return img, target, img_file, self.sample_weights[index]
119
- return img, target, img_file
120
-
121
- def __len__(self) -> int:
122
- return self.len
123
-
124
-
125
- def create_dataloaders(
126
- dataset_csv_path: str,
127
- label_index_json_path: str,
128
- splits_json_path: str,
129
- cropped_images_dir: str,
130
- img_size: int,
131
- multilabel: bool,
132
- label_weighted: bool,
133
- weight_by_detection_conf: bool | str,
134
- batch_size: int,
135
- num_workers: int,
136
- augment_train: bool
137
- ) -> tuple[dict[str, torch.utils.data.DataLoader], list[str]]:
138
- """
139
- Args:
140
- dataset_csv_path: str, path to CSV file with columns
141
- ['dataset', 'location', 'label'], where label is a comma-delimited
142
- list of labels
143
- splits_json_path: str, path to JSON file
144
- augment_train: bool, whether to shuffle/augment the training set
145
-
146
- Returns:
147
- datasets: dict, maps split to DataLoader
148
- label_names: list of str, label names in order of label id
149
- """
150
- df, label_names, split_to_locs = load_dataset_csv(
151
- dataset_csv_path, label_index_json_path, splits_json_path,
152
- multilabel=multilabel, label_weighted=label_weighted,
153
- weight_by_detection_conf=weight_by_detection_conf)
154
-
155
- # define the transforms
156
- normalize = tv.transforms.Normalize(mean=MEANS, std=STDS, inplace=True)
157
- train_transform = tv.transforms.Compose([
158
- tv.transforms.RandomResizedCrop(img_size),
159
- tv.transforms.RandomRotation(degrees=(-90, 90)),
160
- tv.transforms.RandomHorizontalFlip(p=0.5),
161
- tv.transforms.RandomVerticalFlip(p=0.1),
162
- tv.transforms.RandomGrayscale(p=0.1),
163
- tv.transforms.ColorJitter(brightness=.25, contrast=.25, saturation=.25),
164
- tv.transforms.ToTensor(),
165
- normalize
166
- ])
167
- test_transform = tv.transforms.Compose([
168
- # resizes smaller edge to img_size
169
- tv.transforms.Resize(img_size, interpolation=PIL.Image.BICUBIC),
170
- tv.transforms.CenterCrop(img_size),
171
- tv.transforms.ToTensor(),
172
- normalize
173
- ])
174
-
175
- dataloaders = {}
176
- for split, locs in split_to_locs.items():
177
- is_train = (split == 'train') and augment_train
178
- split_df = df[df['dataset_location'].isin(locs)]
179
-
180
- sampler: torch.utils.data.Sampler | None = None
181
- weights = None
182
- if label_weighted or weight_by_detection_conf:
183
- # weights sums to:
184
- # - if weight_by_detection_conf: (# images in split - conf delta)
185
- # - otherwise: # images in split
186
- weights = split_df['weights'].to_numpy()
187
- if not weight_by_detection_conf:
188
- assert np.isclose(weights.sum(), len(split_df))
189
- if is_train:
190
- sampler = torch.utils.data.WeightedRandomSampler(
191
- weights, num_samples=len(split_df), replacement=True)
192
- elif is_train:
193
- # for normal (non-weighted) shuffling
194
- sampler = torch.utils.data.SubsetRandomSampler(range(len(split_df)))
195
-
196
- dataset = SimpleDataset(
197
- img_files=split_df['path'].tolist(),
198
- labels=split_df['label_index'].tolist(),
199
- sample_weights=weights,
200
- img_base_dir=cropped_images_dir,
201
- transform=train_transform if is_train else test_transform)
202
- assert len(dataset) > 0
203
- dataloaders[split] = torch.utils.data.DataLoader(
204
- dataset, batch_size=batch_size, sampler=sampler,
205
- num_workers=num_workers, pin_memory=True)
206
-
207
- return dataloaders, label_names
208
-
209
-
210
- def set_finetune(model: torch.nn.Module, model_name: str, finetune: bool
211
- ) -> None:
212
- """Set the 'requires_grad' on each model parameter according to whether or
213
- not we are fine-tuning the model.
214
- """
215
- if finetune:
216
- if 'efficientnet' in model_name:
217
- final_layer = model._fc # pylint: disable=protected-access
218
- else: # torchvision resnet
219
- final_layer = model.fc
220
- assert isinstance(final_layer, torch.nn.Module)
221
-
222
- # set all parameters to not require gradients except final FC layer
223
- model.requires_grad_(False)
224
- for param in final_layer.parameters():
225
- param.requires_grad = True
226
- else:
227
- model.requires_grad_(True)
228
-
229
-
230
- def build_model(model_name: str, num_classes: int, pretrained: bool | str,
231
- finetune: bool) -> torch.nn.Module:
232
- """Creates a model with an EfficientNet or ResNet base. The model outputs
233
- unnormalized logits.
234
-
235
- Args:
236
- model_name: str, name of EfficientNet or Resnet model
237
- num_classes: int, number of classes for output layer
238
- pretrained: bool or str, (bool) whether to initialize to ImageNet
239
- weights, (str) path to checkpoint
240
- finetune: bool, whether to freeze all layers except the final FC layer
241
-
242
- Returns: torch.nn.Module, model loaded on CPU
243
- """
244
- assert model_name in VALID_MODELS
245
-
246
- if 'efficientnet' in model_name:
247
- if pretrained is True:
248
- model = efficientnet.EfficientNet.from_pretrained(
249
- model_name, num_classes=num_classes)
250
- else:
251
- model = efficientnet.EfficientNet.from_name(
252
- model_name, num_classes=num_classes)
253
- else:
254
- model_class = getattr(tv.models, model_name)
255
- model = model_class(pretrained=(pretrained is True))
256
-
257
- # replace final fully-connected layer (which has 1000 ImageNet classes)
258
- model.fc = torch.nn.Linear(model.fc.in_features, num_classes)
259
-
260
- if isinstance(pretrained, str):
261
- print(f'Loading saved weights from {pretrained}')
262
- ckpt = torch.load(pretrained, map_location='cpu')
263
- model.load_state_dict(ckpt['model'])
264
-
265
- assert all(p.requires_grad for p in model.parameters())
266
- set_finetune(model=model, model_name=model_name, finetune=finetune)
267
- return model
268
-
269
-
270
- def prep_device(model: torch.nn.Module, device_id: int | None = None
271
- ) -> tuple[torch.nn.Module, torch.device]:
272
- """Place model on appropriate device.
273
-
274
- Args:
275
- model: torch.nn.Module, not already wrapped with DataParallel
276
- device_id: optional int, GPU device to use
277
- if None, then uses DataParallel when possible
278
- if specified, then only uses specified device
279
-
280
- Returns:
281
- model: torch.nn.Module, model placed on <device>, wrapped with
282
- DataParallel if more than 1 GPU is found
283
- device: torch.device, 'cuda:{device_id}' if GPU is found, otherwise 'cpu'
284
- """
285
- # detect GPU, use all if available
286
- if torch.cuda.is_available():
287
- print('CUDA available')
288
- torch.backends.cudnn.benchmark = True
289
- if device_id is not None:
290
- print(f'Starting CUDA device {device_id}')
291
- device = torch.device(f'cuda:{device_id}')
292
- else:
293
- device = torch.device('cuda:0')
294
- device_ids = list(range(torch.cuda.device_count()))
295
- if len(device_ids) > 1:
296
- print(f'Found multiple devices, enabling data parallelism ({device_ids})')
297
- model = torch.nn.DataParallel(model, device_ids=device_ids)
298
- else:
299
- print('CUDA not available, running on the CPU')
300
- device = torch.device('cpu')
301
- model.to(device) # in-place
302
- return model, device
303
-
304
-
305
- def main(dataset_dir: str,
306
- cropped_images_dir: str,
307
- multilabel: bool,
308
- model_name: str,
309
- pretrained: bool | str,
310
- finetune: int,
311
- label_weighted: bool,
312
- weight_by_detection_conf: bool | str,
313
- epochs: int,
314
- batch_size: int,
315
- lr: float,
316
- weight_decay: float,
317
- num_workers: int,
318
- logdir: str,
319
- log_extreme_examples: int,
320
- seed: int | None = None) -> None:
321
- """Main function."""
322
- # input validation
323
- assert os.path.exists(dataset_dir)
324
- assert os.path.exists(cropped_images_dir)
325
- if isinstance(weight_by_detection_conf, str):
326
- assert os.path.exists(weight_by_detection_conf)
327
- if isinstance(pretrained, str):
328
- assert os.path.exists(pretrained)
329
-
330
- # set seed
331
- seed = np.random.randint(10_000) if seed is None else seed
332
- np.random.seed(seed)
333
- torch.manual_seed(seed)
334
- torch.cuda.manual_seed_all(seed)
335
-
336
- # create logdir and save params
337
- params = dict(locals()) # make a copy
338
- timestamp = datetime.now().strftime('%Y%m%d_%H%M%S') # '20200722_110816'
339
- logdir = os.path.join(logdir, timestamp)
340
- os.makedirs(logdir, exist_ok=True)
341
- print('Created logdir:', logdir)
342
- params_json_path = os.path.join(logdir, 'params.json')
343
- with open(params_json_path, 'w') as f:
344
- json.dump(params, f, indent=1)
345
-
346
- if 'efficientnet' in model_name:
347
- img_size = efficientnet.EfficientNet.get_image_size(model_name)
348
- else:
349
- img_size = 224
350
-
351
- # create dataloaders and log the index_to_label mapping
352
- print('Creating dataloaders')
353
- loaders, label_names = create_dataloaders(
354
- dataset_csv_path=os.path.join(dataset_dir, 'classification_ds.csv'),
355
- label_index_json_path=os.path.join(dataset_dir, 'label_index.json'),
356
- splits_json_path=os.path.join(dataset_dir, 'splits.json'),
357
- cropped_images_dir=cropped_images_dir,
358
- img_size=img_size,
359
- multilabel=multilabel,
360
- label_weighted=label_weighted,
361
- weight_by_detection_conf=weight_by_detection_conf,
362
- batch_size=batch_size,
363
- num_workers=num_workers,
364
- augment_train=True)
365
-
366
- writer = tensorboard.SummaryWriter(logdir)
367
-
368
- # create model
369
- model = build_model(model_name, num_classes=len(label_names),
370
- pretrained=pretrained, finetune=finetune > 0)
371
- model, device = prep_device(model)
372
-
373
- # define loss function and optimizer
374
- loss_fn: torch.nn.Module
375
- if multilabel:
376
- loss_fn = torch.nn.BCEWithLogitsLoss(reduction='none').to(device)
377
- else:
378
- loss_fn = torch.nn.CrossEntropyLoss(reduction='none').to(device)
379
-
380
- # using EfficientNet training defaults
381
- # - batch norm momentum: 0.99
382
- # - optimizer: RMSProp, decay 0.9 and momentum 0.9
383
- # - epochs: 350
384
- # - learning rate: 0.256, decays by 0.97 every 2.4 epochs
385
- # - weight decay: 1e-5
386
- optimizer: torch.optim.Optimizer
387
- if 'efficientnet' in model_name:
388
- optimizer = torch.optim.RMSprop(model.parameters(), lr, alpha=0.9,
389
- momentum=0.9, weight_decay=weight_decay)
390
- lr_scheduler = torch.optim.lr_scheduler.StepLR(
391
- optimizer=optimizer, step_size=1, gamma=0.97 ** (1 / 2.4))
392
- else: # resnet
393
- optimizer = torch.optim.SGD(model.parameters(), lr, momentum=0.9,
394
- weight_decay=weight_decay)
395
- lr_scheduler = torch.optim.lr_scheduler.StepLR(
396
- optimizer=optimizer, step_size=8, gamma=0.1) # lower every 8 epochs
397
-
398
- best_epoch_metrics: dict[str, float] = {}
399
- for epoch in range(epochs):
400
- print(f'Epoch: {epoch}')
401
- writer.add_scalar('lr', lr_scheduler.get_last_lr()[0], epoch)
402
-
403
- if epoch > 0 and finetune == epoch:
404
- print('Turning off fine-tune!')
405
- set_finetune(model, model_name, finetune=False)
406
-
407
- print('- train:')
408
- train_metrics, train_heaps, train_cm = run_epoch(
409
- model, loader=loaders['train'], weighted=False, device=device,
410
- loss_fn=loss_fn, finetune=finetune > epoch, optimizer=optimizer,
411
- k_extreme=log_extreme_examples)
412
- train_metrics = prefix_all_keys(train_metrics, prefix='train/')
413
- log_run('train', epoch, writer, label_names,
414
- metrics=train_metrics, heaps=train_heaps, cm=train_cm)
415
- del train_heaps
416
-
417
- print('- val:')
418
- val_metrics, val_heaps, val_cm = run_epoch(
419
- model, loader=loaders['val'], weighted=label_weighted,
420
- device=device, loss_fn=loss_fn, k_extreme=log_extreme_examples)
421
- val_metrics = prefix_all_keys(val_metrics, prefix='val/')
422
- log_run('val', epoch, writer, label_names,
423
- metrics=val_metrics, heaps=val_heaps, cm=val_cm)
424
- del val_heaps
425
-
426
- lr_scheduler.step() # decrease the learning rate
427
-
428
- if val_metrics['val/acc_top1'] > best_epoch_metrics.get('val/acc_top1', 0): # pylint: disable=line-too-long
429
- filename = os.path.join(logdir, f'ckpt_{epoch}.pt')
430
- print(f'New best model! Saving checkpoint to {filename}')
431
- state = {
432
- 'epoch': epoch,
433
- 'model': getattr(model, 'module', model).state_dict(),
434
- 'val/acc': val_metrics['val/acc_top1'],
435
- 'optimizer': optimizer.state_dict()
436
- }
437
- torch.save(state, filename)
438
- best_epoch_metrics.update(train_metrics)
439
- best_epoch_metrics.update(val_metrics)
440
- best_epoch_metrics['epoch'] = epoch
441
-
442
- print('- test:')
443
- test_metrics, test_heaps, test_cm = run_epoch(
444
- model, loader=loaders['test'], weighted=label_weighted,
445
- device=device, loss_fn=loss_fn, k_extreme=log_extreme_examples)
446
- test_metrics = prefix_all_keys(test_metrics, prefix='test/')
447
- log_run('test', epoch, writer, label_names,
448
- metrics=test_metrics, heaps=test_heaps, cm=test_cm)
449
- del test_heaps
450
-
451
- # stop training after 8 epochs without improvement
452
- if epoch >= best_epoch_metrics['epoch'] + 8:
453
- break
454
-
455
- hparams_dict = {
456
- 'model_name': model_name,
457
- 'multilabel': multilabel,
458
- 'finetune': finetune,
459
- 'batch_size': batch_size,
460
- 'epochs': epochs
461
- }
462
- metric_dict = prefix_all_keys(best_epoch_metrics, prefix='hparam/')
463
- writer.add_hparams(hparam_dict=hparams_dict, metric_dict=metric_dict)
464
- writer.close()
465
-
466
- # do a complete evaluation run
467
- best_epoch = best_epoch_metrics['epoch']
468
- evaluate_model.main(
469
- params_json_path=params_json_path,
470
- ckpt_path=os.path.join(logdir, f'ckpt_{best_epoch}.pt'),
471
- output_dir=logdir, splits=evaluate_model.SPLITS)
472
-
473
-
474
- def log_run(split: str, epoch: int, writer: tensorboard.SummaryWriter,
475
- label_names: Sequence[str], metrics: MutableMapping[str, float],
476
- heaps: Mapping[str, Mapping[int, list[HeapItem]]] | None,
477
- cm: np.ndarray) -> None:
478
- """Logs the outputs (metrics, confusion matrix, tp/fp/fn images) from a
479
- single epoch run to Tensorboard.
480
-
481
- Args:
482
- metrics: dict, keys already prefixed with {split}/
483
- """
484
- per_label_recall = recall_from_confusion_matrix(cm, label_names)
485
- metrics.update(prefix_all_keys(per_label_recall, f'{split}/label_recall/'))
486
-
487
- # log metrics
488
- for metric, value in metrics.items():
489
- writer.add_scalar(metric, value, epoch)
490
-
491
- # log confusion matrix
492
- cm_fig = plot_utils.plot_confusion_matrix(cm, classes=label_names,
493
- normalize=True)
494
- cm_fig_img = fig_to_img(cm_fig)
495
- writer.add_image(tag=f'confusion_matrix/{split}', img_tensor=cm_fig_img,
496
- global_step=epoch, dataformats='HWC')
497
-
498
- # log tp/fp/fn images
499
- if heaps is not None:
500
- for heap_type, heap_dict in heaps.items():
501
- log_images_with_confidence(writer, heap_dict, label_names,
502
- epoch=epoch, tag=f'{split}/{heap_type}')
503
- writer.flush()
504
-
505
-
506
- def log_images_with_confidence(
507
- writer: tensorboard.SummaryWriter,
508
- heap_dict: Mapping[int, list[HeapItem]],
509
- label_names: Sequence[str],
510
- epoch: int,
511
- tag: str) -> None:
512
- """
513
- Note: performs image normalization in-place
514
-
515
- Args:
516
- writer: tensorboard.SummaryWriter
517
- heap_dict: dict, maps label_id to list of HeapItem, where each HeapItem
518
- data is a tuple (img, target, top3_conf, top3_preds, img_file)
519
- label_names: list of str, label names in order of label id
520
- epoch: int
521
- tag: str
522
- """
523
- # for every image: undo normalization, clamp to [0, 1], CHW -> HWC
524
- # - cannot be in-place, because the HeapItem might be in multiple heaps
525
- unnormalize = tv.transforms.Normalize(mean=-MEANS/STDS, std=1.0/STDS)
526
- for label_id, heap in heap_dict.items():
527
- label_name = label_names[label_id]
528
-
529
- imgs_list = []
530
- for item in sorted(heap, reverse=True): # sort largest to smallest
531
- img = item.data[0].float() # clamp() only supports fp32 on CPU
532
- img = unnormalize(img).clamp_(0, 1).permute(1, 2, 0)
533
- imgs_list.append((img, *item.data[1:]))
534
-
535
- fig, img_files = imgs_with_confidences(imgs_list, label_names)
536
-
537
- # writer.add_figure() has issues => using add_image() instead
538
- # writer.add_figure(f'{label_name}/{tag}', fig, global_step=epoch)
539
- writer.add_image(f'{label_name}/{tag}', fig_to_img(fig),
540
- global_step=epoch, dataformats='HWC')
541
- writer.add_text(f'{label_name}/{tag}_files', '\n\n'.join(img_files),
542
- global_step=epoch)
543
-
544
-
545
- def track_extreme_examples(tp_heaps: dict[int, list[HeapItem]],
546
- fp_heaps: dict[int, list[HeapItem]],
547
- fn_heaps: dict[int, list[HeapItem]],
548
- inputs: torch.Tensor,
549
- labels: torch.Tensor,
550
- img_files: Sequence[str],
551
- logits: torch.Tensor,
552
- k: int = 5) -> None:
553
- """Updates the k most extreme true-positive (tp), false-positive (fp), and
554
- false-negative (fn) examples with examples from this batch.
555
-
556
- Each HeapItem's data attribute is a tuple of:
557
- - img: torch.Tensor, shape [3, H, W], type float16, values in [0, 1]
558
- - label: int
559
- - top3_conf: list of float
560
- - top3_preds: list of float
561
- - img_file: str
562
-
563
- Args:
564
- *_heaps: dict, maps label_id (int) to heap of HeapItems
565
- inputs: torch.Tensor, shape [batch_size, 3, H, W]
566
- labels: torch.Tensor, shape [batch_size]
567
- img_files: list of str
568
- logits: torch.Tensor, shape [batch_size, num_classes]
569
- k: int, number of examples to track
570
- """
571
- with torch.no_grad():
572
- inputs = inputs.detach().to(device='cpu', dtype=torch.float16)
573
- labels_list = labels.tolist()
574
- batch_probs = torch.nn.functional.softmax(logits, dim=1).cpu()
575
- zipped = zip(inputs, labels_list, batch_probs, img_files) # all on CPU
576
- for img, label, confs, img_file in zipped:
577
- label_conf = confs[label].item()
578
-
579
- top3_conf, top3_preds = confs.topk(3)
580
- top3_conf = top3_conf.tolist()
581
- top3_preds = top3_preds.tolist()
582
-
583
- data = [img, label, top3_conf, top3_preds, img_file]
584
- if top3_preds[0] == label: # true positive
585
- item = HeapItem(priority=label_conf - top3_conf[1], data=data)
586
- add_to_heap(tp_heaps[label], item, k=k)
587
- else:
588
- # false positive for top3_pred[0]
589
- # false negative for label
590
- item = HeapItem(priority=top3_conf[0] - label_conf, data=data)
591
- add_to_heap(fp_heaps[top3_preds[0]], item, k=k)
592
- add_to_heap(fn_heaps[label], item, k=k)
593
-
594
-
595
- def correct(outputs: torch.Tensor, labels: torch.Tensor,
596
- weights: torch.Tensor | None = None,
597
- top: Sequence[int] = (1,)) -> dict[int, float]:
598
- """
599
- Args:
600
- outputs: torch.Tensor, shape [N, num_classes],
601
- either logits (pre-softmax) or probabilities
602
- labels: torch.Tensor, shape [N]
603
- weights: optional torch.Tensor, shape [N]
604
- top: tuple of int, list of values of k for calculating top-K accuracy
605
-
606
- Returns: dict, maps k to (weighted) # of correct predictions @ each k
607
- """
608
- with torch.no_grad():
609
- # preds and labels both have shape [N, k]
610
- _, preds = outputs.topk(k=max(top), dim=1, largest=True, sorted=True)
611
- labels = labels.view(-1, 1).expand_as(preds)
612
-
613
- corrects = preds.eq(labels).cumsum(dim=1) # shape [N, k]
614
- if weights is None:
615
- corrects = corrects.sum(dim=0) # shape [k]
616
- else:
617
- corrects = weights.matmul(corrects.to(weights.dtype)) # shape [k]
618
- tops = {k: corrects[k - 1].item() for k in top}
619
- return tops
620
-
621
-
622
- def run_epoch(model: torch.nn.Module,
623
- loader: torch.utils.data.DataLoader,
624
- weighted: bool,
625
- device: torch.device,
626
- top: Sequence[int] = (1, 3),
627
- loss_fn: torch.nn.Module | None = None,
628
- finetune: bool = False,
629
- optimizer: torch.optim.Optimizer | None = None,
630
- k_extreme: int = 0
631
- ) -> tuple[
632
- dict[str, float],
633
- dict[str, dict[int, list[HeapItem]]] | None,
634
- np.ndarray
635
- ]:
636
- """Runs for 1 epoch.
637
-
638
- Args:
639
- model: torch.nn.Module
640
- loader: torch.utils.data.DataLoader
641
- weighted: bool, whether to use sample weights in calculating loss and
642
- accuracy
643
- device: torch.device
644
- top: tuple of int, list of values of k for calculating top-K accuracy
645
- loss_fn: optional loss function, calculates per-example loss
646
- finetune: bool, if true sets model's dropout and BN layers to eval mode
647
- optimizer: optional optimizer
648
- k_extreme: int, # of tp/fp/fn examples to track for each label
649
-
650
- Returns:
651
- metrics: dict, metrics from epoch, contains keys:
652
- 'loss': float, mean per-example loss over entire epoch,
653
- only included if loss_fn is not None
654
- 'acc_top{k}': float, accuracy@k over the entire epoch
655
- heaps: dict, keys are ['tp', 'fp', 'fn'], values are heap_dicts,
656
- each heap_dict maps label_id (int) to a heap of <= 5 HeapItems with
657
- data attribute (img, target, top3_conf, top3_preds, img_file)
658
- - 'tp': priority is the difference between target confidence and
659
- 2nd highest confidence
660
- - 'fp': priority is the difference between highest confidence and
661
- target confidence
662
- - 'fn': same as 'fp'
663
- confusion_matrix: np.ndarray, shape [num_classes, num_classes],
664
- C[i, j] = # of samples with true label i, predicted as label j
665
- """
666
- if optimizer is not None:
667
- assert loss_fn is not None
668
-
669
- # if evaluating or finetuning, set dropout and BN layers to eval mode
670
- model.train(optimizer is not None and not finetune)
671
-
672
- if loss_fn is not None:
673
- losses = AverageMeter()
674
- accuracies_topk = {k: AverageMeter() for k in top} # acc@k
675
-
676
- # for each label, track k_extreme most-confident and least-confident images
677
- if k_extreme > 0:
678
- tp_heaps: dict[int, list[HeapItem]] = defaultdict(list)
679
- fp_heaps: dict[int, list[HeapItem]] = defaultdict(list)
680
- fn_heaps: dict[int, list[HeapItem]] = defaultdict(list)
681
-
682
- all_labels = np.zeros(len(loader.dataset), dtype=np.int32)
683
- all_preds = np.zeros_like(all_labels)
684
- end_i = 0
685
-
686
- tqdm_loader = tqdm.tqdm(loader)
687
- with torch.set_grad_enabled(optimizer is not None):
688
- for batch in tqdm_loader:
689
- if weighted:
690
- inputs, labels, img_files, weights = batch
691
- weights = weights.to(device, non_blocking=True)
692
- else:
693
- # even if batch contains sample weights, don't use them
694
- inputs, labels, img_files = batch[0:3]
695
- weights = None
696
-
697
- inputs = inputs.to(device, non_blocking=True)
698
-
699
- batch_size = labels.size(0)
700
- start_i = end_i
701
- end_i = start_i + batch_size
702
- all_labels[start_i:end_i] = labels
703
-
704
- desc = []
705
- labels = labels.to(device, non_blocking=True)
706
- outputs = model(inputs)
707
- all_preds[start_i:end_i] = outputs.detach().argmax(dim=1).cpu()
708
-
709
- if loss_fn is not None:
710
- loss = loss_fn(outputs, labels)
711
- if weights is not None:
712
- loss *= weights
713
- loss = loss.mean()
714
- losses.update(loss.item(), n=batch_size)
715
- desc.append(f'Loss {losses.val:.4f} ({losses.avg:.4f})')
716
- if optimizer is not None:
717
- optimizer.zero_grad()
718
- loss.backward()
719
- optimizer.step()
720
-
721
- top_correct = correct(outputs, labels, weights=weights, top=top)
722
- for k, acc in accuracies_topk.items():
723
- acc.update(top_correct[k] * (100. / batch_size), n=batch_size)
724
- desc.append(f'Acc@{k} {acc.val:.3f} ({acc.avg:.3f})')
725
- tqdm_loader.set_description(' '.join(desc))
726
-
727
- if k_extreme > 0:
728
- track_extreme_examples(tp_heaps, fp_heaps, fn_heaps, inputs,
729
- labels, img_files, outputs, k=k_extreme)
730
-
731
- num_classes = outputs.size(1)
732
- confusion_matrix = sklearn.metrics.confusion_matrix(
733
- all_labels, all_preds, labels=np.arange(num_classes))
734
-
735
- metrics = {}
736
- if loss_fn is not None:
737
- metrics['loss'] = losses.avg
738
- for k, acc in accuracies_topk.items():
739
- metrics[f'acc_top{k}'] = acc.avg
740
- heaps = None
741
- if k_extreme > 0:
742
- heaps = {'tp': tp_heaps, 'fp': fp_heaps, 'fn': fn_heaps}
743
- return metrics, heaps, confusion_matrix
744
-
745
-
746
- def _parse_args() -> argparse.Namespace:
747
- """Parses arguments."""
748
- parser = argparse.ArgumentParser(
749
- formatter_class=argparse.ArgumentDefaultsHelpFormatter,
750
- description='Trains classifier.')
751
- parser.add_argument(
752
- 'dataset_dir',
753
- help='path to directory containing: 1) classification dataset CSV, '
754
- '2) label index JSON, 3) splits JSON')
755
- parser.add_argument(
756
- 'cropped_images_dir',
757
- help='path to local directory where image crops are saved')
758
- parser.add_argument(
759
- '--multilabel', action='store_true',
760
- help='for multi-label, multi-class classification')
761
- parser.add_argument(
762
- '-m', '--model-name', default='efficientnet-b0',
763
- choices=VALID_MODELS,
764
- help='which EfficientNet or Resnet model')
765
- parser.add_argument(
766
- '--pretrained', nargs='?', const=True, default=False,
767
- help='start with ImageNet pretrained model or a specific checkpoint')
768
- parser.add_argument(
769
- '--finetune', type=int, default=0,
770
- help='only fine tune the final fully-connected layer for the first '
771
- '<finetune> epochs')
772
- parser.add_argument(
773
- '--label-weighted', action='store_true',
774
- help='weight training samples to balance labels')
775
- parser.add_argument(
776
- '--weight-by-detection-conf', nargs='?', const=True, default=False,
777
- help='weight training examples by detection confidence. '
778
- 'Optionally takes a .npz file for isotonic calibration.')
779
- parser.add_argument(
780
- '--epochs', type=int, default=0,
781
- help='number of epochs for training, 0 for eval-only')
782
- parser.add_argument(
783
- '--batch-size', type=int, default=256,
784
- help='batch size for both training and eval')
785
- parser.add_argument(
786
- '--lr', type=float,
787
- help='initial learning rate, defaults to (0.016 * batch_size / 256)')
788
- parser.add_argument(
789
- '--weight-decay', type=float, default=1e-5,
790
- help='weight decay')
791
- parser.add_argument(
792
- '--num-workers', type=int, default=8,
793
- help='# of workers for data loading')
794
- parser.add_argument(
795
- '--logdir', default='.',
796
- help='directory where TensorBoard logs and a params file are saved')
797
- parser.add_argument(
798
- '--log-extreme-examples', type=int, default=0,
799
- help='# of tp/fp/fn examples to log for each label and split per epoch')
800
- parser.add_argument(
801
- '--seed', type=int,
802
- help='random seed')
803
- return parser.parse_args()
804
-
805
-
806
- if __name__ == '__main__':
807
- args = _parse_args()
808
- if args.lr is None:
809
- args.lr = 0.016 * args.batch_size / 256 # based on TF models repo
810
- main(dataset_dir=args.dataset_dir,
811
- cropped_images_dir=args.cropped_images_dir,
812
- multilabel=args.multilabel,
813
- model_name=args.model_name,
814
- pretrained=args.pretrained,
815
- finetune=args.finetune,
816
- label_weighted=args.label_weighted,
817
- weight_by_detection_conf=args.weight_by_detection_conf,
818
- epochs=args.epochs,
819
- batch_size=args.batch_size,
820
- lr=args.lr,
821
- weight_decay=args.weight_decay,
822
- num_workers=args.num_workers,
823
- logdir=args.logdir,
824
- log_extreme_examples=args.log_extreme_examples,
825
- seed=args.seed)