megadetector 5.0.9__py3-none-any.whl → 5.0.11__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of megadetector might be problematic; see the package's registry page for more details.

Files changed (226)
  1. {megadetector-5.0.9.dist-info → megadetector-5.0.11.dist-info}/LICENSE +0 -0
  2. {megadetector-5.0.9.dist-info → megadetector-5.0.11.dist-info}/METADATA +12 -11
  3. megadetector-5.0.11.dist-info/RECORD +5 -0
  4. megadetector-5.0.11.dist-info/top_level.txt +1 -0
  5. api/__init__.py +0 -0
  6. api/batch_processing/__init__.py +0 -0
  7. api/batch_processing/api_core/__init__.py +0 -0
  8. api/batch_processing/api_core/batch_service/__init__.py +0 -0
  9. api/batch_processing/api_core/batch_service/score.py +0 -439
  10. api/batch_processing/api_core/server.py +0 -294
  11. api/batch_processing/api_core/server_api_config.py +0 -98
  12. api/batch_processing/api_core/server_app_config.py +0 -55
  13. api/batch_processing/api_core/server_batch_job_manager.py +0 -220
  14. api/batch_processing/api_core/server_job_status_table.py +0 -152
  15. api/batch_processing/api_core/server_orchestration.py +0 -360
  16. api/batch_processing/api_core/server_utils.py +0 -92
  17. api/batch_processing/api_core_support/__init__.py +0 -0
  18. api/batch_processing/api_core_support/aggregate_results_manually.py +0 -46
  19. api/batch_processing/api_support/__init__.py +0 -0
  20. api/batch_processing/api_support/summarize_daily_activity.py +0 -152
  21. api/batch_processing/data_preparation/__init__.py +0 -0
  22. api/batch_processing/data_preparation/manage_local_batch.py +0 -2391
  23. api/batch_processing/data_preparation/manage_video_batch.py +0 -327
  24. api/batch_processing/integration/digiKam/setup.py +0 -6
  25. api/batch_processing/integration/digiKam/xmp_integration.py +0 -465
  26. api/batch_processing/integration/eMammal/test_scripts/config_template.py +0 -5
  27. api/batch_processing/integration/eMammal/test_scripts/push_annotations_to_emammal.py +0 -126
  28. api/batch_processing/integration/eMammal/test_scripts/select_images_for_testing.py +0 -55
  29. api/batch_processing/postprocessing/__init__.py +0 -0
  30. api/batch_processing/postprocessing/add_max_conf.py +0 -64
  31. api/batch_processing/postprocessing/categorize_detections_by_size.py +0 -163
  32. api/batch_processing/postprocessing/combine_api_outputs.py +0 -249
  33. api/batch_processing/postprocessing/compare_batch_results.py +0 -958
  34. api/batch_processing/postprocessing/convert_output_format.py +0 -397
  35. api/batch_processing/postprocessing/load_api_results.py +0 -195
  36. api/batch_processing/postprocessing/md_to_coco.py +0 -310
  37. api/batch_processing/postprocessing/md_to_labelme.py +0 -330
  38. api/batch_processing/postprocessing/merge_detections.py +0 -401
  39. api/batch_processing/postprocessing/postprocess_batch_results.py +0 -1904
  40. api/batch_processing/postprocessing/remap_detection_categories.py +0 -170
  41. api/batch_processing/postprocessing/render_detection_confusion_matrix.py +0 -661
  42. api/batch_processing/postprocessing/repeat_detection_elimination/find_repeat_detections.py +0 -211
  43. api/batch_processing/postprocessing/repeat_detection_elimination/remove_repeat_detections.py +0 -82
  44. api/batch_processing/postprocessing/repeat_detection_elimination/repeat_detections_core.py +0 -1631
  45. api/batch_processing/postprocessing/separate_detections_into_folders.py +0 -731
  46. api/batch_processing/postprocessing/subset_json_detector_output.py +0 -696
  47. api/batch_processing/postprocessing/top_folders_to_bottom.py +0 -223
  48. api/synchronous/__init__.py +0 -0
  49. api/synchronous/api_core/animal_detection_api/__init__.py +0 -0
  50. api/synchronous/api_core/animal_detection_api/api_backend.py +0 -152
  51. api/synchronous/api_core/animal_detection_api/api_frontend.py +0 -266
  52. api/synchronous/api_core/animal_detection_api/config.py +0 -35
  53. api/synchronous/api_core/animal_detection_api/data_management/annotations/annotation_constants.py +0 -47
  54. api/synchronous/api_core/animal_detection_api/detection/detector_training/copy_checkpoints.py +0 -43
  55. api/synchronous/api_core/animal_detection_api/detection/detector_training/model_main_tf2.py +0 -114
  56. api/synchronous/api_core/animal_detection_api/detection/process_video.py +0 -543
  57. api/synchronous/api_core/animal_detection_api/detection/pytorch_detector.py +0 -304
  58. api/synchronous/api_core/animal_detection_api/detection/run_detector.py +0 -627
  59. api/synchronous/api_core/animal_detection_api/detection/run_detector_batch.py +0 -1029
  60. api/synchronous/api_core/animal_detection_api/detection/run_inference_with_yolov5_val.py +0 -581
  61. api/synchronous/api_core/animal_detection_api/detection/run_tiled_inference.py +0 -754
  62. api/synchronous/api_core/animal_detection_api/detection/tf_detector.py +0 -165
  63. api/synchronous/api_core/animal_detection_api/detection/video_utils.py +0 -495
  64. api/synchronous/api_core/animal_detection_api/md_utils/azure_utils.py +0 -174
  65. api/synchronous/api_core/animal_detection_api/md_utils/ct_utils.py +0 -262
  66. api/synchronous/api_core/animal_detection_api/md_utils/directory_listing.py +0 -251
  67. api/synchronous/api_core/animal_detection_api/md_utils/matlab_porting_tools.py +0 -97
  68. api/synchronous/api_core/animal_detection_api/md_utils/path_utils.py +0 -416
  69. api/synchronous/api_core/animal_detection_api/md_utils/process_utils.py +0 -110
  70. api/synchronous/api_core/animal_detection_api/md_utils/sas_blob_utils.py +0 -509
  71. api/synchronous/api_core/animal_detection_api/md_utils/string_utils.py +0 -59
  72. api/synchronous/api_core/animal_detection_api/md_utils/url_utils.py +0 -144
  73. api/synchronous/api_core/animal_detection_api/md_utils/write_html_image_list.py +0 -226
  74. api/synchronous/api_core/animal_detection_api/md_visualization/visualization_utils.py +0 -841
  75. api/synchronous/api_core/tests/__init__.py +0 -0
  76. api/synchronous/api_core/tests/load_test.py +0 -110
  77. classification/__init__.py +0 -0
  78. classification/aggregate_classifier_probs.py +0 -108
  79. classification/analyze_failed_images.py +0 -227
  80. classification/cache_batchapi_outputs.py +0 -198
  81. classification/create_classification_dataset.py +0 -627
  82. classification/crop_detections.py +0 -516
  83. classification/csv_to_json.py +0 -226
  84. classification/detect_and_crop.py +0 -855
  85. classification/efficientnet/__init__.py +0 -9
  86. classification/efficientnet/model.py +0 -415
  87. classification/efficientnet/utils.py +0 -610
  88. classification/evaluate_model.py +0 -520
  89. classification/identify_mislabeled_candidates.py +0 -152
  90. classification/json_to_azcopy_list.py +0 -63
  91. classification/json_validator.py +0 -695
  92. classification/map_classification_categories.py +0 -276
  93. classification/merge_classification_detection_output.py +0 -506
  94. classification/prepare_classification_script.py +0 -194
  95. classification/prepare_classification_script_mc.py +0 -228
  96. classification/run_classifier.py +0 -286
  97. classification/save_mislabeled.py +0 -110
  98. classification/train_classifier.py +0 -825
  99. classification/train_classifier_tf.py +0 -724
  100. classification/train_utils.py +0 -322
  101. data_management/__init__.py +0 -0
  102. data_management/annotations/__init__.py +0 -0
  103. data_management/annotations/annotation_constants.py +0 -34
  104. data_management/camtrap_dp_to_coco.py +0 -238
  105. data_management/cct_json_utils.py +0 -395
  106. data_management/cct_to_md.py +0 -176
  107. data_management/cct_to_wi.py +0 -289
  108. data_management/coco_to_labelme.py +0 -272
  109. data_management/coco_to_yolo.py +0 -662
  110. data_management/databases/__init__.py +0 -0
  111. data_management/databases/add_width_and_height_to_db.py +0 -33
  112. data_management/databases/combine_coco_camera_traps_files.py +0 -206
  113. data_management/databases/integrity_check_json_db.py +0 -477
  114. data_management/databases/subset_json_db.py +0 -115
  115. data_management/generate_crops_from_cct.py +0 -149
  116. data_management/get_image_sizes.py +0 -188
  117. data_management/importers/add_nacti_sizes.py +0 -52
  118. data_management/importers/add_timestamps_to_icct.py +0 -79
  119. data_management/importers/animl_results_to_md_results.py +0 -158
  120. data_management/importers/auckland_doc_test_to_json.py +0 -372
  121. data_management/importers/auckland_doc_to_json.py +0 -200
  122. data_management/importers/awc_to_json.py +0 -189
  123. data_management/importers/bellevue_to_json.py +0 -273
  124. data_management/importers/cacophony-thermal-importer.py +0 -796
  125. data_management/importers/carrizo_shrubfree_2018.py +0 -268
  126. data_management/importers/carrizo_trail_cam_2017.py +0 -287
  127. data_management/importers/cct_field_adjustments.py +0 -57
  128. data_management/importers/channel_islands_to_cct.py +0 -913
  129. data_management/importers/eMammal/copy_and_unzip_emammal.py +0 -180
  130. data_management/importers/eMammal/eMammal_helpers.py +0 -249
  131. data_management/importers/eMammal/make_eMammal_json.py +0 -223
  132. data_management/importers/ena24_to_json.py +0 -275
  133. data_management/importers/filenames_to_json.py +0 -385
  134. data_management/importers/helena_to_cct.py +0 -282
  135. data_management/importers/idaho-camera-traps.py +0 -1407
  136. data_management/importers/idfg_iwildcam_lila_prep.py +0 -294
  137. data_management/importers/jb_csv_to_json.py +0 -150
  138. data_management/importers/mcgill_to_json.py +0 -250
  139. data_management/importers/missouri_to_json.py +0 -489
  140. data_management/importers/nacti_fieldname_adjustments.py +0 -79
  141. data_management/importers/noaa_seals_2019.py +0 -181
  142. data_management/importers/pc_to_json.py +0 -365
  143. data_management/importers/plot_wni_giraffes.py +0 -123
  144. data_management/importers/prepare-noaa-fish-data-for-lila.py +0 -359
  145. data_management/importers/prepare_zsl_imerit.py +0 -131
  146. data_management/importers/rspb_to_json.py +0 -356
  147. data_management/importers/save_the_elephants_survey_A.py +0 -320
  148. data_management/importers/save_the_elephants_survey_B.py +0 -332
  149. data_management/importers/snapshot_safari_importer.py +0 -758
  150. data_management/importers/snapshot_safari_importer_reprise.py +0 -665
  151. data_management/importers/snapshot_serengeti_lila.py +0 -1067
  152. data_management/importers/snapshotserengeti/make_full_SS_json.py +0 -150
  153. data_management/importers/snapshotserengeti/make_per_season_SS_json.py +0 -153
  154. data_management/importers/sulross_get_exif.py +0 -65
  155. data_management/importers/timelapse_csv_set_to_json.py +0 -490
  156. data_management/importers/ubc_to_json.py +0 -399
  157. data_management/importers/umn_to_json.py +0 -507
  158. data_management/importers/wellington_to_json.py +0 -263
  159. data_management/importers/wi_to_json.py +0 -441
  160. data_management/importers/zamba_results_to_md_results.py +0 -181
  161. data_management/labelme_to_coco.py +0 -548
  162. data_management/labelme_to_yolo.py +0 -272
  163. data_management/lila/__init__.py +0 -0
  164. data_management/lila/add_locations_to_island_camera_traps.py +0 -97
  165. data_management/lila/add_locations_to_nacti.py +0 -147
  166. data_management/lila/create_lila_blank_set.py +0 -557
  167. data_management/lila/create_lila_test_set.py +0 -151
  168. data_management/lila/create_links_to_md_results_files.py +0 -106
  169. data_management/lila/download_lila_subset.py +0 -177
  170. data_management/lila/generate_lila_per_image_labels.py +0 -515
  171. data_management/lila/get_lila_annotation_counts.py +0 -170
  172. data_management/lila/get_lila_image_counts.py +0 -111
  173. data_management/lila/lila_common.py +0 -300
  174. data_management/lila/test_lila_metadata_urls.py +0 -132
  175. data_management/ocr_tools.py +0 -874
  176. data_management/read_exif.py +0 -681
  177. data_management/remap_coco_categories.py +0 -84
  178. data_management/remove_exif.py +0 -66
  179. data_management/resize_coco_dataset.py +0 -189
  180. data_management/wi_download_csv_to_coco.py +0 -246
  181. data_management/yolo_output_to_md_output.py +0 -441
  182. data_management/yolo_to_coco.py +0 -676
  183. detection/__init__.py +0 -0
  184. detection/detector_training/__init__.py +0 -0
  185. detection/detector_training/model_main_tf2.py +0 -114
  186. detection/process_video.py +0 -703
  187. detection/pytorch_detector.py +0 -337
  188. detection/run_detector.py +0 -779
  189. detection/run_detector_batch.py +0 -1219
  190. detection/run_inference_with_yolov5_val.py +0 -917
  191. detection/run_tiled_inference.py +0 -935
  192. detection/tf_detector.py +0 -188
  193. detection/video_utils.py +0 -606
  194. docs/source/conf.py +0 -43
  195. md_utils/__init__.py +0 -0
  196. md_utils/azure_utils.py +0 -174
  197. md_utils/ct_utils.py +0 -612
  198. md_utils/directory_listing.py +0 -246
  199. md_utils/md_tests.py +0 -968
  200. md_utils/path_utils.py +0 -1044
  201. md_utils/process_utils.py +0 -157
  202. md_utils/sas_blob_utils.py +0 -509
  203. md_utils/split_locations_into_train_val.py +0 -228
  204. md_utils/string_utils.py +0 -92
  205. md_utils/url_utils.py +0 -323
  206. md_utils/write_html_image_list.py +0 -225
  207. md_visualization/__init__.py +0 -0
  208. md_visualization/plot_utils.py +0 -293
  209. md_visualization/render_images_with_thumbnails.py +0 -275
  210. md_visualization/visualization_utils.py +0 -1537
  211. md_visualization/visualize_db.py +0 -551
  212. md_visualization/visualize_detector_output.py +0 -406
  213. megadetector-5.0.9.dist-info/RECORD +0 -224
  214. megadetector-5.0.9.dist-info/top_level.txt +0 -8
  215. taxonomy_mapping/__init__.py +0 -0
  216. taxonomy_mapping/map_lila_taxonomy_to_wi_taxonomy.py +0 -491
  217. taxonomy_mapping/map_new_lila_datasets.py +0 -154
  218. taxonomy_mapping/prepare_lila_taxonomy_release.py +0 -142
  219. taxonomy_mapping/preview_lila_taxonomy.py +0 -591
  220. taxonomy_mapping/retrieve_sample_image.py +0 -71
  221. taxonomy_mapping/simple_image_download.py +0 -218
  222. taxonomy_mapping/species_lookup.py +0 -834
  223. taxonomy_mapping/taxonomy_csv_checker.py +0 -159
  224. taxonomy_mapping/taxonomy_graph.py +0 -346
  225. taxonomy_mapping/validate_lila_category_mappings.py +0 -83
  226. {megadetector-5.0.9.dist-info → megadetector-5.0.11.dist-info}/WHEEL +0 -0
@@ -1,9 +0,0 @@
1
- __version__ = "0.7.0"
2
- from .model import EfficientNet, VALID_MODELS
3
- from .utils import (
4
- GlobalParams,
5
- BlockArgs,
6
- BlockDecoder,
7
- efficientnet,
8
- get_model_params,
9
- )
@@ -1,415 +0,0 @@
1
- """model.py - Model and module class for EfficientNet.
2
- They are built to mirror those in the official TensorFlow implementation.
3
- """
4
-
5
- # Author: lukemelas (github username)
6
- # Github repo: https://github.com/lukemelas/EfficientNet-PyTorch
7
- # With adjustments and added comments by workingcoder (github username).
8
-
9
- import torch
10
- from torch import nn
11
- from torch.nn import functional as F
12
- from .utils import (
13
- round_filters,
14
- round_repeats,
15
- drop_connect,
16
- get_same_padding_conv2d,
17
- get_model_params,
18
- efficientnet_params,
19
- load_pretrained_weights,
20
- Swish,
21
- MemoryEfficientSwish,
22
- calculate_output_image_size
23
- )
24
-
25
-
26
- VALID_MODELS = (
27
- 'efficientnet-b0', 'efficientnet-b1', 'efficientnet-b2', 'efficientnet-b3',
28
- 'efficientnet-b4', 'efficientnet-b5', 'efficientnet-b6', 'efficientnet-b7',
29
- 'efficientnet-b8',
30
-
31
- # Support the construction of 'efficientnet-l2' without pretrained weights
32
- 'efficientnet-l2'
33
- )
34
-
35
-
36
- class MBConvBlock(nn.Module):
37
- """Mobile Inverted Residual Bottleneck Block.
38
-
39
- Args:
40
- block_args (namedtuple): BlockArgs, defined in utils.py.
41
- global_params (namedtuple): GlobalParam, defined in utils.py.
42
- image_size (tuple or list): [image_height, image_width].
43
-
44
- References:
45
- [1] https://arxiv.org/abs/1704.04861 (MobileNet v1)
46
- [2] https://arxiv.org/abs/1801.04381 (MobileNet v2)
47
- [3] https://arxiv.org/abs/1905.02244 (MobileNet v3)
48
- """
49
-
50
- def __init__(self, block_args, global_params, image_size=None):
51
- super().__init__()
52
- self._block_args = block_args
53
- self._bn_mom = 1 - global_params.batch_norm_momentum # pytorch's difference from tensorflow
54
- self._bn_eps = global_params.batch_norm_epsilon
55
- self.has_se = (self._block_args.se_ratio is not None) and (0 < self._block_args.se_ratio <= 1)
56
- self.id_skip = block_args.id_skip # whether to use skip connection and drop connect
57
-
58
- # Expansion phase (Inverted Bottleneck)
59
- inp = self._block_args.input_filters # number of input channels
60
- oup = self._block_args.input_filters * self._block_args.expand_ratio # number of output channels
61
- if self._block_args.expand_ratio != 1:
62
- Conv2d = get_same_padding_conv2d(image_size=image_size)
63
- self._expand_conv = Conv2d(in_channels=inp, out_channels=oup, kernel_size=1, bias=False)
64
- self._bn0 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
65
- # image_size = calculate_output_image_size(image_size, 1) <-- this wouldn't modify image_size
66
-
67
- # Depthwise convolution phase
68
- k = self._block_args.kernel_size
69
- s = self._block_args.stride
70
- Conv2d = get_same_padding_conv2d(image_size=image_size)
71
- self._depthwise_conv = Conv2d(
72
- in_channels=oup, out_channels=oup, groups=oup, # groups makes it depthwise
73
- kernel_size=k, stride=s, bias=False)
74
- self._bn1 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
75
- image_size = calculate_output_image_size(image_size, s)
76
-
77
- # Squeeze and Excitation layer, if desired
78
- if self.has_se:
79
- Conv2d = get_same_padding_conv2d(image_size=(1, 1))
80
- num_squeezed_channels = max(1, int(self._block_args.input_filters * self._block_args.se_ratio))
81
- self._se_reduce = Conv2d(in_channels=oup, out_channels=num_squeezed_channels, kernel_size=1)
82
- self._se_expand = Conv2d(in_channels=num_squeezed_channels, out_channels=oup, kernel_size=1)
83
-
84
- # Pointwise convolution phase
85
- final_oup = self._block_args.output_filters
86
- Conv2d = get_same_padding_conv2d(image_size=image_size)
87
- self._project_conv = Conv2d(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False)
88
- self._bn2 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)
89
- self._swish = MemoryEfficientSwish()
90
-
91
- def forward(self, inputs, drop_connect_rate=None):
92
- """MBConvBlock's forward function.
93
-
94
- Args:
95
- inputs (tensor): Input tensor.
96
- drop_connect_rate (bool): Drop connect rate (float, between 0 and 1).
97
-
98
- Returns:
99
- Output of this block after processing.
100
- """
101
-
102
- # Expansion and Depthwise Convolution
103
- x = inputs
104
- if self._block_args.expand_ratio != 1:
105
- x = self._expand_conv(inputs)
106
- x = self._bn0(x)
107
- x = self._swish(x)
108
-
109
- x = self._depthwise_conv(x)
110
- x = self._bn1(x)
111
- x = self._swish(x)
112
-
113
- # Squeeze and Excitation
114
- if self.has_se:
115
- x_squeezed = F.adaptive_avg_pool2d(x, 1)
116
- x_squeezed = self._se_reduce(x_squeezed)
117
- x_squeezed = self._swish(x_squeezed)
118
- x_squeezed = self._se_expand(x_squeezed)
119
- x = torch.sigmoid(x_squeezed) * x
120
-
121
- # Pointwise Convolution
122
- x = self._project_conv(x)
123
- x = self._bn2(x)
124
-
125
- # Skip connection and drop connect
126
- input_filters, output_filters = self._block_args.input_filters, self._block_args.output_filters
127
- if self.id_skip and self._block_args.stride == 1 and input_filters == output_filters:
128
- # The combination of skip connection and drop connect brings about stochastic depth.
129
- if drop_connect_rate:
130
- x = drop_connect(x, p=drop_connect_rate, training=self.training)
131
- x = x + inputs # skip connection
132
- return x
133
-
134
- def set_swish(self, memory_efficient=True):
135
- """Sets swish function as memory efficient (for training) or standard (for export).
136
-
137
- Args:
138
- memory_efficient (bool): Whether to use memory-efficient version of swish.
139
- """
140
- self._swish = MemoryEfficientSwish() if memory_efficient else Swish()
141
-
142
-
143
- class EfficientNet(nn.Module):
144
- """EfficientNet model.
145
- Most easily loaded with the .from_name or .from_pretrained methods.
146
-
147
- Args:
148
- blocks_args (list[namedtuple]): A list of BlockArgs to construct blocks.
149
- global_params (namedtuple): A set of GlobalParams shared between blocks.
150
-
151
- References:
152
- [1] https://arxiv.org/abs/1905.11946 (EfficientNet)
153
-
154
- Example:
155
-
156
-
157
- import torch
158
- >>> from efficientnet.model import EfficientNet
159
- >>> inputs = torch.rand(1, 3, 224, 224)
160
- >>> model = EfficientNet.from_pretrained('efficientnet-b0')
161
- >>> model.eval()
162
- >>> outputs = model(inputs)
163
- """
164
-
165
- def __init__(self, blocks_args=None, global_params=None):
166
- super().__init__()
167
- assert isinstance(blocks_args, list), 'blocks_args should be a list'
168
- assert len(blocks_args) > 0, 'block args must be greater than 0'
169
- self._global_params = global_params
170
- self._blocks_args = blocks_args
171
-
172
- # Batch norm parameters
173
- bn_mom = 1 - self._global_params.batch_norm_momentum
174
- bn_eps = self._global_params.batch_norm_epsilon
175
-
176
- # Get stem static or dynamic convolution depending on image size
177
- image_size = global_params.image_size
178
- Conv2d = get_same_padding_conv2d(image_size=image_size)
179
-
180
- # Stem
181
- in_channels = 3 # rgb
182
- out_channels = round_filters(32, self._global_params) # number of output channels
183
- self._conv_stem = Conv2d(in_channels, out_channels, kernel_size=3, stride=2, bias=False)
184
- self._bn0 = nn.BatchNorm2d(num_features=out_channels, momentum=bn_mom, eps=bn_eps)
185
- image_size = calculate_output_image_size(image_size, 2)
186
-
187
- # Build blocks
188
- self._blocks = nn.ModuleList([])
189
- for block_args in self._blocks_args:
190
-
191
- # Update block input and output filters based on depth multiplier.
192
- block_args = block_args._replace(
193
- input_filters=round_filters(block_args.input_filters, self._global_params),
194
- output_filters=round_filters(block_args.output_filters, self._global_params),
195
- num_repeat=round_repeats(block_args.num_repeat, self._global_params)
196
- )
197
-
198
- # The first block needs to take care of stride and filter size increase.
199
- self._blocks.append(MBConvBlock(block_args, self._global_params, image_size=image_size))
200
- image_size = calculate_output_image_size(image_size, block_args.stride)
201
- if block_args.num_repeat > 1: # modify block_args to keep same output size
202
- block_args = block_args._replace(input_filters=block_args.output_filters, stride=1)
203
- for _ in range(block_args.num_repeat - 1):
204
- self._blocks.append(MBConvBlock(block_args, self._global_params, image_size=image_size))
205
- # image_size = calculate_output_image_size(image_size, block_args.stride) # stride = 1
206
-
207
- # Head
208
- in_channels = block_args.output_filters # output of final block
209
- out_channels = round_filters(1280, self._global_params)
210
- Conv2d = get_same_padding_conv2d(image_size=image_size)
211
- self._conv_head = Conv2d(in_channels, out_channels, kernel_size=1, bias=False)
212
- self._bn1 = nn.BatchNorm2d(num_features=out_channels, momentum=bn_mom, eps=bn_eps)
213
-
214
- # Final linear layer
215
- self._avg_pooling = nn.AdaptiveAvgPool2d(1)
216
- self._dropout = nn.Dropout(self._global_params.dropout_rate)
217
- self._fc = nn.Linear(out_channels, self._global_params.num_classes)
218
- self._swish = MemoryEfficientSwish()
219
-
220
- def set_swish(self, memory_efficient=True):
221
- """Sets swish function as memory efficient (for training) or standard (for export).
222
-
223
- Args:
224
- memory_efficient (bool): Whether to use memory-efficient version of swish.
225
-
226
- """
227
- self._swish = MemoryEfficientSwish() if memory_efficient else Swish()
228
- for block in self._blocks:
229
- block.set_swish(memory_efficient)
230
-
231
- def extract_endpoints(self, inputs):
232
- """Use convolution layer to extract features
233
- from reduction levels i in [1, 2, 3, 4, 5].
234
-
235
- Args:
236
- inputs (tensor): Input tensor.
237
-
238
- Returns:
239
- Dictionary of last intermediate features
240
- with reduction levels i in [1, 2, 3, 4, 5].
241
- Example:
242
- >>> import torch
243
- >>> from efficientnet.model import EfficientNet
244
- >>> inputs = torch.rand(1, 3, 224, 224)
245
- >>> model = EfficientNet.from_pretrained('efficientnet-b0')
246
- >>> endpoints = model.extract_endpoints(inputs)
247
- >>> print(endpoints['reduction_1'].shape) # torch.Size([1, 16, 112, 112])
248
- >>> print(endpoints['reduction_2'].shape) # torch.Size([1, 24, 56, 56])
249
- >>> print(endpoints['reduction_3'].shape) # torch.Size([1, 40, 28, 28])
250
- >>> print(endpoints['reduction_4'].shape) # torch.Size([1, 112, 14, 14])
251
- >>> print(endpoints['reduction_5'].shape) # torch.Size([1, 1280, 7, 7])
252
- """
253
- endpoints = dict()
254
-
255
- # Stem
256
- x = self._swish(self._bn0(self._conv_stem(inputs)))
257
- prev_x = x
258
-
259
- # Blocks
260
- for idx, block in enumerate(self._blocks):
261
- drop_connect_rate = self._global_params.drop_connect_rate
262
- if drop_connect_rate:
263
- drop_connect_rate *= float(idx) / len(self._blocks) # scale drop connect_rate
264
- x = block(x, drop_connect_rate=drop_connect_rate)
265
- if prev_x.size(2) > x.size(2):
266
- endpoints['reduction_{}'.format(len(endpoints)+1)] = prev_x
267
- prev_x = x
268
-
269
- # Head
270
- x = self._swish(self._bn1(self._conv_head(x)))
271
- endpoints['reduction_{}'.format(len(endpoints)+1)] = x
272
-
273
- return endpoints
274
-
275
- def extract_features(self, inputs):
276
- """use convolution layer to extract feature .
277
-
278
- Args:
279
- inputs (tensor): Input tensor.
280
-
281
- Returns:
282
- Output of the final convolution
283
- layer in the efficientnet model.
284
- """
285
- # Stem
286
- x = self._swish(self._bn0(self._conv_stem(inputs)))
287
-
288
- # Blocks
289
- for idx, block in enumerate(self._blocks):
290
- drop_connect_rate = self._global_params.drop_connect_rate
291
- if drop_connect_rate:
292
- drop_connect_rate *= float(idx) / len(self._blocks) # scale drop connect_rate
293
- x = block(x, drop_connect_rate=drop_connect_rate)
294
-
295
- # Head
296
- x = self._swish(self._bn1(self._conv_head(x)))
297
-
298
- return x
299
-
300
- def forward(self, inputs):
301
- """EfficientNet's forward function.
302
- Calls extract_features to extract features, applies final linear layer, and returns logits.
303
-
304
- Args:
305
- inputs (tensor): Input tensor.
306
-
307
- Returns:
308
- Output of this model after processing.
309
- """
310
- # Convolution layers
311
- x = self.extract_features(inputs)
312
- # Pooling and final linear layer
313
- x = self._avg_pooling(x)
314
- if self._global_params.include_top:
315
- x = x.flatten(start_dim=1)
316
- x = self._dropout(x)
317
- x = self._fc(x)
318
- return x
319
-
320
- @classmethod
321
- def from_name(cls, model_name, in_channels=3, **override_params):
322
- """create an efficientnet model according to name.
323
-
324
- Args:
325
- model_name (str): Name for efficientnet.
326
- in_channels (int): Input data's channel number.
327
- override_params (other key word params):
328
- Params to override model's global_params.
329
- Optional key:
330
- 'width_coefficient', 'depth_coefficient',
331
- 'image_size', 'dropout_rate',
332
- 'num_classes', 'batch_norm_momentum',
333
- 'batch_norm_epsilon', 'drop_connect_rate',
334
- 'depth_divisor', 'min_depth'
335
-
336
- Returns:
337
- An efficientnet model.
338
- """
339
- cls._check_model_name_is_valid(model_name)
340
- blocks_args, global_params = get_model_params(model_name, override_params)
341
- model = cls(blocks_args, global_params)
342
- model._change_in_channels(in_channels)
343
- return model
344
-
345
- @classmethod
346
- def from_pretrained(cls, model_name, weights_path=None, advprop=False,
347
- in_channels=3, num_classes=1000, **override_params):
348
- """create an efficientnet model according to name.
349
-
350
- Args:
351
- model_name (str): Name for efficientnet.
352
- weights_path (None or str):
353
- str: path to pretrained weights file on the local disk.
354
- None: use pretrained weights downloaded from the Internet.
355
- advprop (bool):
356
- Whether to load pretrained weights
357
- trained with advprop (valid when weights_path is None).
358
- in_channels (int): Input data's channel number.
359
- num_classes (int):
360
- Number of categories for classification.
361
- It controls the output size for final linear layer.
362
- override_params (other key word params):
363
- Params to override model's global_params.
364
- Optional key:
365
- 'width_coefficient', 'depth_coefficient',
366
- 'image_size', 'dropout_rate',
367
- 'batch_norm_momentum',
368
- 'batch_norm_epsilon', 'drop_connect_rate',
369
- 'depth_divisor', 'min_depth'
370
-
371
- Returns:
372
- A pretrained efficientnet model.
373
- """
374
- model = cls.from_name(model_name, num_classes=num_classes, **override_params)
375
- load_pretrained_weights(model, model_name, weights_path=weights_path, load_fc=(num_classes == 1000), advprop=advprop)
376
- model._change_in_channels(in_channels)
377
- return model
378
-
379
- @classmethod
380
- def get_image_size(cls, model_name):
381
- """Get the input image size for a given efficientnet model.
382
-
383
- Args:
384
- model_name (str): Name for efficientnet.
385
-
386
- Returns:
387
- Input image size (resolution).
388
- """
389
- cls._check_model_name_is_valid(model_name)
390
- _, _, res, _ = efficientnet_params(model_name)
391
- return res
392
-
393
- @classmethod
394
- def _check_model_name_is_valid(cls, model_name):
395
- """Validates model name.
396
-
397
- Args:
398
- model_name (str): Name for efficientnet.
399
-
400
- Returns:
401
- bool: Is a valid name or not.
402
- """
403
- if model_name not in VALID_MODELS:
404
- raise ValueError('model_name should be one of: ' + ', '.join(VALID_MODELS))
405
-
406
- def _change_in_channels(self, in_channels):
407
- """Adjust model's first convolution layer to in_channels, if in_channels not equals 3.
408
-
409
- Args:
410
- in_channels (int): Input data's channel number.
411
- """
412
- if in_channels != 3:
413
- Conv2d = get_same_padding_conv2d(image_size=self._global_params.image_size)
414
- out_channels = round_filters(32, self._global_params)
415
- self._conv_stem = Conv2d(in_channels, out_channels, kernel_size=3, stride=2, bias=False)