keras-hub-nightly 0.16.1.dev202410020340__py3-none-any.whl → 0.16.1.dev202410030339__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- keras_hub/api/layers/__init__.py +0 -3
- keras_hub/api/models/__init__.py +1 -1
- keras_hub/src/layers/preprocessing/audio_converter.py +3 -7
- keras_hub/src/layers/preprocessing/image_converter.py +164 -34
- keras_hub/src/models/backbone.py +3 -9
- keras_hub/src/models/csp_darknet/csp_darknet_image_classifier.py +0 -109
- keras_hub/src/models/densenet/densenet_image_classifier.py +0 -128
- keras_hub/src/models/densenet/densenet_image_converter.py +2 -4
- keras_hub/src/models/feature_pyramid_backbone.py +1 -1
- keras_hub/src/models/image_classifier.py +147 -2
- keras_hub/src/models/image_classifier_preprocessor.py +3 -3
- keras_hub/src/models/image_segmenter.py +0 -5
- keras_hub/src/models/mix_transformer/mix_transformer_classifier.py +0 -109
- keras_hub/src/models/mobilenet/mobilenet_image_classifier.py +0 -92
- keras_hub/src/models/pali_gemma/pali_gemma_image_converter.py +2 -4
- keras_hub/src/models/preprocessor.py +3 -5
- keras_hub/src/models/resnet/resnet_backbone.py +1 -11
- keras_hub/src/models/resnet/resnet_image_classifier.py +0 -137
- keras_hub/src/models/resnet/resnet_image_converter.py +2 -4
- keras_hub/src/models/sam/__init__.py +5 -0
- keras_hub/src/models/sam/sam_image_converter.py +2 -4
- keras_hub/src/models/sam/sam_image_segmenter_preprocessor.py +11 -1
- keras_hub/src/models/sam/sam_presets.py +3 -3
- keras_hub/src/models/task.py +23 -25
- keras_hub/src/models/vgg/vgg_backbone.py +1 -20
- keras_hub/src/models/vgg/vgg_image_classifier.py +108 -29
- keras_hub/src/tokenizers/tokenizer.py +3 -6
- keras_hub/src/utils/preset_utils.py +103 -61
- keras_hub/src/utils/timm/preset_loader.py +8 -9
- keras_hub/src/version_utils.py +1 -1
- {keras_hub_nightly-0.16.1.dev202410020340.dist-info → keras_hub_nightly-0.16.1.dev202410030339.dist-info}/METADATA +1 -1
- {keras_hub_nightly-0.16.1.dev202410020340.dist-info → keras_hub_nightly-0.16.1.dev202410030339.dist-info}/RECORD +34 -35
- keras_hub/src/layers/preprocessing/resizing_image_converter.py +0 -138
- {keras_hub_nightly-0.16.1.dev202410020340.dist-info → keras_hub_nightly-0.16.1.dev202410030339.dist-info}/WHEEL +0 -0
- {keras_hub_nightly-0.16.1.dev202410020340.dist-info → keras_hub_nightly-0.16.1.dev202410030339.dist-info}/top_level.txt +0 -0
keras_hub/api/layers/__init__.py
CHANGED
@@ -33,9 +33,6 @@ from keras_hub.src.layers.preprocessing.multi_segment_packer import (
 )
 from keras_hub.src.layers.preprocessing.random_deletion import RandomDeletion
 from keras_hub.src.layers.preprocessing.random_swap import RandomSwap
-from keras_hub.src.layers.preprocessing.resizing_image_converter import (
-    ResizingImageConverter,
-)
 from keras_hub.src.layers.preprocessing.start_end_packer import StartEndPacker
 from keras_hub.src.models.densenet.densenet_image_converter import (
     DenseNetImageConverter,
keras_hub/api/models/__init__.py
CHANGED
@@ -254,7 +254,7 @@ from keras_hub.src.models.roberta.roberta_tokenizer import RobertaTokenizer
 from keras_hub.src.models.sam.sam_backbone import SAMBackbone
 from keras_hub.src.models.sam.sam_image_segmenter import SAMImageSegmenter
 from keras_hub.src.models.sam.sam_image_segmenter_preprocessor import (
-    SAMImageSegmenterPreprocessor
+    SAMImageSegmenterPreprocessor,
 )
 from keras_hub.src.models.seq_2_seq_lm import Seq2SeqLM
 from keras_hub.src.models.seq_2_seq_lm_preprocessor import Seq2SeqLMPreprocessor
keras_hub/src/layers/preprocessing/audio_converter.py
CHANGED
@@ -2,11 +2,10 @@ from keras_hub.src.api_export import keras_hub_export
 from keras_hub.src.layers.preprocessing.preprocessing_layer import (
     PreprocessingLayer,
 )
-from keras_hub.src.utils.preset_utils import AUDIO_CONVERTER_CONFIG_FILE
 from keras_hub.src.utils.preset_utils import builtin_presets
 from keras_hub.src.utils.preset_utils import find_subclass
 from keras_hub.src.utils.preset_utils import get_preset_loader
-from keras_hub.src.utils.preset_utils import save_serialized_object
+from keras_hub.src.utils.preset_utils import get_preset_saver
 from keras_hub.src.utils.python_utils import classproperty


@@ -101,8 +100,5 @@ class AudioConverter(PreprocessingLayer):
         Args:
             preset_dir: The path to the local model preset directory.
         """
-        save_serialized_object(
-            self,
-            preset_dir,
-            config_file=AUDIO_CONVERTER_CONFIG_FILE,
-        )
+        saver = get_preset_saver(preset_dir)
+        saver.save_audio_converter(self)
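Taken together with the `backbone.py` and `image_converter.py` changes below, serialization now funnels through one `get_preset_saver` entry point instead of per-class helpers. A minimal sketch of the public API this preserves (the preset name and output path are illustrative, not taken from the diff):

```python
import keras_hub

# Load a bundled audio converter preset ("whisper_tiny_en" is illustrative).
converter = keras_hub.layers.AudioConverter.from_preset("whisper_tiny_en")

# save_to_preset() now delegates to get_preset_saver().save_audio_converter().
converter.save_to_preset("./my_audio_preset")
```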
keras_hub/src/layers/preprocessing/image_converter.py
CHANGED
@@ -1,47 +1,184 @@
+import math
+
+import keras
+import numpy as np
+from keras import ops
+
 from keras_hub.src.api_export import keras_hub_export
 from keras_hub.src.layers.preprocessing.preprocessing_layer import (
     PreprocessingLayer,
 )
-from keras_hub.src.utils.preset_utils import IMAGE_CONVERTER_CONFIG_FILE
+from keras_hub.src.utils.keras_utils import standardize_data_format
 from keras_hub.src.utils.preset_utils import builtin_presets
 from keras_hub.src.utils.preset_utils import find_subclass
 from keras_hub.src.utils.preset_utils import get_preset_loader
-from keras_hub.src.utils.preset_utils import save_serialized_object
+from keras_hub.src.utils.preset_utils import get_preset_saver
 from keras_hub.src.utils.python_utils import classproperty
+from keras_hub.src.utils.tensor_utils import preprocessing_function


 @keras_hub_export("keras_hub.layers.ImageConverter")
 class ImageConverter(PreprocessingLayer):
-    """
+    """Preprocess raw images into model ready inputs.
+
+    This class converts from raw images to model ready inputs. This conversion
+    proceeds in the following steps:

-
-
-
-
-
+    1. Resize the image using to `image_size`. If `image_size` is `None`, this
+       step will be skipped.
+    2. Rescale the image by multiplying by `scale`, which can be either global
+       or per channel. If `scale` is `None`, this step will be skipped.
+    3. Offset the image by adding `offset`, which can be either global
+       or per channel. If `offset` is `None`, this step will be skipped.

     The layer will take as input a raw image tensor in the channels last or
     channels first format, and output a preprocessed image input for modeling.
-
-
-
+    This tensor can be batched (rank 4), or unbatched (rank 3).
+
+    This layer can be used with the `from_preset()` constructor to load a layer
+    that will rescale and resize an image for a specific pretrained model.
+    Using the layer this way allows writing preprocessing code that does not
+    need updating when switching between model checkpoints.
+
+    Args:
+        image_size: `(int, int)` tuple or `None`. The output size of the image,
+            not including the channels axis. If `None`, the input will not be
+            resized.
+        scale: float, tuple of floats, or `None`. The scale to apply to the
+            inputs. If `scale` is a single float, the entire input will be
+            multiplied by `scale`. If `scale` is a tuple, it's assumed to
+            contain per-channel scale value multiplied against each channel of
+            the input images. If `scale` is `None`, no scaling is applied.
+        offset: float, tuple of floats, or `None`. The offset to apply to the
+            inputs. If `offset` is a single float, the entire input will be
+            summed with `offset`. If `offset` is a tuple, it's assumed to
+            contain per-channel offset value summed against each channel of the
+            input images. If `offset` is `None`, no scaling is applied.
+        crop_to_aspect_ratio: If `True`, resize the images without aspect
+            ratio distortion. When the original aspect ratio differs
+            from the target aspect ratio, the output image will be
+            cropped so as to return the
+            largest possible window in the image (of size `(height, width)`)
+            that matches the target aspect ratio. By default
+            (`crop_to_aspect_ratio=False`), aspect ratio may not be preserved.
+        interpolation: String, the interpolation method.
+            Supports `"bilinear"`, `"nearest"`, `"bicubic"`,
+            `"lanczos3"`, `"lanczos5"`. Defaults to `"bilinear"`.
+        data_format: String, either `"channels_last"` or `"channels_first"`.
+            The ordering of the dimensions in the inputs. `"channels_last"`
+            corresponds to inputs with shape `(batch, height, width, channels)`
+            while `"channels_first"` corresponds to inputs with shape
+            `(batch, channels, height, width)`. It defaults to the
+            `image_data_format` value found in your Keras config file at
+            `~/.keras/keras.json`. If you never set it, then it will be
+            `"channels_last"`.

     Examples:
     ```python
-    # Resize images
-    converter = keras_hub.layers.ImageConverter
-
-
-
-    converter(np.
+    # Resize raw images and scale them to [0, 1].
+    converter = keras_hub.layers.ImageConverter(
+        image_size=(128, 128),
+        scale=1. / 255,
+    )
+    converter(np.random.randint(0, 256, size=(2, 512, 512, 3)))
+
+    # Resize images to the specific size needed for a PaliGemma preset.
+    converter = keras_hub.layers.ImageConverter.from_preset(
+        "pali_gemma_3b_224"
+    )
+    converter(np.random.randint(0, 256, size=(2, 512, 512, 3)))
     ```
     """

     backbone_cls = None

+    def __init__(
+        self,
+        image_size=None,
+        scale=None,
+        offset=None,
+        crop_to_aspect_ratio=True,
+        interpolation="bilinear",
+        data_format=None,
+        **kwargs,
+    ):
+        # TODO: old arg names. Delete this block after resaving Kaggle assets.
+        if "height" in kwargs and "width" in kwargs:
+            image_size = (kwargs.pop("height"), kwargs.pop("width"))
+        if "variance" in kwargs and "mean" in kwargs:
+            std = [math.sqrt(v) for v in kwargs.pop("variance")]
+            scale = [scale / s for s in std]
+            offset = [-m / s for m, s in zip(kwargs.pop("mean"), std)]
+
+        super().__init__(**kwargs)
+
+        # Create the `Resizing` layer here even if it's not being used. That
+        # allows us to make `image_size` a settable property.
+        self.resizing = keras.layers.Resizing(
+            height=image_size[0] if image_size else None,
+            width=image_size[1] if image_size else None,
+            crop_to_aspect_ratio=crop_to_aspect_ratio,
+            interpolation=interpolation,
+            data_format=data_format,
+            dtype=self.dtype_policy,
+            name="resizing",
+        )
+        self.scale = scale
+        self.offset = offset
+        self.crop_to_aspect_ratio = crop_to_aspect_ratio
+        self.interpolation = interpolation
+        self.data_format = standardize_data_format(data_format)
+
+    @property
     def image_size(self):
-        """
-
+        """Settable tuple of `(height, width)` ints. The output image shape."""
+        if self.resizing.height is None:
+            return None
+        return (self.resizing.height, self.resizing.width)
+
+    @image_size.setter
+    def image_size(self, value):
+        if value is None:
+            value = (None, None)
+        self.resizing.height = value[0]
+        self.resizing.width = value[1]
+
+    @preprocessing_function
+    def call(self, inputs):
+        if self.image_size is not None:
+            x = self.resizing(inputs)
+        if self.scale is not None:
+            x = x * self._expand_non_channel_dims(self.scale, x)
+        if self.offset is not None:
+            x = x + self._expand_non_channel_dims(self.offset, x)
+        return x
+
+    def _expand_non_channel_dims(self, value, inputs):
+        unbatched = len(ops.shape(inputs)) == 3
+        channels_first = self.data_format == "channels_first"
+        if unbatched:
+            broadcast_dims = (1, 2) if channels_first else (0, 1)
+        else:
+            broadcast_dims = (0, 2, 3) if channels_first else (0, 1, 2)
+        # If inputs are not a tensor type, return a numpy array.
+        # This might happen when running under tf.data.
+        if ops.is_tensor(inputs):
+            return ops.expand_dims(value, broadcast_dims)
+        else:
+            return np.expand_dims(value, broadcast_dims)
+
+    def get_config(self):
+        config = super().get_config()
+        config.update(
+            {
+                "image_size": self.image_size,
+                "scale": self.scale,
+                "offset": self.offset,
+                "interpolation": self.interpolation,
+                "crop_to_aspect_ratio": self.crop_to_aspect_ratio,
+            }
+        )
+        return config

     @classproperty
     def presets(cls):
@@ -69,13 +206,6 @@ class ImageConverter(PreprocessingLayer):
         You can run `cls.presets.keys()` to list all built-in presets available
         on the class.

-        This constructor can be called in one of two ways. Either from the base
-        class like `keras_hub.models.ImageConverter.from_preset()`, or from a
-        model class like
-        `keras_hub.models.PaliGemmaImageConverter.from_preset()`. If calling
-        from the base class, the subclass of the returning object will be
-        inferred from the config in the preset directory.
-
         Args:
             preset: string. A built-in preset identifier, a Kaggle Models
                 handle, a Hugging Face handle, or a path to a local directory.
@@ -85,17 +215,20 @@ class ImageConverter(PreprocessingLayer):

         Examples:
         ```python
+        batch = np.random.randint(0, 256, size=(2, 512, 512, 3))
+
         # Resize images for `"pali_gemma_3b_224"`.
         converter = keras_hub.layers.ImageConverter.from_preset(
             "pali_gemma_3b_224"
         )
-        converter(
-
+        converter(batch) # Output shape: (2, 224, 224, 3)
+
+        # Resize images for `"pali_gemma_3b_448"` without cropping.
         converter = keras_hub.layers.ImageConverter.from_preset(
             "pali_gemma_3b_448",
             crop_to_aspect_ratio=False,
         )
-        converter(
+        converter(batch) # Output shape: (2, 448, 448, 3)
         ```
         """
         loader = get_preset_loader(preset)
@@ -110,8 +243,5 @@ class ImageConverter(PreprocessingLayer):
         Args:
             preset_dir: The path to the local model preset directory.
         """
-        save_serialized_object(
-            self,
-            preset_dir,
-            config_file=IMAGE_CONVERTER_CONFIG_FILE,
-        )
+        saver = get_preset_saver(preset_dir)
+        saver.save_image_converter(self)
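The `variance`/`mean` backward-compatibility block in `__init__` above folds the old three-term normalization into the layer's single scale-and-offset pass: `(x * scale - mean) / std` becomes `x * (scale / std) + (-mean / std)`. A quick numpy check of that algebra (the ImageNet-style constants are illustrative, not taken from the diff):

```python
import numpy as np

# Old-style arguments that resaved assets may still carry (illustrative).
scale = 1.0 / 255.0
mean = np.array([0.485, 0.456, 0.406])
variance = np.array([0.229, 0.224, 0.225]) ** 2

# The folding performed in ImageConverter.__init__ above.
std = np.sqrt(variance)
folded_scale = scale / std   # scale = [scale / s for s in std]
folded_offset = -mean / std  # offset = [-m / s for m, s in zip(mean, std)]

x = np.random.randint(0, 256, size=(2, 8, 8, 3)).astype("float32")
old_output = (x * scale - mean) / std          # normalize, then standardize
new_output = x * folded_scale + folded_offset  # one multiply, one add
np.testing.assert_allclose(old_output, new_output, rtol=1e-6)
```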
keras_hub/src/models/backbone.py
CHANGED
@@ -1,15 +1,10 @@
-import os
-
 import keras

 from keras_hub.src.api_export import keras_hub_export
 from keras_hub.src.utils.keras_utils import assert_quantization_support
-from keras_hub.src.utils.preset_utils import CONFIG_FILE
-from keras_hub.src.utils.preset_utils import MODEL_WEIGHTS_FILE
 from keras_hub.src.utils.preset_utils import builtin_presets
 from keras_hub.src.utils.preset_utils import get_preset_loader
-from keras_hub.src.utils.preset_utils import save_metadata
-from keras_hub.src.utils.preset_utils import save_serialized_object
+from keras_hub.src.utils.preset_utils import get_preset_saver
 from keras_hub.src.utils.python_utils import classproperty


@@ -193,9 +188,8 @@ class Backbone(keras.Model):
         Args:
             preset_dir: The path to the local model preset directory.
         """
-        save_serialized_object(self, preset_dir, config_file=CONFIG_FILE)
-        self.save_weights(os.path.join(preset_dir, MODEL_WEIGHTS_FILE))
-        save_metadata(self, preset_dir)
+        saver = get_preset_saver(preset_dir)
+        saver.save_backbone(self)

     def enable_lora(self, rank):
         """Enable Lora on the backbone.
keras_hub/src/models/csp_darknet/csp_darknet_image_classifier.py
CHANGED
@@ -1,5 +1,3 @@
-import keras
-
 from keras_hub.src.api_export import keras_hub_export
 from keras_hub.src.models.csp_darknet.csp_darknet_backbone import (
     CSPDarkNetBackbone,
@@ -9,111 +7,4 @@ from keras_hub.src.models.image_classifier import ImageClassifier

 @keras_hub_export("keras_hub.models.CSPDarkNetImageClassifier")
 class CSPDarkNetImageClassifier(ImageClassifier):
-    """CSPDarkNet image classifier task model.
-
-    Args:
-        backbone: A `keras_hub.models.CSPDarkNetBackbone` instance.
-        num_classes: int. The number of classes to predict.
-        activation: `None`, str or callable. The activation function to use on
-            the `Dense` layer. Set `activation=None` to return the output
-            logits. Defaults to `"softmax"`.
-
-    To fine-tune with `fit()`, pass a dataset containing tuples of `(x, y)`
-    where `x` is a tensor and `y` is a integer from `[0, num_classes)`.
-    All `ImageClassifier` tasks include a `from_preset()` constructor which can
-    be used to load a pre-trained config and weights.
-
-    Examples:
-
-    Call `predict()` to run inference.
-    ```python
-    # Load preset and train
-    images = np.ones((2, 224, 224, 3), dtype="float32")
-    classifier = keras_hub.models.CSPDarkNetImageClassifier.from_preset(
-        "csp_darknet_tiny_imagenet")
-    classifier.predict(images)
-    ```
-
-    Call `fit()` on a single batch.
-    ```python
-    # Load preset and train
-    images = np.ones((2, 224, 224, 3), dtype="float32")
-    labels = [0, 3]
-    classifier = keras_hub.models.CSPDarkNetImageClassifier.from_preset(
-        "csp_darknet_tiny_imagenet")
-    classifier.fit(x=images, y=labels, batch_size=2)
-    ```
-
-    Call `fit()` with custom loss, optimizer and backbone.
-    ```python
-    classifier = keras_hub.models.CSPDarkNetImageClassifier.from_preset(
-        "csp_darknet_tiny_imagenet")
-    classifier.compile(
-        loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
-        optimizer=keras.optimizers.Adam(5e-5),
-    )
-    classifier.backbone.trainable = False
-    classifier.fit(x=images, y=labels, batch_size=2)
-    ```
-
-    Custom backbone.
-    ```python
-    images = np.ones((2, 224, 224, 3), dtype="float32")
-    labels = [0, 3]
-    backbone = keras_hub.models.CSPDarkNetBackbone(
-        stackwise_num_filters=[128, 256, 512, 1024],
-        stackwise_depth=[3, 9, 9, 3],
-        block_type="basic_block",
-        image_shape = (224, 224, 3),
-    )
-    classifier = keras_hub.models.CSPDarkNetImageClassifier(
-        backbone=backbone,
-        num_classes=4,
-    )
-    classifier.fit(x=images, y=labels, batch_size=2)
-    ```
-    """
-
     backbone_cls = CSPDarkNetBackbone
-
-    def __init__(
-        self,
-        backbone,
-        num_classes,
-        activation="softmax",
-        preprocessor=None,  # adding this dummy arg for saved model test
-        # TODO: once preprocessor flow is figured out, this needs to be updated
-        **kwargs,
-    ):
-        # === Layers ===
-        self.backbone = backbone
-        self.output_dense = keras.layers.Dense(
-            num_classes,
-            activation=activation,
-            name="predictions",
-        )
-
-        # === Functional Model ===
-        inputs = self.backbone.input
-        x = self.backbone(inputs)
-        outputs = self.output_dense(x)
-        super().__init__(
-            inputs=inputs,
-            outputs=outputs,
-            **kwargs,
-        )
-
-        # === Config ===
-        self.num_classes = num_classes
-        self.activation = activation
-
-    def get_config(self):
-        # Backbone serialized in `super`
-        config = super().get_config()
-        config.update(
-            {
-                "num_classes": self.num_classes,
-                "activation": self.activation,
-            }
-        )
-        return config
keras_hub/src/models/densenet/densenet_image_classifier.py
CHANGED
@@ -1,5 +1,3 @@
-import keras
-
 from keras_hub.src.api_export import keras_hub_export
 from keras_hub.src.models.densenet.densenet_backbone import DenseNetBackbone
 from keras_hub.src.models.densenet.densenet_image_classifier_preprocessor import (
@@ -10,131 +8,5 @@ from keras_hub.src.models.image_classifier import ImageClassifier

 @keras_hub_export("keras_hub.models.DenseNetImageClassifier")
 class DenseNetImageClassifier(ImageClassifier):
-    """DenseNet image classifier task model.
-
-    To fine-tune with `fit()`, pass a dataset containing tuples of `(x, y)`
-    where `x` is a tensor and `y` is a integer from `[0, num_classes)`.
-    All `ImageClassifier` tasks include a `from_preset()` constructor which can
-    be used to load a pre-trained config and weights.
-
-    Args:
-        backbone: A `keras_hub.models.DenseNetBackbone` instance.
-        num_classes: int. The number of classes to predict.
-        activation: `None`, str or callable. The activation function to use on
-            the `Dense` layer. Set `activation=None` to return the output
-            logits. Defaults to `None`.
-        pooling: A pooling layer to use before the final classification layer,
-            must be one of "avg" or "max". Use "avg" for
-            `GlobalAveragePooling2D` and "max" for "GlobalMaxPooling2D.
-        preprocessor: A `keras_hub.models.DenseNetImageClassifierPreprocessor`
-            or `None`. If `None`, this model will not apply preprocessing, and
-            inputs should be preprocessed before calling the model.
-
-    Examples:
-
-    Call `predict()` to run inference.
-    ```python
-    # Load preset and train
-    images = np.ones((2, 224, 224, 3), dtype="float32")
-    classifier = keras_hub.models.DenseNetImageClassifier.from_preset(
-        "densenet121_imagenet")
-    classifier.predict(images)
-    ```
-
-    Call `fit()` on a single batch.
-    ```python
-    # Load preset and train
-    images = np.ones((2, 224, 224, 3), dtype="float32")
-    labels = [0, 3]
-    classifier = keras_hub.models.DenseNetImageClassifier.from_preset(
-        "densenet121_imagenet")
-    classifier.fit(x=images, y=labels, batch_size=2)
-    ```
-
-    Call `fit()` with custom loss, optimizer and backbone.
-    ```python
-    classifier = keras_hub.models.DenseNetImageClassifier.from_preset(
-        "densenet121_imagenet")
-    classifier.compile(
-        loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
-        optimizer=keras.optimizers.Adam(5e-5),
-    )
-    classifier.backbone.trainable = False
-    classifier.fit(x=images, y=labels, batch_size=2)
-    ```
-
-    Custom backbone.
-    ```python
-    images = np.ones((2, 224, 224, 3), dtype="float32")
-    labels = [0, 3]
-    backbone = keras_hub.models.DenseNetBackbone(
-        stackwise_num_filters=[128, 256, 512, 1024],
-        stackwise_depth=[3, 9, 9, 3],
-        block_type="basic_block",
-        image_shape = (224, 224, 3),
-    )
-    classifier = keras_hub.models.DenseNetImageClassifier(
-        backbone=backbone,
-        num_classes=4,
-    )
-    classifier.fit(x=images, y=labels, batch_size=2)
-    ```
-    """
-
     backbone_cls = DenseNetBackbone
     preprocessor_cls = DenseNetImageClassifierPreprocessor
-
-    def __init__(
-        self,
-        backbone,
-        num_classes,
-        activation=None,
-        pooling="avg",
-        preprocessor=None,
-        **kwargs,
-    ):
-        # === Layers ===
-        self.backbone = backbone
-        self.preprocessor = preprocessor
-        if pooling == "avg":
-            self.pooler = keras.layers.GlobalAveragePooling2D()
-        elif pooling == "max":
-            self.pooler = keras.layers.GlobalMaxPooling2D()
-        else:
-            raise ValueError(
-                "Unknown `pooling` type. Polling should be either `'avg'` or "
-                f"`'max'`. Received: pooling={pooling}."
-            )
-        self.output_dense = keras.layers.Dense(
-            num_classes,
-            activation=activation,
-            name="predictions",
-        )
-
-        # === Functional Model ===
-        inputs = self.backbone.input
-        x = self.backbone(inputs)
-        x = self.pooler(x)
-        outputs = self.output_dense(x)
-        super().__init__(
-            inputs=inputs,
-            outputs=outputs,
-            **kwargs,
-        )
-
-        # === Config ===
-        self.num_classes = num_classes
-        self.activation = activation
-        self.pooling = pooling
-
-    def get_config(self):
-        # Backbone serialized in `super`
-        config = super().get_config()
-        config.update(
-            {
-                "num_classes": self.num_classes,
-                "activation": self.activation,
-                "pooling": self.pooling,
-            }
-        )
-        return config
keras_hub/src/models/densenet/densenet_image_converter.py
CHANGED
@@ -1,10 +1,8 @@
 from keras_hub.src.api_export import keras_hub_export
-from keras_hub.src.layers.preprocessing.resizing_image_converter import (
-    ResizingImageConverter,
-)
+from keras_hub.src.layers.preprocessing.image_converter import ImageConverter
 from keras_hub.src.models.densenet.densenet_backbone import DenseNetBackbone


 @keras_hub_export("keras_hub.layers.DenseNetImageConverter")
-class DenseNetImageConverter(ResizingImageConverter):
+class DenseNetImageConverter(ImageConverter):
     backbone_cls = DenseNetBackbone
keras_hub/src/models/feature_pyramid_backbone.py
CHANGED
@@ -15,7 +15,7 @@ class FeaturePyramidBackbone(Backbone):
     Example:

     ```python
-    input_data = np.random.uniform(0,
+    input_data = np.random.uniform(0, 256, size=(2, 224, 224, 3))

     # Convert to feature pyramid output format using ResNet.
     backbone = ResNetBackbone.from_preset("resnet50")
|