keras-hub-nightly 0.16.1.dev202410020340__py3-none-any.whl → 0.16.1.dev202410040340__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- keras_hub/api/layers/__init__.py +3 -3
- keras_hub/api/models/__init__.py +10 -1
- keras_hub/src/layers/preprocessing/audio_converter.py +3 -7
- keras_hub/src/layers/preprocessing/image_converter.py +164 -34
- keras_hub/src/models/backbone.py +3 -9
- keras_hub/src/models/csp_darknet/csp_darknet_image_classifier.py +0 -109
- keras_hub/src/models/deeplab_v3/__init__.py +7 -0
- keras_hub/src/models/deeplab_v3/deeplab_v3_backbone.py +196 -0
- keras_hub/src/models/deeplab_v3/deeplab_v3_image_converter.py +10 -0
- keras_hub/src/models/deeplab_v3/deeplab_v3_image_segmeter_preprocessor.py +16 -0
- keras_hub/src/models/deeplab_v3/deeplab_v3_layers.py +215 -0
- keras_hub/src/models/deeplab_v3/deeplab_v3_presets.py +4 -0
- keras_hub/src/models/deeplab_v3/deeplab_v3_segmenter.py +109 -0
- keras_hub/src/models/densenet/densenet_image_classifier.py +0 -128
- keras_hub/src/models/densenet/densenet_image_converter.py +2 -4
- keras_hub/src/models/feature_pyramid_backbone.py +1 -1
- keras_hub/src/models/image_classifier.py +147 -2
- keras_hub/src/models/image_classifier_preprocessor.py +3 -3
- keras_hub/src/models/image_segmenter.py +0 -5
- keras_hub/src/models/image_segmenter_preprocessor.py +29 -4
- keras_hub/src/models/mix_transformer/mix_transformer_classifier.py +0 -109
- keras_hub/src/models/mobilenet/mobilenet_image_classifier.py +0 -92
- keras_hub/src/models/pali_gemma/pali_gemma_image_converter.py +2 -4
- keras_hub/src/models/preprocessor.py +3 -5
- keras_hub/src/models/resnet/resnet_backbone.py +1 -11
- keras_hub/src/models/resnet/resnet_image_classifier.py +0 -137
- keras_hub/src/models/resnet/resnet_image_converter.py +2 -4
- keras_hub/src/models/sam/__init__.py +5 -0
- keras_hub/src/models/sam/sam_image_converter.py +2 -4
- keras_hub/src/models/sam/sam_image_segmenter_preprocessor.py +11 -1
- keras_hub/src/models/sam/sam_presets.py +3 -3
- keras_hub/src/models/stable_diffusion_3/flow_match_euler_discrete_scheduler.py +8 -1
- keras_hub/src/models/stable_diffusion_3/stable_diffusion_3_backbone.py +57 -93
- keras_hub/src/models/stable_diffusion_3/stable_diffusion_3_presets.py +3 -3
- keras_hub/src/models/stable_diffusion_3/stable_diffusion_3_text_to_image.py +5 -3
- keras_hub/src/models/task.py +39 -36
- keras_hub/src/models/vae/__init__.py +1 -0
- keras_hub/src/models/vae/vae_backbone.py +172 -0
- keras_hub/src/models/vae/vae_layers.py +740 -0
- keras_hub/src/models/vgg/vgg_backbone.py +1 -20
- keras_hub/src/models/vgg/vgg_image_classifier.py +108 -29
- keras_hub/src/tokenizers/tokenizer.py +3 -6
- keras_hub/src/utils/preset_utils.py +103 -61
- keras_hub/src/utils/timm/preset_loader.py +8 -9
- keras_hub/src/version_utils.py +1 -1
- {keras_hub_nightly-0.16.1.dev202410020340.dist-info → keras_hub_nightly-0.16.1.dev202410040340.dist-info}/METADATA +1 -1
- {keras_hub_nightly-0.16.1.dev202410020340.dist-info → keras_hub_nightly-0.16.1.dev202410040340.dist-info}/RECORD +49 -41
- keras_hub/src/layers/preprocessing/resizing_image_converter.py +0 -138
- keras_hub/src/models/stable_diffusion_3/vae_image_decoder.py +0 -320
- {keras_hub_nightly-0.16.1.dev202410020340.dist-info → keras_hub_nightly-0.16.1.dev202410040340.dist-info}/WHEEL +0 -0
- {keras_hub_nightly-0.16.1.dev202410020340.dist-info → keras_hub_nightly-0.16.1.dev202410040340.dist-info}/top_level.txt +0 -0
keras_hub/src/models/image_segmenter.py

@@ -16,11 +16,6 @@ class ImageSegmenter(Task):
     be used to load a pre-trained config and weights.
     """
 
-    def __init__(self, *args, **kwargs):
-        super().__init__(*args, **kwargs)
-        # Default compilation.
-        self.compile()
-
     def compile(
         self,
         optimizer="auto",
keras_hub/src/models/image_segmenter_preprocessor.py

@@ -19,9 +19,11 @@ class ImageSegmenterPreprocessor(Preprocessor):
 
     - `x`: The first input, should always be included. It can be an image or
       a batch of images.
-    - `y`: (Optional) Usually the segmentation mask(s),
-
+    - `y`: (Optional) Usually the segmentation mask(s), if `resize_output_mask`
+      is set to `True` this will be resized to input image shape else will be
+      passed through unaltered.
     - `sample_weight`: (Optional) Will be passed through unaltered.
+    - `resize_output_mask` bool: If set to `True` the output mask will be resized to the same size as the input image. Defaults to `False`.
 
     The layer will output either `x`, an `(x, y)` tuple if labels were provided,
     or an `(x, y, sample_weight)` tuple if labels and sample weight were
@@ -29,7 +31,7 @@ class ImageSegmenterPreprocessor(Preprocessor):
     been applied.
 
     All `ImageSegmenterPreprocessor` tasks include a `from_preset()`
-    constructor which can be used to load a pre-trained config
+    constructor which can be used to load a pre-trained config.
     You can call the `from_preset()` constructor directly on this base class, in
     which case the correct class for your model will be automatically
     instantiated.
@@ -49,7 +51,8 @@ class ImageSegmenterPreprocessor(Preprocessor):
     x, y = preprocessor(x, y)
 
     # Resize a batch of images and masks.
-    x, y = [np.ones((512, 512, 3)), np.zeros((512, 512, 3))],
+    x, y = [np.ones((512, 512, 3)), np.zeros((512, 512, 3))],
+        [np.ones((512, 512, 1)), np.zeros((512, 512, 1))]
     x, y = preprocessor(x, y)
 
     # Use a `tf.data.Dataset`.
@@ -61,13 +64,35 @@ class ImageSegmenterPreprocessor(Preprocessor):
     def __init__(
         self,
         image_converter=None,
+        resize_output_mask=False,
         **kwargs,
     ):
         super().__init__(**kwargs)
         self.image_converter = image_converter
+        self.resize_output_mask = resize_output_mask
 
     @preprocessing_function
     def call(self, x, y=None, sample_weight=None):
         if self.image_converter:
             x = self.image_converter(x)
+
+        if y is not None and self.image_converter and self.resize_output_mask:
+
+            y = keras.layers.Resizing(
+                height=(
+                    self.image_converter.image_size[0]
+                    if self.image_converter.image_size
+                    else None
+                ),
+                width=(
+                    self.image_converter.image_size[1]
+                    if self.image_converter.image_size
+                    else None
+                ),
+                crop_to_aspect_ratio=self.image_converter.crop_to_aspect_ratio,
+                interpolation="nearest",
+                data_format=self.image_converter.data_format,
+                dtype=self.dtype_policy,
+                name="mask_resizing",
+            )(y)
         return keras.utils.pack_x_y_sample_weight(x, y, sample_weight)
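A minimal usage sketch of the new `resize_output_mask` flag. The converter size, array shapes, and direct construction of the exported base classes below are illustrative assumptions, not taken from the diff:

```python
import numpy as np
import keras_hub

# Hypothetical setup: build the base preprocessor directly with an
# ImageConverter that resizes inputs to 128x128.
converter = keras_hub.layers.ImageConverter(image_size=(128, 128))
preprocessor = keras_hub.models.ImageSegmenterPreprocessor(
    image_converter=converter,
    resize_output_mask=True,  # new flag: masks are resized with the images
)
x = np.ones((2, 512, 512, 3), dtype="float32")   # batch of images
y = np.zeros((2, 512, 512, 1), dtype="float32")  # matching masks
x, y = preprocessor(x, y)  # both spatial sizes are now 128x128
```

With `resize_output_mask=False` (the default), `y` passes through at its original 512x512 size, as before this change.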
keras_hub/src/models/mix_transformer/mix_transformer_classifier.py

@@ -1,5 +1,3 @@
-import keras
-
 from keras_hub.src.api_export import keras_hub_export
 from keras_hub.src.models.image_classifier import ImageClassifier
 from keras_hub.src.models.mix_transformer.mix_transformer_backbone import (
@@ -9,111 +7,4 @@ from keras_hub.src.models.mix_transformer.mix_transformer_backbone import (
 
 @keras_hub_export("keras_hub.models.MiTImageClassifier")
 class MiTImageClassifier(ImageClassifier):
-    """MiTImageClassifier image classifier model.
-
-    Args:
-        backbone: A `keras_hub.models.MiTBackbone` instance.
-        num_classes: int. The number of classes to predict.
-        activation: `None`, str or callable. The activation function to use on
-            the `Dense` layer. Set `activation=None` to return the output
-            logits. Defaults to `"softmax"`.
-
-    To fine-tune with `fit()`, pass a dataset containing tuples of `(x, y)`
-    where `x` is a tensor and `y` is a integer from `[0, num_classes)`.
-    All `ImageClassifier` tasks include a `from_preset()` constructor which can
-    be used to load a pre-trained config and weights.
-
-    Examples:
-
-    Call `predict()` to run inference.
-    ```python
-    # Load preset and train
-    images = np.ones((2, 224, 224, 3), dtype="float32")
-    classifier = keras_hub.models.MiTImageClassifier.from_preset(
-        "mit_b0_imagenet")
-    classifier.predict(images)
-    ```
-
-    Call `fit()` on a single batch.
-    ```python
-    # Load preset and train
-    images = np.ones((2, 224, 224, 3), dtype="float32")
-    labels = [0, 3]
-    classifier = keras_hub.models.MixTransformerImageClassifier.from_preset(
-        "mit_b0_imagenet")
-    classifier.fit(x=images, y=labels, batch_size=2)
-    ```
-
-    Call `fit()` with custom loss, optimizer and backbone.
-    ```python
-    classifier = keras_hub.models.MiTImageClassifier.from_preset(
-        "mit_b0_imagenet")
-    classifier.compile(
-        loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
-        optimizer=keras.optimizers.Adam(5e-5),
-    )
-    classifier.backbone.trainable = False
-    classifier.fit(x=images, y=labels, batch_size=2)
-    ```
-
-    Custom backbone.
-    ```python
-    images = np.ones((2, 224, 224, 3), dtype="float32")
-    labels = [0, 3]
-    backbone = keras_hub.models.MiTBackbone(
-        stackwise_num_filters=[128, 256, 512, 1024],
-        stackwise_depth=[3, 9, 9, 3],
-        block_type="basic_block",
-        image_shape = (224, 224, 3),
-    )
-    classifier = keras_hub.models.MiTImageClassifier(
-        backbone=backbone,
-        num_classes=4,
-    )
-    classifier.fit(x=images, y=labels, batch_size=2)
-    ```
-    """
-
     backbone_cls = MiTBackbone
-
-    def __init__(
-        self,
-        backbone,
-        num_classes,
-        activation="softmax",
-        preprocessor=None,  # adding this dummy arg for saved model test
-        # TODO: once preprocessor flow is figured out, this needs to be updated
-        **kwargs,
-    ):
-        # === Layers ===
-        self.backbone = backbone
-        self.output_dense = keras.layers.Dense(
-            num_classes,
-            activation=activation,
-            name="predictions",
-        )
-
-        # === Functional Model ===
-        inputs = self.backbone.input
-        x = self.backbone(inputs)
-        outputs = self.output_dense(x)
-        super().__init__(
-            inputs=inputs,
-            outputs=outputs,
-            **kwargs,
-        )
-
-        # === Config ===
-        self.num_classes = num_classes
-        self.activation = activation
-
-    def get_config(self):
-        # Backbone serialized in `super`
-        config = super().get_config()
-        config.update(
-            {
-                "num_classes": self.num_classes,
-                "activation": self.activation,
-            }
-        )
-        return config
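With the bespoke head and config code gone, `MiTImageClassifier` relies entirely on the shared `ImageClassifier` base (which grows by +147 lines in this release, per the file list above). Usage from the deleted docstring should still apply unchanged; a sketch reusing its preset name:

```python
import numpy as np
import keras_hub

# Preset name taken from the removed docstring above.
images = np.ones((2, 224, 224, 3), dtype="float32")
classifier = keras_hub.models.MiTImageClassifier.from_preset("mit_b0_imagenet")
classifier.predict(images)
```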
keras_hub/src/models/mobilenet/mobilenet_image_classifier.py

@@ -1,5 +1,3 @@
-import keras
-
 from keras_hub.src.api_export import keras_hub_export
 from keras_hub.src.models.image_classifier import ImageClassifier
 from keras_hub.src.models.mobilenet.mobilenet_backbone import MobileNetBackbone
@@ -7,94 +5,4 @@ from keras_hub.src.models.mobilenet.mobilenet_backbone import MobileNetBackbone
 
 @keras_hub_export("keras_hub.models.MobileNetImageClassifier")
 class MobileNetImageClassifier(ImageClassifier):
-    """MobileNetV3 image classifier task model.
-
-    To fine-tune with `fit()`, pass a dataset containing tuples of `(x, y)`
-    where `x` is a tensor and `y` is a integer from `[0, num_classes)`.
-    All `ImageClassifier` tasks include a `from_preset()` constructor which can
-    be used to load a pre-trained config and weights.
-
-    Args:
-        backbone: A `keras_hub.models.MobileNetBackbone` instance.
-        num_classes: int. The number of classes to predict.
-        activation: `None`, str or callable. The activation function to use on
-            the `Dense` layer. Set `activation=None` to return the output
-            logits. Defaults to `"softmax"`.
-
-    Examples:
-
-    Call `predict()` to run inference.
-    ```python
-    # Load preset and train
-    images = np.ones((2, 224, 224, 3), dtype="float32")
-    classifier = keras_hub.models.MobileNetImageClassifier.from_preset(
-        "mobilenet_v3_small_imagenet")
-    classifier.predict(images)
-    ```
-
-    Custom backbone.
-    ```python
-    images = np.ones((2, 224, 224, 3), dtype="float32")
-    labels = [0, 3]
-    model = MobileNetBackbone(
-        stackwise_expansion = [1, 4, 6],
-        stackwise_filters = [4, 8, 16],
-        stackwise_kernel_size = [3, 3, 5],
-        stackwise_stride = [2, 2, 1],
-        stackwise_se_ratio = [ 0.25, None, 0.25],
-        stackwise_activation = ["relu", "relu", "hard_swish"],
-        output_filter=1280,
-        activation="hard_swish",
-        inverted_res_block=True,
-    )
-    classifier = keras_hub.models.MobileNetImageClassifier(
-        backbone=backbone,
-        num_classes=4,
-    )
-    classifier.fit(x=images, y=labels, batch_size=2)
-    ```
-    """
-
     backbone_cls = MobileNetBackbone
-
-    def __init__(
-        self,
-        backbone,
-        num_classes,
-        activation="softmax",
-        preprocessor=None,  # adding this dummy arg for saved model test
-        # TODO: once preprocessor flow is figured out, this needs to be updated
-        **kwargs,
-    ):
-        # === Layers ===
-        self.backbone = backbone
-        self.output_dense = keras.layers.Dense(
-            num_classes,
-            activation=activation,
-            name="predictions",
-        )
-
-        # === Functional Model ===
-        inputs = self.backbone.input
-        x = self.backbone(inputs)
-        outputs = self.output_dense(x)
-        super().__init__(
-            inputs=inputs,
-            outputs=outputs,
-            **kwargs,
-        )
-
-        # === Config ===
-        self.num_classes = num_classes
-        self.activation = activation
-
-    def get_config(self):
-        # Backbone serialized in `super`
-        config = super().get_config()
-        config.update(
-            {
-                "num_classes": self.num_classes,
-                "activation": self.activation,
-            }
-        )
-        return config
keras_hub/src/models/pali_gemma/pali_gemma_image_converter.py

@@ -1,12 +1,10 @@
 from keras_hub.src.api_export import keras_hub_export
-from keras_hub.src.layers.preprocessing.resizing_image_converter import (
-    ResizingImageConverter,
-)
+from keras_hub.src.layers.preprocessing.image_converter import ImageConverter
 from keras_hub.src.models.pali_gemma.pali_gemma_backbone import (
     PaliGemmaBackbone,
 )
 
 
 @keras_hub_export("keras_hub.layers.PaliGemmaImageConverter")
-class PaliGemmaImageConverter(ResizingImageConverter):
+class PaliGemmaImageConverter(ImageConverter):
     backbone_cls = PaliGemmaBackbone
keras_hub/src/models/preprocessor.py

@@ -8,7 +8,7 @@ from keras_hub.src.utils.preset_utils import PREPROCESSOR_CONFIG_FILE
 from keras_hub.src.utils.preset_utils import builtin_presets
 from keras_hub.src.utils.preset_utils import find_subclass
 from keras_hub.src.utils.preset_utils import get_preset_loader
-from keras_hub.src.utils.preset_utils import
+from keras_hub.src.utils.preset_utils import get_preset_saver
 from keras_hub.src.utils.python_utils import classproperty
 
 
@@ -209,7 +209,5 @@ class Preprocessor(PreprocessingLayer):
         Args:
             preset_dir: The path to the local model preset directory.
         """
-
-
-        if hasattr(layer, "save_to_preset"):
-            layer.save_to_preset(preset_dir)
+        saver = get_preset_saver(preset_dir)
+        saver.save_preprocessor(self)
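The public entry point is unchanged; only the body now delegates to the shared preset saver. A round-trip sketch, assuming a preset name that appears elsewhere in this diff and a placeholder local path:

```python
import keras_hub

# Load a built-in preset, save it locally, and restore it from disk.
preprocessor = keras_hub.models.SAMImageSegmenterPreprocessor.from_preset(
    "sam_base_sa1b"
)
preprocessor.save_to_preset("./my_local_preset")  # routed via get_preset_saver
restored = keras_hub.models.SAMImageSegmenterPreprocessor.from_preset(
    "./my_local_preset"
)
```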
keras_hub/src/models/resnet/resnet_backbone.py

@@ -51,16 +51,6 @@ class ResNetBackbone(FeaturePyramidBackbone):
             `True` for ResNetV2, `False` for ResNet.
         image_shape: tuple. The input shape without the batch size.
             Defaults to `(None, None, 3)`.
-        pooling: `None` or str. Pooling mode for feature extraction. Defaults
-            to `"avg"`.
-            - `None` means that the output of the model will be the 4D tensor
-                from the last convolutional block.
-            - `avg` means that global average pooling will be applied to the
-                output of the last convolutional block, resulting in a 2D
-                tensor.
-            - `max` means that global max pooling will be applied to the
-                output of the last convolutional block, resulting in a 2D
-                tensor.
         data_format: `None` or str. If specified, either `"channels_last"` or
             `"channels_first"`. The ordering of the dimensions in the
             inputs. `"channels_last"` corresponds to inputs with shape
@@ -75,7 +65,7 @@ class ResNetBackbone(FeaturePyramidBackbone):
 
     Examples:
     ```python
-    input_data = np.random.uniform(0,
+    input_data = np.random.uniform(0, 1, size=(2, 224, 224, 3))
 
     # Pretrained ResNet backbone.
     model = keras_hub.models.ResNetBackbone.from_preset("resnet50")
keras_hub/src/models/resnet/resnet_image_classifier.py

@@ -1,5 +1,3 @@
-import keras
-
 from keras_hub.src.api_export import keras_hub_export
 from keras_hub.src.models.image_classifier import ImageClassifier
 from keras_hub.src.models.resnet.resnet_backbone import ResNetBackbone
@@ -10,140 +8,5 @@ from keras_hub.src.models.resnet.resnet_image_classifier_preprocessor import (
 
 
 @keras_hub_export("keras_hub.models.ResNetImageClassifier")
 class ResNetImageClassifier(ImageClassifier):
-    """ResNet image classifier task model.
-
-    Args:
-        backbone: A `keras_hub.models.ResNetBackbone` instance.
-        num_classes: int. The number of classes to predict.
-        activation: `None`, str or callable. The activation function to use on
-            the `Dense` layer. Set `activation=None` to return the output
-            logits. Defaults to `"softmax"`.
-        head_dtype: `None` or str or `keras.mixed_precision.DTypePolicy`. The
-            dtype to use for the classification head's computations and weights.
-
-    To fine-tune with `fit()`, pass a dataset containing tuples of `(x, y)`
-    where `x` is a tensor and `y` is a integer from `[0, num_classes)`.
-    All `ImageClassifier` tasks include a `from_preset()` constructor which can
-    be used to load a pre-trained config and weights.
-
-    Examples:
-
-    Call `predict()` to run inference.
-    ```python
-    # Load preset and train
-    images = np.ones((2, 224, 224, 3), dtype="float32")
-    classifier = keras_hub.models.ResNetImageClassifier.from_preset(
-        "resnet_50_imagenet"
-    )
-    classifier.predict(images)
-    ```
-
-    Call `fit()` on a single batch.
-    ```python
-    # Load preset and train
-    images = np.ones((2, 224, 224, 3), dtype="float32")
-    labels = [0, 3]
-    classifier = keras_hub.models.ResNetImageClassifier.from_preset(
-        "resnet_50_imagenet"
-    )
-    classifier.fit(x=images, y=labels, batch_size=2)
-    ```
-
-    Call `fit()` with custom loss, optimizer and backbone.
-    ```python
-    classifier = keras_hub.models.ResNetImageClassifier.from_preset(
-        "resnet_50_imagenet"
-    )
-    classifier.compile(
-        loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
-        optimizer=keras.optimizers.Adam(5e-5),
-    )
-    classifier.backbone.trainable = False
-    classifier.fit(x=images, y=labels, batch_size=2)
-    ```
-
-    Custom backbone.
-    ```python
-    images = np.ones((2, 224, 224, 3), dtype="float32")
-    labels = [0, 3]
-    backbone = keras_hub.models.ResNetBackbone(
-        stackwise_num_filters=[64, 64, 64],
-        stackwise_num_blocks=[2, 2, 2],
-        stackwise_num_strides=[1, 2, 2],
-        block_type="basic_block",
-        use_pre_activation=True,
-        pooling="avg",
-    )
-    classifier = keras_hub.models.ResNetImageClassifier(
-        backbone=backbone,
-        num_classes=4,
-    )
-    classifier.fit(x=images, y=labels, batch_size=2)
-    ```
-    """
-
     backbone_cls = ResNetBackbone
     preprocessor_cls = ResNetImageClassifierPreprocessor
-
-    def __init__(
-        self,
-        backbone,
-        num_classes,
-        preprocessor=None,
-        pooling="avg",
-        activation=None,
-        head_dtype=None,
-        **kwargs,
-    ):
-        head_dtype = head_dtype or backbone.dtype_policy
-
-        # === Layers ===
-        self.backbone = backbone
-        self.preprocessor = preprocessor
-        if pooling == "avg":
-            self.pooler = keras.layers.GlobalAveragePooling2D(
-                data_format=backbone.data_format, dtype=head_dtype
-            )
-        elif pooling == "max":
-            self.pooler = keras.layers.GlobalAveragePooling2D(
-                data_format=backbone.data_format, dtype=head_dtype
-            )
-        else:
-            raise ValueError(
-                "Unknown `pooling` type. Polling should be either `'avg'` or "
-                f"`'max'`. Received: pooling={pooling}."
-            )
-        self.output_dense = keras.layers.Dense(
-            num_classes,
-            activation=activation,
-            dtype=head_dtype,
-            name="predictions",
-        )
-
-        # === Functional Model ===
-        inputs = self.backbone.input
-        x = self.backbone(inputs)
-        x = self.pooler(x)
-        outputs = self.output_dense(x)
-        super().__init__(
-            inputs=inputs,
-            outputs=outputs,
-            **kwargs,
-        )
-
-        # === Config ===
-        self.num_classes = num_classes
-        self.activation = activation
-        self.pooling = pooling
-
-    def get_config(self):
-        # Backbone serialized in `super`
-        config = super().get_config()
-        config.update(
-            {
-                "num_classes": self.num_classes,
-                "pooling": self.pooling,
-                "activation": self.activation,
-            }
-        )
-        return config
keras_hub/src/models/resnet/resnet_image_converter.py

@@ -1,10 +1,8 @@
 from keras_hub.src.api_export import keras_hub_export
-from keras_hub.src.layers.preprocessing.resizing_image_converter import (
-    ResizingImageConverter,
-)
+from keras_hub.src.layers.preprocessing.image_converter import ImageConverter
 from keras_hub.src.models.resnet.resnet_backbone import ResNetBackbone
 
 
 @keras_hub_export("keras_hub.layers.ResNetImageConverter")
-class ResNetImageConverter(ResizingImageConverter):
+class ResNetImageConverter(ImageConverter):
     backbone_cls = ResNetBackbone
keras_hub/src/models/sam/sam_image_converter.py

@@ -1,10 +1,8 @@
 from keras_hub.src.api_export import keras_hub_export
-from keras_hub.src.layers.preprocessing.resizing_image_converter import (
-    ResizingImageConverter,
-)
+from keras_hub.src.layers.preprocessing.image_converter import ImageConverter
 from keras_hub.src.models.sam.sam_backbone import SAMBackbone
 
 
 @keras_hub_export("keras_hub.layers.SAMImageConverter")
-class SAMImageConverter(ResizingImageConverter):
+class SAMImageConverter(ImageConverter):
     backbone_cls = SAMBackbone
keras_hub/src/models/sam/sam_image_segmenter_preprocessor.py

@@ -1,12 +1,22 @@
+import keras
+
 from keras_hub.src.api_export import keras_hub_export
 from keras_hub.src.models.image_segmenter_preprocessor import (
     ImageSegmenterPreprocessor,
 )
 from keras_hub.src.models.sam.sam_backbone import SAMBackbone
 from keras_hub.src.models.sam.sam_image_converter import SAMImageConverter
+from keras_hub.src.utils.tensor_utils import preprocessing_function
 
 
-@keras_hub_export("keras_hub.models.
+@keras_hub_export("keras_hub.models.SAMImageSegmenterPreprocessor")
 class SAMImageSegmenterPreprocessor(ImageSegmenterPreprocessor):
     backbone_cls = SAMBackbone
     image_converter_cls = SAMImageConverter
+
+    @preprocessing_function
+    def call(self, x, y=None, sample_weight=None):
+        images = x["images"]
+        if self.image_converter:
+            x["images"] = self.image_converter(images)
+        return keras.utils.pack_x_y_sample_weight(x, y, sample_weight)
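The override exists because SAM's `x` is a dict rather than a bare image tensor, so only the `"images"` entry is routed through the converter. A sketch of what that implies for callers; the prompt keys and shapes here are assumptions about SAM's input format, not taken from the diff:

```python
import numpy as np
import keras_hub

preprocessor = keras_hub.models.SAMImageSegmenterPreprocessor.from_preset(
    "sam_base_sa1b"
)
x = {
    "images": np.ones((1, 1024, 1024, 3), dtype="float32"),  # converted
    "points": np.zeros((1, 1, 2), dtype="float32"),          # passed through
    "labels": np.ones((1, 1), dtype="float32"),              # passed through
}
x = preprocessor(x)  # only x["images"] is resized/rescaled
```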
keras_hub/src/models/sam/sam_presets.py

@@ -9,7 +9,7 @@ backbone_presets = {
             "path": "sam",
             "model_card": "https://arxiv.org/abs/2304.02643",
         },
-        "kaggle_handle": "kaggle://kerashub/sam/keras/sam_base_sa1b/
+        "kaggle_handle": "kaggle://kerashub/sam/keras/sam_base_sa1b/2",
     },
     "sam_large_sa1b": {
         "metadata": {
@@ -19,7 +19,7 @@ backbone_presets = {
             "path": "sam",
             "model_card": "https://arxiv.org/abs/2304.02643",
         },
-        "kaggle_handle": "kaggle://kerashub/sam/keras/sam_large_sa1b/
+        "kaggle_handle": "kaggle://kerashub/sam/keras/sam_large_sa1b/2",
     },
     "sam_huge_sa1b": {
         "metadata": {
@@ -29,6 +29,6 @@ backbone_presets = {
             "path": "sam",
             "model_card": "https://arxiv.org/abs/2304.02643",
         },
-        "kaggle_handle": "kaggle://kerashub/sam/keras/sam_huge_sa1b/
+        "kaggle_handle": "kaggle://kerashub/sam/keras/sam_huge_sa1b/2",
     },
 }
keras_hub/src/models/stable_diffusion_3/flow_match_euler_discrete_scheduler.py

@@ -27,7 +27,7 @@ class FlowMatchEulerDiscreteScheduler(layers.Layer):
     https://arxiv.org/abs/2403.03206).
     """
 
-    def __init__(self, num_train_timesteps=1000, shift=
+    def __init__(self, num_train_timesteps=1000, shift=3.0, **kwargs):
         super().__init__(**kwargs)
         self.num_train_timesteps = int(num_train_timesteps)
         self.shift = float(shift)
@@ -65,6 +65,13 @@ class FlowMatchEulerDiscreteScheduler(layers.Layer):
         timestep = self._sigma_to_timestep(sigma)
         return sigma, timestep
 
+    def add_noise(self, inputs, noises, step, num_steps):
+        sigma, _ = self(step, num_steps)
+        return ops.add(
+            ops.multiply(sigma, noises),
+            ops.multiply(ops.subtract(1.0, sigma), inputs),
+        )
+
     def get_config(self):
         config = super().get_config()
         config.update(
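The new `add_noise` method is a straight linear interpolation between signal and noise, `sigma * noise + (1 - sigma) * x`, as used in flow matching. A plain-numpy sketch of the same arithmetic; `add_noise_reference` is a hypothetical helper for illustration, not part of the library:

```python
import numpy as np

def add_noise_reference(inputs, noises, sigma):
    # sigma near 1.0 yields mostly noise; sigma near 0.0 mostly signal.
    return sigma * noises + (1.0 - sigma) * inputs

x = np.ones((4,), dtype="float32")
eps = np.random.normal(size=(4,)).astype("float32")
noisy = add_noise_reference(x, eps, sigma=0.8)
```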