keras-hub-nightly 0.16.0.dev20240915160609__py3-none-any.whl → 0.16.1.dev202409210335__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (42)
  1. keras_hub/__init__.py +0 -6
  2. keras_hub/api/__init__.py +1 -0
  3. keras_hub/api/utils/__init__.py +22 -0
  4. keras_hub/src/api_export.py +17 -11
  5. keras_hub/src/layers/preprocessing/resizing_image_converter.py +56 -6
  6. keras_hub/src/models/csp_darknet/csp_darknet_backbone.py +1 -11
  7. keras_hub/src/models/csp_darknet/csp_darknet_image_classifier.py +0 -1
  8. keras_hub/src/models/densenet/densenet_backbone.py +2 -12
  9. keras_hub/src/models/densenet/densenet_image_classifier.py +0 -1
  10. keras_hub/src/models/efficientnet/efficientnet_backbone.py +3 -14
  11. keras_hub/src/models/gemma/gemma_decoder_block.py +1 -1
  12. keras_hub/src/models/mix_transformer/mix_transformer_backbone.py +1 -11
  13. keras_hub/src/models/mix_transformer/mix_transformer_classifier.py +0 -1
  14. keras_hub/src/models/mobilenet/mobilenet_backbone.py +3 -14
  15. keras_hub/src/models/mobilenet/mobilenet_image_classifier.py +0 -1
  16. keras_hub/src/models/pali_gemma/pali_gemma_vit.py +3 -0
  17. keras_hub/src/models/resnet/resnet_backbone.py +1 -21
  18. keras_hub/src/models/resnet/resnet_image_classifier.py +9 -4
  19. keras_hub/src/models/resnet/resnet_presets.py +6 -6
  20. keras_hub/src/models/retinanet/__init__.py +13 -0
  21. keras_hub/src/models/retinanet/anchor_generator.py +175 -0
  22. keras_hub/src/models/retinanet/box_matcher.py +259 -0
  23. keras_hub/src/models/retinanet/non_max_supression.py +578 -0
  24. keras_hub/src/models/vgg/vgg_backbone.py +0 -8
  25. keras_hub/src/models/vgg/vgg_image_classifier.py +0 -1
  26. keras_hub/src/models/vit_det/vit_det_backbone.py +0 -9
  27. keras_hub/src/tests/test_case.py +11 -3
  28. keras_hub/src/tokenizers/byte_pair_tokenizer.py +1 -0
  29. keras_hub/src/tokenizers/sentence_piece_tokenizer.py +1 -0
  30. keras_hub/src/tokenizers/word_piece_tokenizer.py +1 -0
  31. keras_hub/src/utils/imagenet/__init__.py +13 -0
  32. keras_hub/src/utils/imagenet/imagenet_utils.py +1067 -0
  33. keras_hub/src/utils/preset_utils.py +10 -1
  34. keras_hub/src/utils/tensor_utils.py +14 -14
  35. keras_hub/src/utils/timm/convert_resnet.py +0 -8
  36. keras_hub/src/utils/timm/preset_loader.py +16 -1
  37. keras_hub/src/version_utils.py +1 -1
  38. keras_hub_nightly-0.16.1.dev202409210335.dist-info/METADATA +202 -0
  39. {keras_hub_nightly-0.16.0.dev20240915160609.dist-info → keras_hub_nightly-0.16.1.dev202409210335.dist-info}/RECORD +41 -34
  40. {keras_hub_nightly-0.16.0.dev20240915160609.dist-info → keras_hub_nightly-0.16.1.dev202409210335.dist-info}/WHEEL +1 -1
  41. keras_hub_nightly-0.16.0.dev20240915160609.dist-info/METADATA +0 -33
  42. {keras_hub_nightly-0.16.0.dev20240915160609.dist-info → keras_hub_nightly-0.16.1.dev202409210335.dist-info}/top_level.txt +0 -0
keras_hub/__init__.py CHANGED
@@ -11,12 +11,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-"""DO NOT EDIT.
-
-This file was autogenerated. Do not edit it by hand,
-since your modifications would be overwritten.
-"""
-
 import os
 
 # sentencepiece segfaults on some version of tensorflow if tf is imported first.
keras_hub/api/__init__.py CHANGED
@@ -23,6 +23,7 @@ from keras_hub.api import metrics
 from keras_hub.api import models
 from keras_hub.api import samplers
 from keras_hub.api import tokenizers
+from keras_hub.api import utils
 from keras_hub.src.utils.preset_utils import upload_preset
 from keras_hub.src.version_utils import __version__
 from keras_hub.src.version_utils import version
keras_hub/api/utils/__init__.py ADDED
@@ -0,0 +1,22 @@
+# Copyright 2024 The KerasHub Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""DO NOT EDIT.
+
+This file was autogenerated. Do not edit it by hand,
+since your modifications would be overwritten.
+"""
+
+from keras_hub.src.utils.imagenet.imagenet_utils import (
+    decode_imagenet_predictions,
+)
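The new `keras_hub.api.utils` module re-exports `decode_imagenet_predictions`. A minimal usage sketch, assuming the module surfaces as `keras_hub.utils` the way the other api submodules do, and that the function mirrors `keras.applications.imagenet_utils.decode_predictions`; the `top` argument is an assumption, not shown in this diff:

```python
import numpy as np
import keras_hub

# Hypothetical usage; `top=5` and the `keras_hub.utils` surface are
# assumptions, not confirmed by this diff.
classifier = keras_hub.models.ResNetImageClassifier.from_preset(
    "resnet_50_imagenet"
)
preds = classifier.predict(np.ones((2, 224, 224, 3), dtype="float32"))
print(keras_hub.utils.decode_imagenet_predictions(preds, top=5))
```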
keras_hub/src/api_export.py CHANGED
@@ -14,7 +14,7 @@
 
 import types
 
-import keras
+from keras.saving import register_keras_serializable
 
 try:
     import namex
@@ -22,14 +22,20 @@ except ImportError:
     namex = None
 
 
-def maybe_register_serializable(symbol):
+def maybe_register_serializable(path, symbol):
+    if isinstance(path, (list, tuple)):
+        # If we have multiple export names, actually make sure to register these
+        # first. This makes sure we have a backward compat mapping of old
+        # serialized names to new class.
+        for name in path:
+            name = name.split(".")[-1]
+            register_keras_serializable(package="keras_nlp", name=name)(symbol)
+            register_keras_serializable(package="keras_hub", name=name)(symbol)
     if isinstance(symbol, types.FunctionType) or hasattr(symbol, "get_config"):
-        # We register twice, first with the old name, second with the new name,
-        # so loading still works under the old name.
-        # TODO replace keras_nlp with keras-hub after rename.
-        compat_name = "keras_nlp"
-        keras.saving.register_keras_serializable(package=compat_name)(symbol)
-        keras.saving.register_keras_serializable(package="keras_hub")(symbol)
+        # We register twice, first with keras_nlp, second with keras_hub,
+        # so loading still works for classes saved as "keras_nlp".
+        register_keras_serializable(package="keras_nlp")(symbol)
+        register_keras_serializable(package="keras_hub")(symbol)
 
 
 if namex:
@@ -39,15 +45,15 @@ if namex:
             super().__init__(package="keras_hub", path=path)
 
         def __call__(self, symbol):
-            maybe_register_serializable(symbol)
+            maybe_register_serializable(self.path, symbol)
             return super().__call__(symbol)
 
 else:
 
     class keras_hub_export:
         def __init__(self, path):
-            pass
+            self.path = path
 
         def __call__(self, symbol):
-            maybe_register_serializable(symbol)
+            maybe_register_serializable(self.path, symbol)
             return symbol
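The effect of the rewritten `maybe_register_serializable` is that every exported symbol resolves under both the legacy `keras_nlp` serialization prefix and the new `keras_hub` one, so models saved before the rename keep deserializing. A minimal standalone sketch of that dual registration; the class here is illustrative, not from the package:

```python
from keras.saving import get_registered_object, register_keras_serializable

class MyTokenizer:  # stand-in for any exported symbol with `get_config`
    def get_config(self):
        return {}

# Register under both package prefixes, as maybe_register_serializable does.
register_keras_serializable(package="keras_nlp")(MyTokenizer)
register_keras_serializable(package="keras_hub")(MyTokenizer)

# Config entries saved under either prefix now resolve to the same class.
assert get_registered_object("keras_nlp>MyTokenizer") is MyTokenizer
assert get_registered_object("keras_hub>MyTokenizer") is MyTokenizer
```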
keras_hub/src/layers/preprocessing/resizing_image_converter.py CHANGED
@@ -12,9 +12,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import keras
+from keras import ops
 
 from keras_hub.src.api_export import keras_hub_export
 from keras_hub.src.layers.preprocessing.image_converter import ImageConverter
+from keras_hub.src.utils.keras_utils import standardize_data_format
 from keras_hub.src.utils.tensor_utils import preprocessing_function
 
 
@@ -23,13 +25,23 @@ class ResizingImageConverter(ImageConverter):
     """An `ImageConverter` that simply resizes the input image.
 
     The `ResizingImageConverter` is a subclass of `ImageConverter` for models
-    that simply need to resize image tensors before using them for modeling.
-    The layer will take as input a raw image tensor (batched or unbatched) in the
-    channels last or channels first format, and output a resize tensor.
+    that need to resize (and optionally rescale) image tensors before using
+    them for modeling. The layer will take as input a raw image tensor
+    (batched or unbatched) in the channels last or channels first format,
+    and output a resized tensor.
 
     Args:
-        height: Integer, the height of the output shape.
-        width: Integer, the width of the output shape.
+        height: int, the height of the output shape.
+        width: int, the width of the output shape.
+        scale: float or `None`. If set, the image will be rescaled with a
+            `keras.layers.Rescaling` layer, multiplying the image by this
+            scale.
+        mean: tuple of floats per channel or `None`. If set, the image will
+            be normalized per channel by subtracting the mean.
+            If set, also set `variance`.
+        variance: tuple of floats per channel or `None`. If set, the image
+            will be normalized per channel by dividing by `sqrt(variance)`.
+            If set, also set `mean`.
         crop_to_aspect_ratio: If `True`, resize the images without aspect
             ratio distortion. When the original aspect ratio differs
             from the target aspect ratio, the output image will be
@@ -64,6 +76,9 @@ class ResizingImageConverter(ImageConverter):
         self,
         height,
         width,
+        scale=None,
+        mean=None,
+        variance=None,
         crop_to_aspect_ratio=True,
         interpolation="bilinear",
         data_format=None,
@@ -78,7 +93,26 @@ class ResizingImageConverter(ImageConverter):
             crop_to_aspect_ratio=crop_to_aspect_ratio,
             interpolation=interpolation,
             data_format=data_format,
+            dtype=self.dtype_policy,
+            name="resizing",
         )
+        if scale is not None:
+            self.rescaling = keras.layers.Rescaling(
+                scale=scale,
+                dtype=self.dtype_policy,
+                name="rescaling",
+            )
+        else:
+            self.rescaling = None
+        if (mean is not None) != (variance is not None):
+            raise ValueError(
+                "Both `mean` and `variance` should be set or `None`. Received "
+                f"`mean={mean}`, `variance={variance}`."
+            )
+        self.scale = scale
+        self.mean = mean
+        self.variance = variance
+        self.data_format = standardize_data_format(data_format)
 
     def image_size(self):
         """Returns the preprocessed size of a single image."""
@@ -86,7 +120,20 @@ class ResizingImageConverter(ImageConverter):
 
     @preprocessing_function
     def call(self, inputs):
-        return self.resizing(inputs)
+        x = self.resizing(inputs)
+        if self.rescaling:
+            x = self.rescaling(x)
+        if self.mean is not None:
+            # Avoid `layers.Normalization` so this works batched and unbatched.
+            channels_first = self.data_format == "channels_first"
+            if len(ops.shape(inputs)) == 3:
+                broadcast_dims = (1, 2) if channels_first else (0, 1)
+            else:
+                broadcast_dims = (0, 2, 3) if channels_first else (0, 1, 2)
+            mean = ops.expand_dims(ops.array(self.mean), broadcast_dims)
+            std = ops.expand_dims(ops.sqrt(self.variance), broadcast_dims)
+            x = (x - mean) / std
+        return x
 
     def get_config(self):
         config = super().get_config()
@@ -96,6 +143,9 @@ class ResizingImageConverter(ImageConverter):
                 "width": self.resizing.width,
                 "interpolation": self.resizing.interpolation,
                 "crop_to_aspect_ratio": self.resizing.crop_to_aspect_ratio,
+                "scale": self.scale,
+                "mean": self.mean,
+                "variance": self.variance,
             }
         )
         return config
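With the new `scale`, `mean`, and `variance` arguments, normalization that several backbones used to hard-code (see the `resnet_backbone.py` hunks below) can now live in preprocessing. A sketch, assuming the class is imported from its file path as shown in this diff and configured with the usual ImageNet statistics:

```python
import numpy as np
from keras_hub.src.layers.preprocessing.resizing_image_converter import (
    ResizingImageConverter,
)

# Sketch only: ImageNet statistics matching what ResNetBackbone removed.
converter = ResizingImageConverter(
    height=224,
    width=224,
    scale=1 / 255.0,
    mean=(0.485, 0.456, 0.406),
    variance=(0.229**2, 0.224**2, 0.225**2),
)
images = np.random.uniform(0, 255, size=(2, 256, 256, 3)).astype("float32")
print(converter(images).shape)  # (2, 224, 224, 3)
```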
keras_hub/src/models/csp_darknet/csp_darknet_backbone.py CHANGED
@@ -31,9 +31,6 @@ class CSPDarkNetBackbone(FeaturePyramidBackbone):
             level in the model.
         stackwise_depth: A list of ints, the depth for each dark level in the
             model.
-        include_rescaling: boolean. If `True`, rescale the input using
-            `Rescaling(1 / 255.0)` layer. If `False`, do nothing. Defaults to
-            `True`.
         block_type: str. One of `"basic_block"` or `"depthwise_block"`.
             Use `"depthwise_block"` for depthwise conv block
             `"basic_block"` for basic conv block.
@@ -55,7 +52,6 @@ class CSPDarkNetBackbone(FeaturePyramidBackbone):
     model = keras_hub.models.CSPDarkNetBackbone(
         stackwise_num_filters=[128, 256, 512, 1024],
         stackwise_depth=[3, 9, 9, 3],
-        include_rescaling=False,
     )
     model(input_data)
     ```
@@ -65,7 +61,6 @@ class CSPDarkNetBackbone(FeaturePyramidBackbone):
         self,
         stackwise_num_filters,
         stackwise_depth,
-        include_rescaling=True,
         block_type="basic_block",
         image_shape=(None, None, 3),
         **kwargs,
@@ -82,10 +77,7 @@ class CSPDarkNetBackbone(FeaturePyramidBackbone):
         base_channels = stackwise_num_filters[0] // 2
 
         image_input = layers.Input(shape=image_shape)
-        x = image_input
-        if include_rescaling:
-            x = layers.Rescaling(scale=1 / 255.0)(x)
-
+        x = image_input  # Intermediate result.
         x = apply_focus(channel_axis, name="stem_focus")(x)
         x = apply_darknet_conv_block(
             base_channels,
@@ -130,7 +122,6 @@
         # === Config ===
         self.stackwise_num_filters = stackwise_num_filters
         self.stackwise_depth = stackwise_depth
-        self.include_rescaling = include_rescaling
         self.block_type = block_type
         self.image_shape = image_shape
         self.pyramid_outputs = pyramid_outputs
@@ -141,7 +132,6 @@
             {
                 "stackwise_num_filters": self.stackwise_num_filters,
                 "stackwise_depth": self.stackwise_depth,
-                "include_rescaling": self.include_rescaling,
                 "block_type": self.block_type,
                 "image_shape": self.image_shape,
             }
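This is the first of several backbones in this release that drop `include_rescaling`; the same removal repeats below for DenseNet, EfficientNet, MiT, MobileNet, and ResNet. Callers that relied on the default `include_rescaling=True` now need to rescale explicitly, or route images through an image converter. A migration sketch, using the constructor arguments from the docstring above:

```python
import numpy as np
import keras_hub

model = keras_hub.models.CSPDarkNetBackbone(
    stackwise_num_filters=[128, 256, 512, 1024],
    stackwise_depth=[3, 9, 9, 3],
)
images = np.random.randint(0, 256, size=(1, 224, 224, 3)).astype("float32")
# What `include_rescaling=True` used to do inside the backbone:
features = model(images / 255.0)
```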
keras_hub/src/models/csp_darknet/csp_darknet_image_classifier.py CHANGED
@@ -76,7 +76,6 @@ class CSPDarkNetImageClassifier(ImageClassifier):
     backbone = keras_hub.models.CSPDarkNetBackbone(
         stackwise_num_filters=[128, 256, 512, 1024],
         stackwise_depth=[3, 9, 9, 3],
-        include_rescaling=False,
         block_type="basic_block",
         image_shape = (224, 224, 3),
     )
keras_hub/src/models/densenet/densenet_backbone.py CHANGED
@@ -31,9 +31,6 @@ class DenseNetBackbone(FeaturePyramidBackbone):
     Args:
         stackwise_num_repeats: list of ints, number of repeated convolutional
             blocks per dense block.
-        include_rescaling: bool, whether to rescale the inputs. If set
-            to `True`, inputs will be passed through a `Rescaling(1/255.0)`
-            layer. Defaults to `True`.
         image_shape: optional shape tuple, defaults to (None, None, 3).
         compression_ratio: float, compression rate at transition layers,
             defaults to 0.5.
@@ -51,7 +48,6 @@
     # Randomly initialized backbone with a custom config
     model = keras_hub.models.DenseNetBackbone(
         stackwise_num_repeats=[6, 12, 24, 16],
-        include_rescaling=False,
     )
     model(input_data)
     ```
@@ -60,7 +56,6 @@
     def __init__(
         self,
         stackwise_num_repeats,
-        include_rescaling=True,
         image_shape=(None, None, 3),
         compression_ratio=0.5,
         growth_rate=32,
@@ -71,10 +66,7 @@
         channel_axis = -1 if data_format == "channels_last" else 1
         image_input = keras.layers.Input(shape=image_shape)
 
-        x = image_input
-        if include_rescaling:
-            x = keras.layers.Rescaling(1 / 255.0)(x)
-
+        x = image_input  # Intermediate result.
         x = keras.layers.Conv2D(
             64,
             7,
@@ -114,7 +106,7 @@
             growth_rate,
             name=f"conv{len(stackwise_num_repeats) + 1}",
         )
-        pyramid_outputs[f"P{len(stackwise_num_repeats) +1}"] = x
+        pyramid_outputs[f"P{len(stackwise_num_repeats) + 1}"] = x
         x = keras.layers.BatchNormalization(
             axis=channel_axis, epsilon=BN_EPSILON, name="bn"
         )(x)
@@ -124,7 +116,6 @@
 
         # === Config ===
         self.stackwise_num_repeats = stackwise_num_repeats
-        self.include_rescaling = include_rescaling
        self.compression_ratio = compression_ratio
        self.growth_rate = growth_rate
        self.image_shape = image_shape
@@ -135,7 +126,6 @@
         config.update(
             {
                 "stackwise_num_repeats": self.stackwise_num_repeats,
-                "include_rescaling": self.include_rescaling,
                 "compression_ratio": self.compression_ratio,
                 "growth_rate": self.growth_rate,
                 "image_shape": self.image_shape,
keras_hub/src/models/densenet/densenet_image_classifier.py CHANGED
@@ -74,7 +74,6 @@ class DenseNetImageClassifier(ImageClassifier):
     backbone = keras_hub.models.DenseNetBackbone(
         stackwise_num_filters=[128, 256, 512, 1024],
         stackwise_depth=[3, 9, 9, 3],
-        include_rescaling=False,
         block_type="basic_block",
         image_shape = (224, 224, 3),
     )
keras_hub/src/models/efficientnet/efficientnet_backbone.py CHANGED
@@ -67,8 +67,6 @@ class EfficientNetBackbone(FeaturePyramidBackbone):
             MBConvBlock, but instead of using a depthwise convolution and a 1x1
             output convolution blocks fused blocks use a single 3x3 convolution
             block.
-        include_rescaling: bool, whether to rescale the inputs. If set to
-            True, inputs will be passed through a `Rescaling(1/255.0)` layer.
         min_depth: integer, minimum number of filters. Can be None and ignored
             if use_depth_divisor_as_min_depth is set to True.
         include_initial_padding: bool, whether to include initial zero padding
@@ -96,7 +94,6 @@ class EfficientNetBackbone(FeaturePyramidBackbone):
         stackwise_block_types=[["fused"] * 3 + ["unfused"] * 3],
         width_coefficient=1.0,
         depth_coefficient=1.0,
-        include_rescaling=False,
     )
     images = np.ones((1, 256, 256, 3))
     outputs = efficientnet.predict(images)
@@ -116,7 +113,6 @@
         stackwise_squeeze_and_excite_ratios,
         stackwise_strides,
         stackwise_block_types,
-        include_rescaling=True,
         dropout=0.2,
         depth_divisor=8,
         min_depth=8,
@@ -129,14 +125,9 @@
         batch_norm_momentum=0.9,
         **kwargs,
     ):
-        img_input = keras.layers.Input(shape=input_shape)
-
-        x = img_input
-
-        if include_rescaling:
-            # Use common rescaling strategy across keras
-            x = keras.layers.Rescaling(scale=1.0 / 255.0)(x)
+        image_input = keras.layers.Input(shape=input_shape)
 
+        x = image_input  # Intermediate result.
         if include_initial_padding:
             x = keras.layers.ZeroPadding2D(
                 padding=self._correct_pad_downsample(x, 3),
@@ -282,10 +273,9 @@
             curr_pyramid_level += 1
 
         # Create model.
-        super().__init__(inputs=img_input, outputs=x, **kwargs)
+        super().__init__(inputs=image_input, outputs=x, **kwargs)
 
         # === Config ===
-        self.include_rescaling = include_rescaling
         self.width_coefficient = width_coefficient
         self.depth_coefficient = depth_coefficient
         self.dropout = dropout
@@ -313,7 +303,6 @@
         config = super().get_config()
         config.update(
             {
-                "include_rescaling": self.include_rescaling,
                 "width_coefficient": self.width_coefficient,
                 "depth_coefficient": self.depth_coefficient,
                 "dropout": self.dropout,
keras_hub/src/models/gemma/gemma_decoder_block.py CHANGED
@@ -68,7 +68,7 @@ class GemmaDecoderBlock(keras.layers.Layer):
         self.post_attention_norm = RMSNormalization(
             epsilon=self.layer_norm_epsilon,
             dtype=self.dtype_policy,
-            name="pre_attention_norm",
+            name="post_attention_norm",
         )
 
         self.attention = CachedGemmaAttention(
keras_hub/src/models/mix_transformer/mix_transformer_backbone.py CHANGED
@@ -36,7 +36,6 @@ class MiTBackbone(FeaturePyramidBackbone):
         end_value,
         patch_sizes,
         strides,
-        include_rescaling=True,
         image_shape=(None, None, 3),
         hidden_dims=None,
         **kwargs,
@@ -60,9 +59,6 @@
             value projections. If set to > 1, a `Conv2D` layer is used to
             reduce the length of the sequence.
         end_value: The end value of the sequence.
-        include_rescaling: bool, whether to rescale the inputs. If set
-            to `True`, inputs will be passed through a `Rescaling(1/255.0)`
-            layer. Defaults to `True`.
         image_shape: optional shape tuple, defaults to (None, None, 3).
         hidden_dims: the embedding dims per hierarchical layer, used as
             the levels of the feature pyramid.
@@ -123,11 +119,7 @@
 
         # === Functional Model ===
         image_input = keras.layers.Input(shape=image_shape)
-        x = image_input
-
-        if include_rescaling:
-            x = keras.layers.Rescaling(scale=1 / 255)(x)
-
+        x = image_input  # Intermediate result.
         pyramid_outputs = {}
         for i in range(num_layers):
             # Compute new height/width after the `proj`
@@ -151,7 +143,6 @@
 
         # === Config ===
         self.depths = depths
-        self.include_rescaling = include_rescaling
         self.image_shape = image_shape
         self.hidden_dims = hidden_dims
         self.pyramid_outputs = pyramid_outputs
@@ -167,7 +158,6 @@
         config.update(
             {
                 "depths": self.depths,
-                "include_rescaling": self.include_rescaling,
                 "hidden_dims": self.hidden_dims,
                 "image_shape": self.image_shape,
                 "num_layers": self.num_layers,
keras_hub/src/models/mix_transformer/mix_transformer_classifier.py CHANGED
@@ -76,7 +76,6 @@ class MiTImageClassifier(ImageClassifier):
     backbone = keras_hub.models.MiTBackbone(
         stackwise_num_filters=[128, 256, 512, 1024],
         stackwise_depth=[3, 9, 9, 3],
-        include_rescaling=False,
         block_type="basic_block",
         image_shape = (224, 224, 3),
     )
keras_hub/src/models/mobilenet/mobilenet_backbone.py CHANGED
@@ -54,9 +54,6 @@ class MobileNetBackbone(Backbone):
             model. 0 if dont want to add Squeeze and Excite layer.
         stackwise_activation: list of activation functions, for each inverted
             residual block in the model.
-        include_rescaling: bool, whether to rescale the inputs. If set to True,
-            inputs will be passed through a `Rescaling(scale=1 / 255)`
-            layer.
         image_shape: optional shape tuple, defaults to (224, 224, 3).
         depth_multiplier: float, controls the width of the network.
             - If `depth_multiplier` < 1.0, proportionally decreases the number
@@ -92,7 +89,6 @@ class MobileNetBackbone(Backbone):
         stackwise_num_strides=[2, 2, 1],
         stackwise_se_ratio=[0.25, None, 0.25],
         stackwise_activation=["relu", "relu6", "hard_swish"],
-        include_rescaling=False,
         output_num_filters=1280,
         input_activation='hard_swish',
         output_activation='hard_swish',
@@ -111,7 +107,6 @@
         stackwise_num_strides,
         stackwise_se_ratio,
         stackwise_activation,
-        include_rescaling,
         output_num_filters,
         inverted_res_block,
         image_shape=(224, 224, 3),
@@ -126,12 +121,8 @@
             -1 if keras.config.image_data_format() == "channels_last" else 1
         )
 
-        inputs = keras.layers.Input(shape=image_shape)
-        x = inputs
-
-        if include_rescaling:
-            x = keras.layers.Rescaling(scale=1 / 255)(x)
-
+        image_input = keras.layers.Input(shape=image_shape)
+        x = image_input  # Intermediate result.
         input_num_filters = adjust_channels(input_num_filters)
         x = keras.layers.Conv2D(
             input_num_filters,
@@ -195,7 +186,7 @@
         )(x)
         x = keras.layers.Activation(output_activation)(x)
 
-        super().__init__(inputs=inputs, outputs=x, **kwargs)
+        super().__init__(inputs=image_input, outputs=x, **kwargs)
 
         # === Config ===
         self.stackwise_expansion = stackwise_expansion
@@ -204,7 +195,6 @@
         self.stackwise_num_strides = stackwise_num_strides
         self.stackwise_se_ratio = stackwise_se_ratio
         self.stackwise_activation = stackwise_activation
-        self.include_rescaling = include_rescaling
         self.depth_multiplier = depth_multiplier
         self.input_num_filters = input_num_filters
         self.output_num_filters = output_num_filters
@@ -223,7 +213,6 @@
                 "stackwise_num_strides": self.stackwise_num_strides,
                 "stackwise_se_ratio": self.stackwise_se_ratio,
                 "stackwise_activation": self.stackwise_activation,
-                "include_rescaling": self.include_rescaling,
                 "image_shape": self.image_shape,
                 "depth_multiplier": self.depth_multiplier,
                 "input_num_filters": self.input_num_filters,
keras_hub/src/models/mobilenet/mobilenet_image_classifier.py CHANGED
@@ -56,7 +56,6 @@ class MobileNetImageClassifier(ImageClassifier):
         stackwise_stride = [2, 2, 1],
         stackwise_se_ratio = [ 0.25, None, 0.25],
         stackwise_activation = ["relu", "relu", "hard_swish"],
-        include_rescaling = False,
         output_filter=1280,
         activation="hard_swish",
         inverted_res_block=True,
keras_hub/src/models/pali_gemma/pali_gemma_vit.py CHANGED
@@ -476,6 +476,9 @@ class PaliGemmaVit(keras.Model):
             shape=(image_size, image_size, 3), name="images"
         )
         x = image_input  # Intermediate result.
+        # TODO we have moved this rescaling to preprocessing layers for most
+        # models. We should consider removing it here, though it would break
+        # compatibility.
         if include_rescaling:
             rescaling = keras.layers.Rescaling(
                 scale=1.0 / 127.5, offset=-1.0, name="rescaling"
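Unlike the backbones above, `PaliGemmaVit` keeps its in-model rescaling for checkpoint compatibility, per the new TODO. The constants map `[0, 255]` pixel values onto `[-1, 1]`, which a quick check confirms:

```python
# scale=1.0 / 127.5 with offset=-1.0 maps [0, 255] to [-1, 1]:
assert 0.0 / 127.5 - 1.0 == -1.0
assert 127.5 / 127.5 - 1.0 == 0.0
assert 255.0 / 127.5 - 1.0 == 1.0
```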
keras_hub/src/models/resnet/resnet_backbone.py CHANGED
@@ -44,9 +44,6 @@ class ResNetBackbone(FeaturePyramidBackbone):
     additional pooling operation rather than performing downsampling within
     the convolutional layers themselves.
 
-    Note that `ResNetBackbone` expects the inputs to be images with a value
-    range of `[0, 255]` when `include_rescaling=True`.
-
     Args:
         input_conv_filters: list of ints. The number of filters of the initial
             convolution(s).
@@ -65,9 +62,6 @@
             variants.
         use_pre_activation: boolean. Whether to use pre-activation or not.
             `True` for ResNetV2, `False` for ResNet.
-        include_rescaling: boolean. If `True`, rescale the input using
-            `Rescaling` and `Normalization` layers. If `False`, do nothing.
-            Defaults to `True`.
         image_shape: tuple. The input shape without the batch size.
             Defaults to `(None, None, 3)`.
         pooling: `None` or str. Pooling mode for feature extraction. Defaults
@@ -124,7 +118,6 @@
         stackwise_num_strides,
         block_type,
         use_pre_activation=False,
-        include_rescaling=True,
         image_shape=(None, None, 3),
         data_format=None,
         dtype=None,
@@ -170,18 +163,7 @@
 
         # === Functional Model ===
         image_input = layers.Input(shape=image_shape)
-        if include_rescaling:
-            x = layers.Rescaling(scale=1 / 255.0, dtype=dtype)(image_input)
-            x = layers.Normalization(
-                axis=bn_axis,
-                mean=(0.485, 0.456, 0.406),
-                variance=(0.229**2, 0.224**2, 0.225**2),
-                dtype=dtype,
-                name="normalization",
-            )(x)
-        else:
-            x = image_input
-
+        x = image_input  # Intermediate result.
         # The padding between torch and tensorflow/jax differs when `strides>1`.
         # Therefore, we need to manually pad the tensor.
         x = layers.ZeroPadding2D(
@@ -299,7 +281,6 @@
         self.stackwise_num_strides = stackwise_num_strides
         self.block_type = block_type
         self.use_pre_activation = use_pre_activation
-        self.include_rescaling = include_rescaling
         self.image_shape = image_shape
         self.pyramid_outputs = pyramid_outputs
         self.data_format = data_format
@@ -315,7 +296,6 @@
                 "stackwise_num_strides": self.stackwise_num_strides,
                 "block_type": self.block_type,
                 "use_pre_activation": self.use_pre_activation,
-                "include_rescaling": self.include_rescaling,
                 "image_shape": self.image_shape,
             }
         )
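The deleted `Rescaling` + `Normalization` pair is equivalent to `(x / 255 - mean) / sqrt(variance)` per channel, which is exactly what `ResizingImageConverter` now computes when configured with these statistics (see its diff above). A numpy sketch of the removed math:

```python
import numpy as np

# The normalization the backbone used to apply, written out by hand.
mean = np.array([0.485, 0.456, 0.406])
std = np.sqrt(np.array([0.229**2, 0.224**2, 0.225**2]))  # == (0.229, 0.224, 0.225)
x = np.random.randint(0, 256, size=(1, 224, 224, 3)) / 255.0
normalized = (x - mean) / std  # broadcasts over the trailing channel axis
```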
keras_hub/src/models/resnet/resnet_image_classifier.py CHANGED
@@ -45,7 +45,9 @@ class ResNetImageClassifier(ImageClassifier):
     ```python
     # Load preset and train
     images = np.ones((2, 224, 224, 3), dtype="float32")
-    classifier = keras_hub.models.ResNetImageClassifier.from_preset("resnet50")
+    classifier = keras_hub.models.ResNetImageClassifier.from_preset(
+        "resnet_50_imagenet"
+    )
     classifier.predict(images)
     ```
 
@@ -54,13 +56,17 @@ class ResNetImageClassifier(ImageClassifier):
     # Load preset and train
     images = np.ones((2, 224, 224, 3), dtype="float32")
     labels = [0, 3]
-    classifier = keras_hub.models.ResNetImageClassifier.from_preset("resnet50")
+    classifier = keras_hub.models.ResNetImageClassifier.from_preset(
+        "resnet_50_imagenet"
+    )
     classifier.fit(x=images, y=labels, batch_size=2)
     ```
 
     Call `fit()` with custom loss, optimizer and backbone.
     ```python
-    classifier = keras_hub.models.ResNetImageClassifier.from_preset("resnet50")
+    classifier = keras_hub.models.ResNetImageClassifier.from_preset(
+        "resnet_50_imagenet"
+    )
     classifier.compile(
         loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
         optimizer=keras.optimizers.Adam(5e-5),
@@ -79,7 +85,6 @@ class ResNetImageClassifier(ImageClassifier):
         stackwise_num_strides=[1, 2, 2],
         block_type="basic_block",
         use_pre_activation=True,
-        include_rescaling=False,
         pooling="avg",
     )
     classifier = keras_hub.models.ResNetImageClassifier(