keras-hub-nightly 0.16.0.dev202409201942__py3-none-any.whl → 0.16.1.dev202409210335__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (25)
  1. keras_hub/src/api_export.py +2 -2
  2. keras_hub/src/layers/preprocessing/resizing_image_converter.py +56 -6
  3. keras_hub/src/models/csp_darknet/csp_darknet_backbone.py +1 -11
  4. keras_hub/src/models/csp_darknet/csp_darknet_image_classifier.py +0 -1
  5. keras_hub/src/models/densenet/densenet_backbone.py +1 -11
  6. keras_hub/src/models/densenet/densenet_image_classifier.py +0 -1
  7. keras_hub/src/models/efficientnet/efficientnet_backbone.py +3 -14
  8. keras_hub/src/models/mix_transformer/mix_transformer_backbone.py +1 -11
  9. keras_hub/src/models/mix_transformer/mix_transformer_classifier.py +0 -1
  10. keras_hub/src/models/mobilenet/mobilenet_backbone.py +3 -14
  11. keras_hub/src/models/mobilenet/mobilenet_image_classifier.py +0 -1
  12. keras_hub/src/models/pali_gemma/pali_gemma_vit.py +3 -0
  13. keras_hub/src/models/resnet/resnet_backbone.py +1 -21
  14. keras_hub/src/models/resnet/resnet_image_classifier.py +0 -1
  15. keras_hub/src/models/resnet/resnet_presets.py +6 -6
  16. keras_hub/src/models/vgg/vgg_backbone.py +0 -8
  17. keras_hub/src/models/vgg/vgg_image_classifier.py +0 -1
  18. keras_hub/src/models/vit_det/vit_det_backbone.py +0 -9
  19. keras_hub/src/utils/timm/convert_resnet.py +0 -8
  20. keras_hub/src/utils/timm/preset_loader.py +16 -1
  21. keras_hub/src/version_utils.py +1 -1
  22. {keras_hub_nightly-0.16.0.dev202409201942.dist-info → keras_hub_nightly-0.16.1.dev202409210335.dist-info}/METADATA +1 -1
  23. {keras_hub_nightly-0.16.0.dev202409201942.dist-info → keras_hub_nightly-0.16.1.dev202409210335.dist-info}/RECORD +25 -25
  24. {keras_hub_nightly-0.16.0.dev202409201942.dist-info → keras_hub_nightly-0.16.1.dev202409210335.dist-info}/WHEEL +0 -0
  25. {keras_hub_nightly-0.16.0.dev202409201942.dist-info → keras_hub_nightly-0.16.1.dev202409210335.dist-info}/top_level.txt +0 -0
keras_hub/src/api_export.py
@@ -52,8 +52,8 @@ else:

     class keras_hub_export:
         def __init__(self, path):
-            pass
+            self.path = path

         def __call__(self, symbol):
-            maybe_register_serializable(symbol)
+            maybe_register_serializable(self.path, symbol)
             return symbol
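
For orientation, `keras_hub_export` is used as a decorator that takes an API path string; after this change the path is stored on the decorator and forwarded when the symbol is registered as serializable. A minimal sketch of the usage pattern (the decorated class below is purely illustrative; real exports in this package look the same, e.g. `@keras_hub_export("keras_hub.version")` in version_utils.py):

from keras_hub.src.api_export import keras_hub_export

# Hypothetical symbol used only to show the decorator pattern.
@keras_hub_export("keras_hub.examples.MyExample")
class MyExample:
    pass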
keras_hub/src/layers/preprocessing/resizing_image_converter.py
@@ -12,9 +12,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import keras
+from keras import ops

 from keras_hub.src.api_export import keras_hub_export
 from keras_hub.src.layers.preprocessing.image_converter import ImageConverter
+from keras_hub.src.utils.keras_utils import standardize_data_format
 from keras_hub.src.utils.tensor_utils import preprocessing_function


@@ -23,13 +25,23 @@ class ResizingImageConverter(ImageConverter):
     """An `ImageConverter` that simply resizes the input image.

     The `ResizingImageConverter` is a subclass of `ImageConverter` for models
-    that simply need to resize image tensors before using them for modeling.
-    The layer will take as input a raw image tensor (batched or unbatched) in the
-    channels last or channels first format, and output a resize tensor.
+    that need to resize (and optionally rescale) image tensors before using them
+    for modeling. The layer will take as input a raw image tensor (batched or
+    unbatched) in the channels last or channels first format, and output a
+    resized tensor.

     Args:
-        height: Integer, the height of the output shape.
-        width: Integer, the width of the output shape.
+        height: int, the height of the output shape.
+        width: int, the width of the output shape.
+        scale: float or `None`. If set, the image will be rescaled with a
+            `keras.layers.Rescaling` layer, multiplying the image by this
+            scale.
+        mean: tuple of floats per channel or `None`. If set, the image will be
+            normalized per channel by subtracting the mean.
+            If set, also set `variance`.
+        variance: tuple of floats per channel or `None`. If set, the image will
+            be normalized per channel by dividing by `sqrt(variance)`.
+            If set, also set `mean`.
         crop_to_aspect_ratio: If `True`, resize the images without aspect
             ratio distortion. When the original aspect ratio differs
            from the target aspect ratio, the output image will be
@@ -64,6 +76,9 @@ class ResizingImageConverter(ImageConverter):
         self,
         height,
         width,
+        scale=None,
+        mean=None,
+        variance=None,
         crop_to_aspect_ratio=True,
         interpolation="bilinear",
         data_format=None,
@@ -78,7 +93,26 @@ class ResizingImageConverter(ImageConverter):
             crop_to_aspect_ratio=crop_to_aspect_ratio,
             interpolation=interpolation,
             data_format=data_format,
+            dtype=self.dtype_policy,
+            name="resizing",
         )
+        if scale is not None:
+            self.rescaling = keras.layers.Rescaling(
+                scale=scale,
+                dtype=self.dtype_policy,
+                name="rescaling",
+            )
+        else:
+            self.rescaling = None
+        if (mean is not None) != (variance is not None):
+            raise ValueError(
+                "Both `mean` and `variance` should be set or `None`. Received "
+                f"`mean={mean}`, `variance={variance}`."
+            )
+        self.scale = scale
+        self.mean = mean
+        self.variance = variance
+        self.data_format = standardize_data_format(data_format)

     def image_size(self):
         """Returns the preprocessed size of a single image."""
@@ -86,7 +120,20 @@ class ResizingImageConverter(ImageConverter):

     @preprocessing_function
     def call(self, inputs):
-        return self.resizing(inputs)
+        x = self.resizing(inputs)
+        if self.rescaling:
+            x = self.rescaling(x)
+        if self.mean is not None:
+            # Avoid `layers.Normalization` so this works batched and unbatched.
+            channels_first = self.data_format == "channels_first"
+            if len(ops.shape(inputs)) == 3:
+                broadcast_dims = (1, 2) if channels_first else (0, 1)
+            else:
+                broadcast_dims = (0, 2, 3) if channels_first else (0, 1, 2)
+            mean = ops.expand_dims(ops.array(self.mean), broadcast_dims)
+            std = ops.expand_dims(ops.sqrt(self.variance), broadcast_dims)
+            x = (x - mean) / std
+        return x

     def get_config(self):
         config = super().get_config()
@@ -96,6 +143,9 @@ class ResizingImageConverter(ImageConverter):
                 "width": self.resizing.width,
                 "interpolation": self.resizing.interpolation,
                 "crop_to_aspect_ratio": self.resizing.crop_to_aspect_ratio,
+                "scale": self.scale,
+                "mean": self.mean,
+                "variance": self.variance,
             }
         )
         return config
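
Taken together, these hunks let the converter resize, rescale, and normalize in a single preprocessing step. A minimal sketch of how the extended layer might be used, assuming the sizes and statistics below (the ImageNet-style values are borrowed from elsewhere in this diff and are illustrative, not a documented preset):

import numpy as np
from keras_hub.src.layers.preprocessing.resizing_image_converter import (
    ResizingImageConverter,
)

# Resize to 224x224, scale [0, 255] pixels to [0, 1], then normalize each
# channel with ImageNet-style mean/variance.
converter = ResizingImageConverter(
    height=224,
    width=224,
    scale=1 / 255.0,
    mean=(0.485, 0.456, 0.406),
    variance=(0.229**2, 0.224**2, 0.225**2),
)
images = np.random.uniform(0, 255, size=(2, 512, 512, 3)).astype("float32")
outputs = converter(images)  # Expected shape: (2, 224, 224, 3).

Note that the `call` path works for both batched and unbatched inputs because the normalization broadcasts `mean` and `sqrt(variance)` manually instead of relying on `keras.layers.Normalization`.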
keras_hub/src/models/csp_darknet/csp_darknet_backbone.py
@@ -31,9 +31,6 @@ class CSPDarkNetBackbone(FeaturePyramidBackbone):
             level in the model.
         stackwise_depth: A list of ints, the depth for each dark level in the
             model.
-        include_rescaling: boolean. If `True`, rescale the input using
-            `Rescaling(1 / 255.0)` layer. If `False`, do nothing. Defaults to
-            `True`.
         block_type: str. One of `"basic_block"` or `"depthwise_block"`.
             Use `"depthwise_block"` for depthwise conv block
             `"basic_block"` for basic conv block.
@@ -55,7 +52,6 @@ class CSPDarkNetBackbone(FeaturePyramidBackbone):
     model = keras_hub.models.CSPDarkNetBackbone(
         stackwise_num_filters=[128, 256, 512, 1024],
         stackwise_depth=[3, 9, 9, 3],
-        include_rescaling=False,
     )
     model(input_data)
     ```
@@ -65,7 +61,6 @@ class CSPDarkNetBackbone(FeaturePyramidBackbone):
         self,
         stackwise_num_filters,
         stackwise_depth,
-        include_rescaling=True,
         block_type="basic_block",
         image_shape=(None, None, 3),
         **kwargs,
@@ -82,10 +77,7 @@ class CSPDarkNetBackbone(FeaturePyramidBackbone):
         base_channels = stackwise_num_filters[0] // 2

         image_input = layers.Input(shape=image_shape)
-        x = image_input
-        if include_rescaling:
-            x = layers.Rescaling(scale=1 / 255.0)(x)
-
+        x = image_input  # Intermediate result.
         x = apply_focus(channel_axis, name="stem_focus")(x)
         x = apply_darknet_conv_block(
             base_channels,
@@ -130,7 +122,6 @@ class CSPDarkNetBackbone(FeaturePyramidBackbone):
         # === Config ===
         self.stackwise_num_filters = stackwise_num_filters
         self.stackwise_depth = stackwise_depth
-        self.include_rescaling = include_rescaling
         self.block_type = block_type
         self.image_shape = image_shape
         self.pyramid_outputs = pyramid_outputs
@@ -141,7 +132,6 @@ class CSPDarkNetBackbone(FeaturePyramidBackbone):
             {
                 "stackwise_num_filters": self.stackwise_num_filters,
                 "stackwise_depth": self.stackwise_depth,
-                "include_rescaling": self.include_rescaling,
                 "block_type": self.block_type,
                 "image_shape": self.image_shape,
             }
keras_hub/src/models/csp_darknet/csp_darknet_image_classifier.py
@@ -76,7 +76,6 @@ class CSPDarkNetImageClassifier(ImageClassifier):
     backbone = keras_hub.models.CSPDarkNetBackbone(
         stackwise_num_filters=[128, 256, 512, 1024],
         stackwise_depth=[3, 9, 9, 3],
-        include_rescaling=False,
         block_type="basic_block",
         image_shape = (224, 224, 3),
     )
keras_hub/src/models/densenet/densenet_backbone.py
@@ -31,9 +31,6 @@ class DenseNetBackbone(FeaturePyramidBackbone):
     Args:
         stackwise_num_repeats: list of ints, number of repeated convolutional
             blocks per dense block.
-        include_rescaling: bool, whether to rescale the inputs. If set
-            to `True`, inputs will be passed through a `Rescaling(1/255.0)`
-            layer. Defaults to `True`.
         image_shape: optional shape tuple, defaults to (None, None, 3).
         compression_ratio: float, compression rate at transition layers,
             defaults to 0.5.
@@ -51,7 +48,6 @@ class DenseNetBackbone(FeaturePyramidBackbone):
     # Randomly initialized backbone with a custom config
     model = keras_hub.models.DenseNetBackbone(
         stackwise_num_repeats=[6, 12, 24, 16],
-        include_rescaling=False,
     )
     model(input_data)
     ```
@@ -60,7 +56,6 @@ class DenseNetBackbone(FeaturePyramidBackbone):
     def __init__(
         self,
         stackwise_num_repeats,
-        include_rescaling=True,
         image_shape=(None, None, 3),
         compression_ratio=0.5,
         growth_rate=32,
@@ -71,10 +66,7 @@ class DenseNetBackbone(FeaturePyramidBackbone):
         channel_axis = -1 if data_format == "channels_last" else 1
         image_input = keras.layers.Input(shape=image_shape)

-        x = image_input
-        if include_rescaling:
-            x = keras.layers.Rescaling(1 / 255.0)(x)
-
+        x = image_input  # Intermediate result.
         x = keras.layers.Conv2D(
             64,
             7,
@@ -124,7 +116,6 @@ class DenseNetBackbone(FeaturePyramidBackbone):

         # === Config ===
         self.stackwise_num_repeats = stackwise_num_repeats
-        self.include_rescaling = include_rescaling
         self.compression_ratio = compression_ratio
         self.growth_rate = growth_rate
         self.image_shape = image_shape
@@ -135,7 +126,6 @@ class DenseNetBackbone(FeaturePyramidBackbone):
         config.update(
             {
                 "stackwise_num_repeats": self.stackwise_num_repeats,
-                "include_rescaling": self.include_rescaling,
                 "compression_ratio": self.compression_ratio,
                 "growth_rate": self.growth_rate,
                 "image_shape": self.image_shape,
keras_hub/src/models/densenet/densenet_image_classifier.py
@@ -74,7 +74,6 @@ class DenseNetImageClassifier(ImageClassifier):
     backbone = keras_hub.models.DenseNetBackbone(
         stackwise_num_filters=[128, 256, 512, 1024],
         stackwise_depth=[3, 9, 9, 3],
-        include_rescaling=False,
         block_type="basic_block",
         image_shape = (224, 224, 3),
     )
keras_hub/src/models/efficientnet/efficientnet_backbone.py
@@ -67,8 +67,6 @@ class EfficientNetBackbone(FeaturePyramidBackbone):
             MBConvBlock, but instead of using a depthwise convolution and a 1x1
             output convolution blocks fused blocks use a single 3x3 convolution
             block.
-        include_rescaling: bool, whether to rescale the inputs. If set to
-            True, inputs will be passed through a `Rescaling(1/255.0)` layer.
         min_depth: integer, minimum number of filters. Can be None and ignored
             if use_depth_divisor_as_min_depth is set to True.
         include_initial_padding: bool, whether to include initial zero padding
@@ -96,7 +94,6 @@ class EfficientNetBackbone(FeaturePyramidBackbone):
         stackwise_block_types=[["fused"] * 3 + ["unfused"] * 3],
         width_coefficient=1.0,
         depth_coefficient=1.0,
-        include_rescaling=False,
     )
     images = np.ones((1, 256, 256, 3))
     outputs = efficientnet.predict(images)
@@ -116,7 +113,6 @@ class EfficientNetBackbone(FeaturePyramidBackbone):
         stackwise_squeeze_and_excite_ratios,
         stackwise_strides,
         stackwise_block_types,
-        include_rescaling=True,
         dropout=0.2,
         depth_divisor=8,
         min_depth=8,
@@ -129,14 +125,9 @@ class EfficientNetBackbone(FeaturePyramidBackbone):
         batch_norm_momentum=0.9,
         **kwargs,
     ):
-        img_input = keras.layers.Input(shape=input_shape)
-
-        x = img_input
-
-        if include_rescaling:
-            # Use common rescaling strategy across keras
-            x = keras.layers.Rescaling(scale=1.0 / 255.0)(x)
+        image_input = keras.layers.Input(shape=input_shape)

+        x = image_input  # Intermediate result.
         if include_initial_padding:
             x = keras.layers.ZeroPadding2D(
                 padding=self._correct_pad_downsample(x, 3),
@@ -282,10 +273,9 @@ class EfficientNetBackbone(FeaturePyramidBackbone):
             curr_pyramid_level += 1

         # Create model.
-        super().__init__(inputs=img_input, outputs=x, **kwargs)
+        super().__init__(inputs=image_input, outputs=x, **kwargs)

         # === Config ===
-        self.include_rescaling = include_rescaling
         self.width_coefficient = width_coefficient
         self.depth_coefficient = depth_coefficient
         self.dropout = dropout
@@ -313,7 +303,6 @@ class EfficientNetBackbone(FeaturePyramidBackbone):
         config = super().get_config()
         config.update(
             {
-                "include_rescaling": self.include_rescaling,
                 "width_coefficient": self.width_coefficient,
                 "depth_coefficient": self.depth_coefficient,
                 "dropout": self.dropout,
keras_hub/src/models/mix_transformer/mix_transformer_backbone.py
@@ -36,7 +36,6 @@ class MiTBackbone(FeaturePyramidBackbone):
         end_value,
         patch_sizes,
         strides,
-        include_rescaling=True,
         image_shape=(None, None, 3),
         hidden_dims=None,
         **kwargs,
@@ -60,9 +59,6 @@ class MiTBackbone(FeaturePyramidBackbone):
             value projections. If set to > 1, a `Conv2D` layer is used to
             reduce the length of the sequence.
         end_value: The end value of the sequence.
-        include_rescaling: bool, whether to rescale the inputs. If set
-            to `True`, inputs will be passed through a `Rescaling(1/255.0)`
-            layer. Defaults to `True`.
         image_shape: optional shape tuple, defaults to (None, None, 3).
         hidden_dims: the embedding dims per hierarchical layer, used as
             the levels of the feature pyramid.
@@ -123,11 +119,7 @@ class MiTBackbone(FeaturePyramidBackbone):

         # === Functional Model ===
         image_input = keras.layers.Input(shape=image_shape)
-        x = image_input
-
-        if include_rescaling:
-            x = keras.layers.Rescaling(scale=1 / 255)(x)
-
+        x = image_input  # Intermediate result.
         pyramid_outputs = {}
         for i in range(num_layers):
             # Compute new height/width after the `proj`
@@ -151,7 +143,6 @@ class MiTBackbone(FeaturePyramidBackbone):

         # === Config ===
         self.depths = depths
-        self.include_rescaling = include_rescaling
         self.image_shape = image_shape
         self.hidden_dims = hidden_dims
         self.pyramid_outputs = pyramid_outputs
@@ -167,7 +158,6 @@ class MiTBackbone(FeaturePyramidBackbone):
         config.update(
             {
                 "depths": self.depths,
-                "include_rescaling": self.include_rescaling,
                 "hidden_dims": self.hidden_dims,
                 "image_shape": self.image_shape,
                 "num_layers": self.num_layers,
keras_hub/src/models/mix_transformer/mix_transformer_classifier.py
@@ -76,7 +76,6 @@ class MiTImageClassifier(ImageClassifier):
     backbone = keras_hub.models.MiTBackbone(
         stackwise_num_filters=[128, 256, 512, 1024],
         stackwise_depth=[3, 9, 9, 3],
-        include_rescaling=False,
         block_type="basic_block",
         image_shape = (224, 224, 3),
     )
keras_hub/src/models/mobilenet/mobilenet_backbone.py
@@ -54,9 +54,6 @@ class MobileNetBackbone(Backbone):
             model. 0 if dont want to add Squeeze and Excite layer.
         stackwise_activation: list of activation functions, for each inverted
             residual block in the model.
-        include_rescaling: bool, whether to rescale the inputs. If set to True,
-            inputs will be passed through a `Rescaling(scale=1 / 255)`
-            layer.
         image_shape: optional shape tuple, defaults to (224, 224, 3).
         depth_multiplier: float, controls the width of the network.
             - If `depth_multiplier` < 1.0, proportionally decreases the number
@@ -92,7 +89,6 @@ class MobileNetBackbone(Backbone):
         stackwise_num_strides=[2, 2, 1],
         stackwise_se_ratio=[0.25, None, 0.25],
         stackwise_activation=["relu", "relu6", "hard_swish"],
-        include_rescaling=False,
         output_num_filters=1280,
         input_activation='hard_swish',
         output_activation='hard_swish',
@@ -111,7 +107,6 @@ class MobileNetBackbone(Backbone):
         stackwise_num_strides,
         stackwise_se_ratio,
         stackwise_activation,
-        include_rescaling,
         output_num_filters,
         inverted_res_block,
         image_shape=(224, 224, 3),
@@ -126,12 +121,8 @@ class MobileNetBackbone(Backbone):
             -1 if keras.config.image_data_format() == "channels_last" else 1
         )

-        inputs = keras.layers.Input(shape=image_shape)
-        x = inputs
-
-        if include_rescaling:
-            x = keras.layers.Rescaling(scale=1 / 255)(x)
-
+        image_input = keras.layers.Input(shape=image_shape)
+        x = image_input  # Intermediate result.
         input_num_filters = adjust_channels(input_num_filters)
         x = keras.layers.Conv2D(
             input_num_filters,
@@ -195,7 +186,7 @@ class MobileNetBackbone(Backbone):
         )(x)
         x = keras.layers.Activation(output_activation)(x)

-        super().__init__(inputs=inputs, outputs=x, **kwargs)
+        super().__init__(inputs=image_input, outputs=x, **kwargs)

         # === Config ===
         self.stackwise_expansion = stackwise_expansion
@@ -204,7 +195,6 @@ class MobileNetBackbone(Backbone):
         self.stackwise_num_strides = stackwise_num_strides
         self.stackwise_se_ratio = stackwise_se_ratio
         self.stackwise_activation = stackwise_activation
-        self.include_rescaling = include_rescaling
         self.depth_multiplier = depth_multiplier
         self.input_num_filters = input_num_filters
         self.output_num_filters = output_num_filters
@@ -223,7 +213,6 @@ class MobileNetBackbone(Backbone):
                 "stackwise_num_strides": self.stackwise_num_strides,
                 "stackwise_se_ratio": self.stackwise_se_ratio,
                 "stackwise_activation": self.stackwise_activation,
-                "include_rescaling": self.include_rescaling,
                 "image_shape": self.image_shape,
                 "depth_multiplier": self.depth_multiplier,
                 "input_num_filters": self.input_num_filters,
keras_hub/src/models/mobilenet/mobilenet_image_classifier.py
@@ -56,7 +56,6 @@ class MobileNetImageClassifier(ImageClassifier):
         stackwise_stride = [2, 2, 1],
         stackwise_se_ratio = [ 0.25, None, 0.25],
         stackwise_activation = ["relu", "relu", "hard_swish"],
-        include_rescaling = False,
         output_filter=1280,
         activation="hard_swish",
         inverted_res_block=True,
keras_hub/src/models/pali_gemma/pali_gemma_vit.py
@@ -476,6 +476,9 @@ class PaliGemmaVit(keras.Model):
             shape=(image_size, image_size, 3), name="images"
         )
         x = image_input  # Intermediate result.
+        # TODO we have moved this rescaling to preprocessing layers for most
+        # models. We should consider removing it here, though it would break
+        # compatibility.
         if include_rescaling:
             rescaling = keras.layers.Rescaling(
                 scale=1.0 / 127.5, offset=-1.0, name="rescaling"
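
For reference, the rescaling that PaliGemmaVit keeps in the backbone maps [0, 255] pixel values to [-1, 1]. A quick check of the arithmetic (plain NumPy, not package code):

import numpy as np

# scale=1.0 / 127.5 and offset=-1.0, as configured above.
x = np.array([0.0, 127.5, 255.0])
print(x * (1.0 / 127.5) - 1.0)  # [-1.  0.  1.]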
keras_hub/src/models/resnet/resnet_backbone.py
@@ -44,9 +44,6 @@ class ResNetBackbone(FeaturePyramidBackbone):
     additional pooling operation rather than performing downsampling within
     the convolutional layers themselves.

-    Note that `ResNetBackbone` expects the inputs to be images with a value
-    range of `[0, 255]` when `include_rescaling=True`.
-
     Args:
         input_conv_filters: list of ints. The number of filters of the initial
             convolution(s).
@@ -65,9 +62,6 @@ class ResNetBackbone(FeaturePyramidBackbone):
             variants.
         use_pre_activation: boolean. Whether to use pre-activation or not.
             `True` for ResNetV2, `False` for ResNet.
-        include_rescaling: boolean. If `True`, rescale the input using
-            `Rescaling` and `Normalization` layers. If `False`, do nothing.
-            Defaults to `True`.
         image_shape: tuple. The input shape without the batch size.
             Defaults to `(None, None, 3)`.
         pooling: `None` or str. Pooling mode for feature extraction. Defaults
@@ -124,7 +118,6 @@ class ResNetBackbone(FeaturePyramidBackbone):
         stackwise_num_strides,
         block_type,
         use_pre_activation=False,
-        include_rescaling=True,
         image_shape=(None, None, 3),
         data_format=None,
         dtype=None,
@@ -170,18 +163,7 @@ class ResNetBackbone(FeaturePyramidBackbone):

         # === Functional Model ===
         image_input = layers.Input(shape=image_shape)
-        if include_rescaling:
-            x = layers.Rescaling(scale=1 / 255.0, dtype=dtype)(image_input)
-            x = layers.Normalization(
-                axis=bn_axis,
-                mean=(0.485, 0.456, 0.406),
-                variance=(0.229**2, 0.224**2, 0.225**2),
-                dtype=dtype,
-                name="normalization",
-            )(x)
-        else:
-            x = image_input
-
+        x = image_input  # Intermediate result.
         # The padding between torch and tensorflow/jax differs when `strides>1`.
         # Therefore, we need to manually pad the tensor.
         x = layers.ZeroPadding2D(
@@ -299,7 +281,6 @@ class ResNetBackbone(FeaturePyramidBackbone):
         self.stackwise_num_strides = stackwise_num_strides
         self.block_type = block_type
         self.use_pre_activation = use_pre_activation
-        self.include_rescaling = include_rescaling
         self.image_shape = image_shape
         self.pyramid_outputs = pyramid_outputs
         self.data_format = data_format
@@ -315,7 +296,6 @@ class ResNetBackbone(FeaturePyramidBackbone):
                 "stackwise_num_strides": self.stackwise_num_strides,
                 "block_type": self.block_type,
                 "use_pre_activation": self.use_pre_activation,
-                "include_rescaling": self.include_rescaling,
                 "image_shape": self.image_shape,
             }
         )
keras_hub/src/models/resnet/resnet_image_classifier.py
@@ -85,7 +85,6 @@ class ResNetImageClassifier(ImageClassifier):
         stackwise_num_strides=[1, 2, 2],
         block_type="basic_block",
         use_pre_activation=True,
-        include_rescaling=False,
         pooling="avg",
     )
     classifier = keras_hub.models.ResNetImageClassifier(
keras_hub/src/models/resnet/resnet_presets.py
@@ -25,7 +25,7 @@ backbone_presets = {
            "path": "resnet",
            "model_card": "https://arxiv.org/abs/2110.00476",
        },
-        "kaggle_handle": "kaggle://kerashub/resnetv1/keras/resnet_18_imagenet/2",
+        "kaggle_handle": "kaggle://kerashub/resnetv1/keras/resnet_18_imagenet/3",
    },
    "resnet_50_imagenet": {
        "metadata": {
@@ -38,7 +38,7 @@ backbone_presets = {
            "path": "resnet",
            "model_card": "https://arxiv.org/abs/2110.00476",
        },
-        "kaggle_handle": "kaggle://kerashub/resnetv1/keras/resnet_50_imagenet/2",
+        "kaggle_handle": "kaggle://kerashub/resnetv1/keras/resnet_50_imagenet/3",
    },
    "resnet_101_imagenet": {
        "metadata": {
@@ -51,7 +51,7 @@ backbone_presets = {
            "path": "resnet",
            "model_card": "https://arxiv.org/abs/2110.00476",
        },
-        "kaggle_handle": "kaggle://kerashub/resnetv1/keras/resnet_101_imagenet/2",
+        "kaggle_handle": "kaggle://kerashub/resnetv1/keras/resnet_101_imagenet/3",
    },
    "resnet_152_imagenet": {
        "metadata": {
@@ -64,7 +64,7 @@ backbone_presets = {
            "path": "resnet",
            "model_card": "https://arxiv.org/abs/2110.00476",
        },
-        "kaggle_handle": "kaggle://kerashub/resnetv1/keras/resnet_152_imagenet/2",
+        "kaggle_handle": "kaggle://kerashub/resnetv1/keras/resnet_152_imagenet/3",
    },
    "resnet_v2_50_imagenet": {
        "metadata": {
@@ -77,7 +77,7 @@ backbone_presets = {
            "path": "resnet",
            "model_card": "https://arxiv.org/abs/2110.00476",
        },
-        "kaggle_handle": "kaggle://kerashub/resnetv2/keras/resnet_v2_50_imagenet/2",
+        "kaggle_handle": "kaggle://kerashub/resnetv2/keras/resnet_v2_50_imagenet/3",
    },
    "resnet_v2_101_imagenet": {
        "metadata": {
@@ -90,6 +90,6 @@ backbone_presets = {
            "path": "resnet",
            "model_card": "https://arxiv.org/abs/2110.00476",
        },
-        "kaggle_handle": "kaggle://kerashub/resnetv2/keras/resnet_v2_101_imagenet/2",
+        "kaggle_handle": "kaggle://kerashub/resnetv2/keras/resnet_v2_101_imagenet/3",
    },
 }
keras_hub/src/models/vgg/vgg_backbone.py
@@ -33,8 +33,6 @@ class VGGBackbone(Backbone):
         stackwise_num_filters: list of ints, filter size for convolutional
             blocks per VGG block. For both VGG16 and VGG19 this is [
             64, 128, 256, 512, 512].
-        include_rescaling: bool, whether to rescale the inputs. If set to
-            True, inputs will be passed through a `Rescaling(1/255.0)` layer.
         image_shape: tuple, optional shape tuple, defaults to (224, 224, 3).
         pooling: bool, Optional pooling mode for feature extraction
             when `include_top` is `False`.
@@ -61,7 +59,6 @@ class VGGBackbone(Backbone):
         stackwise_num_repeats = [2, 2, 3, 3, 3],
         stackwise_num_filters = [64, 128, 256, 512, 512],
         image_shape = (224, 224, 3),
-        include_rescaling = False,
         pooling = "avg",
     )
     model(input_data)
@@ -72,7 +69,6 @@ class VGGBackbone(Backbone):
         self,
         stackwise_num_repeats,
         stackwise_num_filters,
-        include_rescaling,
         image_shape=(224, 224, 3),
         pooling="avg",
         **kwargs,
@@ -82,8 +78,6 @@ class VGGBackbone(Backbone):
         img_input = keras.layers.Input(shape=image_shape)
         x = img_input

-        if include_rescaling:
-            x = layers.Rescaling(scale=1 / 255.0)(x)
         for stack_index in range(len(stackwise_num_repeats) - 1):
             x = apply_vgg_block(
                 x=x,
@@ -105,7 +99,6 @@ class VGGBackbone(Backbone):
         # === Config ===
         self.stackwise_num_repeats = stackwise_num_repeats
         self.stackwise_num_filters = stackwise_num_filters
-        self.include_rescaling = include_rescaling
         self.image_shape = image_shape
         self.pooling = pooling

@@ -113,7 +106,6 @@ class VGGBackbone(Backbone):
         return {
             "stackwise_num_repeats": self.stackwise_num_repeats,
             "stackwise_num_filters": self.stackwise_num_filters,
-            "include_rescaling": self.include_rescaling,
             "image_shape": self.image_shape,
             "pooling": self.pooling,
         }
keras_hub/src/models/vgg/vgg_image_classifier.py
@@ -66,7 +66,6 @@ class VGGImageClassifier(ImageClassifier):
         stackwise_num_repeats = [2, 2, 3, 3, 3],
         stackwise_num_filters = [64, 128, 256, 512, 512],
         image_shape = (224, 224, 3),
-        include_rescaling = False,
         pooling = "avg",
     )
     classifier = keras_hub.models.VGGImageClassifier(
keras_hub/src/models/vit_det/vit_det_backbone.py
@@ -46,9 +46,6 @@ class ViTDetBackbone(Backbone):
             global attention.
         image_shape (tuple[int], optional): The size of the input image in
             `(H, W, C)` format. Defaults to `(1024, 1024, 3)`.
-        include_rescaling (bool, optional): Whether to rescale the inputs. If
-            set to `True`, inputs will be passed through a
-            `Rescaling(1/255.0)` layer. Defaults to `False`.
         patch_size (int, optional): the patch size to be supplied to the
             Patching layer to turn input images into a flattened sequence of
             patches. Defaults to `16`.
@@ -96,7 +93,6 @@ class ViTDetBackbone(Backbone):
         intermediate_dim,
         num_heads,
         global_attention_layer_indices,
-        include_rescaling=True,
         image_shape=(1024, 1024, 3),
         patch_size=16,
         num_output_channels=256,
@@ -123,9 +119,6 @@ class ViTDetBackbone(Backbone):
         )
         img_size = img_input.shape[-3]
         x = img_input
-        if include_rescaling:
-            # Use common rescaling strategy across keras_cv
-            x = keras.layers.Rescaling(1.0 / 255.0)(x)
         # VITDet scales inputs based on the standard ImageNet mean/stddev.
         x = (x - ops.array([0.485, 0.456, 0.406], dtype=x.dtype)) / (
             ops.array([0.229, 0.224, 0.225], dtype=x.dtype)
@@ -179,14 +172,12 @@ class ViTDetBackbone(Backbone):
         self.window_size = window_size
         self.global_attention_layer_indices = global_attention_layer_indices
         self.layer_norm_epsilon = layer_norm_epsilon
-        self.include_rescaling = include_rescaling

     def get_config(self):
         config = super().get_config()
         config.update(
             {
                 "image_shape": self.image_shape,
-                "include_rescaling": self.include_rescaling,
                 "patch_size": self.patch_size,
                 "hidden_size": self.hidden_size,
                 "num_layers": self.num_layers,
keras_hub/src/utils/timm/convert_resnet.py
@@ -151,14 +151,6 @@ def convert_weights(backbone, loader, timm_config):
     if version == "v2":
         port_batch_normalization("post_bn", "norm")

-    # Rebuild normalization layer with pretrained mean & std
-    mean = timm_config["pretrained_cfg"]["mean"]
-    std = timm_config["pretrained_cfg"]["std"]
-    normalization_layer = backbone.get_layer("normalization")
-    normalization_layer.input_mean = mean
-    normalization_layer.input_variance = [s**2 for s in std]
-    normalization_layer.build(normalization_layer._build_input_shape)
-


 def convert_head(task, loader, timm_config):
keras_hub/src/utils/timm/preset_loader.py
@@ -62,5 +62,20 @@ class TimmPresetLoader(PresetLoader):
         pretrained_cfg = self.config.get("pretrained_cfg", None)
         if not pretrained_cfg or "input_size" not in pretrained_cfg:
             return None
+        # This assumes the same basic setup for all timm preprocessing, and that
+        # all our image conversion will be via a `ResizingImageConverter`. We may
+        # need to extend this as we cover more model types.
         input_size = pretrained_cfg["input_size"]
-        return cls(width=input_size[1], height=input_size[2])
+        mean = pretrained_cfg["mean"]
+        variance = [s**2 for s in pretrained_cfg["std"]]
+        interpolation = pretrained_cfg["interpolation"]
+        if interpolation not in ("bilinear", "nearest", "bicubic"):
+            interpolation = "bilinear"  # Unsupported interpolation type.
+        return cls(
+            width=input_size[1],
+            height=input_size[2],
+            scale=1 / 255.0,
+            mean=mean,
+            variance=variance,
+            interpolation=interpolation,
+        )
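
In effect, the timm loader now derives the full image preprocessing from `pretrained_cfg` instead of only the output size. A sketch of the mapping for a typical config, assuming an illustrative timm-style `pretrained_cfg` (the dictionary values below are examples, not taken from this package), using `ResizingImageConverter` directly in place of the model-specific converter class the loader would receive:

from keras_hub.src.layers.preprocessing.resizing_image_converter import (
    ResizingImageConverter,
)

# Illustrative timm-style pretrained_cfg.
pretrained_cfg = {
    "input_size": (3, 224, 224),
    "mean": (0.485, 0.456, 0.406),
    "std": (0.229, 0.224, 0.225),
    "interpolation": "bicubic",
}

input_size = pretrained_cfg["input_size"]
interpolation = pretrained_cfg["interpolation"]
if interpolation not in ("bilinear", "nearest", "bicubic"):
    interpolation = "bilinear"  # Fall back, as the loader does.
converter = ResizingImageConverter(
    width=input_size[1],
    height=input_size[2],
    scale=1 / 255.0,
    mean=pretrained_cfg["mean"],
    variance=[s**2 for s in pretrained_cfg["std"]],
    interpolation=interpolation,
)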
keras_hub/src/version_utils.py
@@ -15,7 +15,7 @@
 from keras_hub.src.api_export import keras_hub_export

 # Unique source of truth for the version number.
-__version__ = "0.16.0.dev202409201942"
+__version__ = "0.16.1.dev202409210335"


 @keras_hub_export("keras_hub.version")
{keras_hub_nightly-0.16.0.dev202409201942.dist-info → keras_hub_nightly-0.16.1.dev202409210335.dist-info}/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: keras-hub-nightly
-Version: 0.16.0.dev202409201942
+Version: 0.16.1.dev202409210335
 Summary: Industry-strength Natural Language Processing extensions for Keras.
 Home-page: https://github.com/keras-team/keras-hub
 Author: Keras team
{keras_hub_nightly-0.16.0.dev202409201942.dist-info → keras_hub_nightly-0.16.1.dev202409210335.dist-info}/RECORD
@@ -8,8 +8,8 @@ keras_hub/api/samplers/__init__.py,sha256=l56H4y3h_HlRn_PpeMyZ6vC7228EH_BVFo4Caa
 keras_hub/api/tokenizers/__init__.py,sha256=nzMwKmxkMCOiYB35BIgxHNveCM9WoYRp7ChhmVK8MIM,3042
 keras_hub/api/utils/__init__.py,sha256=4IXDgmXqFzqrCK2MPgkih0Ye1s-8hrlBaUk-n5Kqwl4,800
 keras_hub/src/__init__.py,sha256=lY7spwqXeGX_75qOHiSCff7FPvFCvRamJMF5ua9OWCg,585
-keras_hub/src/api_export.py,sha256=agkICNX5rGcJy_Bj29vaNmhH3no9KqJBO-V3MaqR6HQ,2062
-keras_hub/src/version_utils.py,sha256=4-xR-e8seKXQoWJuaZyu1IxKkBNDAI-WbkioZF8KDPI,808
+keras_hub/src/api_export.py,sha256=82JzmDgnWTJR-PRJI9L_vjhW2Svz8gilbE1NMGZ2JgA,2085
+keras_hub/src/version_utils.py,sha256=n0vYCPXxWIvl26pl1OjjwU68CgOOokRrcKD05BhvVNY,808
 keras_hub/src/bounding_box/__init__.py,sha256=lY7spwqXeGX_75qOHiSCff7FPvFCvRamJMF5ua9OWCg,585
 keras_hub/src/bounding_box/converters.py,sha256=V2ti6xPpaBgeLKbTpCsHsABdYOYASerIKX9oWqeOjHo,18450
 keras_hub/src/bounding_box/formats.py,sha256=5bbHO-n2ADsKIOBJDHMvIPCeNBaV1_mj-NVCgBKNiu8,4453
@@ -40,7 +40,7 @@ keras_hub/src/layers/preprocessing/multi_segment_packer.py,sha256=0se5fOIz-2fMt4
 keras_hub/src/layers/preprocessing/preprocessing_layer.py,sha256=5jFBScsNWuYyokPt8mUoyYeOkKH9ZS7MkeC3j-nxYHU,1273
 keras_hub/src/layers/preprocessing/random_deletion.py,sha256=P4YkpDXgQnlXEgukk6V_iuIrRIQOOC9i8KMkpd7UDic,10349
 keras_hub/src/layers/preprocessing/random_swap.py,sha256=Wu6pNuQ1l_5VRGlRxcomrWyEnqYfA4PcK-mHNuvSjr0,10090
-keras_hub/src/layers/preprocessing/resizing_image_converter.py,sha256=xbDDbJUL2IJ7Zv-CWFH8qtNjvGDrsj4Kf2L3usohIC0,4282
+keras_hub/src/layers/preprocessing/resizing_image_converter.py,sha256=P7KDWTGSnf40iUGUXhCkxx7A5kQMsTF1s3PxYkYxa6U,6440
 keras_hub/src/layers/preprocessing/start_end_packer.py,sha256=3IvVoOE-0kovt_8o2w-uVYEPFhGg-tmv3cwuJQu7VPc,8560
 keras_hub/src/metrics/__init__.py,sha256=lY7spwqXeGX_75qOHiSCff7FPvFCvRamJMF5ua9OWCg,585
 keras_hub/src/metrics/bleu.py,sha256=r0vROmLVVNjc1d9fwJgc64lwmhEXHNaNT1ed1h7Y0E0,14259
@@ -95,8 +95,8 @@ keras_hub/src/models/bloom/bloom_decoder.py,sha256=hSoeVnwRQvGbpVhYmf7-k8FB3Wg4a
 keras_hub/src/models/bloom/bloom_presets.py,sha256=7GiGFPmcXd_UraNsWGQffpzjKDRF-7nqIoUsic78xf0,4696
 keras_hub/src/models/bloom/bloom_tokenizer.py,sha256=ZMx8mHhw0D50zmmvYdmpg-Lk2GcvHz7pPlRpPlhS_2s,3161
 keras_hub/src/models/csp_darknet/__init__.py,sha256=lY7spwqXeGX_75qOHiSCff7FPvFCvRamJMF5ua9OWCg,585
-keras_hub/src/models/csp_darknet/csp_darknet_backbone.py,sha256=Zc3liZuKV-lgAKSAGGKZzsYyFRQwMFMI1qIkUGVUMBM,14718
-keras_hub/src/models/csp_darknet/csp_darknet_image_classifier.py,sha256=h74Q_VHaoSAkwBsDV-ZufN6fb9NFX2gDVk7AOvX-HUk,4388
+keras_hub/src/models/csp_darknet/csp_darknet_backbone.py,sha256=h0eua1EZP0vBV416uOVMmMP1JXy7cVoEj0JEO0OO_lc,14312
+keras_hub/src/models/csp_darknet/csp_darknet_image_classifier.py,sha256=qLav7bxuzB0oaNJLs8gIiQbQVFjAlteDT7WKRfKoSmk,4355
 keras_hub/src/models/deberta_v3/__init__.py,sha256=NCuHFWsgQl-Wer7w3xETvqFtF75AyKabjAYdOlyN34w,874
 keras_hub/src/models/deberta_v3/deberta_v3_backbone.py,sha256=_J-PpSLubay58YO51BicDK0bF97aUeoC21ZQOt1O9r0,7831
 keras_hub/src/models/deberta_v3/deberta_v3_masked_lm.py,sha256=urcktTsXN3kDWnppplnC8yISGx37qGW5HdwHSC7VDLE,4773
@@ -109,8 +109,8 @@ keras_hub/src/models/deberta_v3/disentangled_attention_encoder.py,sha256=Zt10UPx
 keras_hub/src/models/deberta_v3/disentangled_self_attention.py,sha256=MxpWy30h9JB8nlEk7V9_wETzP-tpv1Sd1Wiz_pHGpkI,13708
 keras_hub/src/models/deberta_v3/relative_embedding.py,sha256=QT5MAnheJ1wSKFeN49pdnZzWkztz5K2oYYuNEtB_5xM,3472
 keras_hub/src/models/densenet/__init__.py,sha256=lY7spwqXeGX_75qOHiSCff7FPvFCvRamJMF5ua9OWCg,585
-keras_hub/src/models/densenet/densenet_backbone.py,sha256=cMTTaI1WogaSjt8x8bpPMvApYp5NVmeHTfupUmZZ774,7661
-keras_hub/src/models/densenet/densenet_image_classifier.py,sha256=bmmkNNpxwkwfqI_ZMmoEATClmgmmkW6NO5tDK8BCt2Y,4336
+keras_hub/src/models/densenet/densenet_backbone.py,sha256=BbTecC7gfigSC3t4L-kGsZHS7pjj8DtDIztyMxo_AoI,7238
+keras_hub/src/models/densenet/densenet_image_classifier.py,sha256=eECPZKHycVHNbgFuBHyiZGPWBn0M_pBdLasjmroc95g,4303
 keras_hub/src/models/distil_bert/__init__.py,sha256=EiJUA3y_b22rMacMbBD7jD0eBSzR-wbVtF73k2RsQow,889
 keras_hub/src/models/distil_bert/distil_bert_backbone.py,sha256=ZW2OgNlWXeRlfI5BrcJLYr4Oc2qNJZoDxjoL7-cGuIQ,7027
 keras_hub/src/models/distil_bert/distil_bert_masked_lm.py,sha256=1BFS1At_HYlLK21VWyhQPrPtActpmR52A8LJG2c6N8Y,4862
@@ -120,7 +120,7 @@ keras_hub/src/models/distil_bert/distil_bert_text_classifier.py,sha256=Q-qGmyl6i
 keras_hub/src/models/distil_bert/distil_bert_text_classifier_preprocessor.py,sha256=sad3XpW2HfjG2iQ4JRm1tw2jp4pZCN4LYwF1mM4GUps,5480
 keras_hub/src/models/distil_bert/distil_bert_tokenizer.py,sha256=VK7kZJEbsClp20uWVb6pj-WSUU5IMdRBk0jyUIM_RIg,3698
 keras_hub/src/models/efficientnet/__init__.py,sha256=lY7spwqXeGX_75qOHiSCff7FPvFCvRamJMF5ua9OWCg,585
-keras_hub/src/models/efficientnet/efficientnet_backbone.py,sha256=krz5lgw5cPs2EyKArq99XnIfUeBVbkeq2PhPFADO04c,21841
+keras_hub/src/models/efficientnet/efficientnet_backbone.py,sha256=i-K9kYwnl2Ninuebw6nNJ6X7D_4dvjMrV1Y9XAdt6I4,21392
 keras_hub/src/models/efficientnet/fusedmbconv.py,sha256=_6aNQKL2XdVNgoAdKvvTh_NDkWeU66q98EFUOjEQ1UM,7933
 keras_hub/src/models/efficientnet/mbconv.py,sha256=LNbEj7RpEZ0SqzEu-7ZpH1BKm6Ne2sXPckc5c2DMqUk,8212
 keras_hub/src/models/electra/__init__.py,sha256=ixE5hAkfTFfErqbYVyIUKMT8MUz-u_175QXxEBIiGBU,849
@@ -191,12 +191,12 @@ keras_hub/src/models/mistral/mistral_presets.py,sha256=uF1Q4zllcV1upIlqmn3gxhVWz
 keras_hub/src/models/mistral/mistral_tokenizer.py,sha256=pO7mpzYgRDFpIrsmLBL3zxkadrOE0xfFj30c2nHN42c,2591
 keras_hub/src/models/mistral/mistral_transformer_decoder.py,sha256=6CdaZt1lQ9VcLz_OoYroqiqvsZfq9H5VGaWab25aCRI,10127
 keras_hub/src/models/mix_transformer/__init__.py,sha256=lY7spwqXeGX_75qOHiSCff7FPvFCvRamJMF5ua9OWCg,585
-keras_hub/src/models/mix_transformer/mix_transformer_backbone.py,sha256=TYcQCAMTZedirh2L4z8LrjfhmxR2CoImzIvVXFTiTMc,6833
-keras_hub/src/models/mix_transformer/mix_transformer_classifier.py,sha256=QUTeq4f07nrCE-hIKoam_M6jJ6aM9l6s_At5sRTo0JY,4310
+keras_hub/src/models/mix_transformer/mix_transformer_backbone.py,sha256=1OUWvrI4y5rzoOsQkB8ZqQqeg5DwFIWRY-IKgR5qDfA,6426
+keras_hub/src/models/mix_transformer/mix_transformer_classifier.py,sha256=Kq-FIayi0yiJ1P4_AhwdBAC-vFnfhEK3FYlmBjw4jUc,4277
 keras_hub/src/models/mix_transformer/mix_transformer_layers.py,sha256=Bi4lHMfiKgI-XOt21BBfKoK05uU3GcDJ3mQrGfCXb6Y,10123
 keras_hub/src/models/mobilenet/__init__.py,sha256=lY7spwqXeGX_75qOHiSCff7FPvFCvRamJMF5ua9OWCg,585
-keras_hub/src/models/mobilenet/mobilenet_backbone.py,sha256=G02NFvx2xy2mbEBX6mtJzhPwygZDAhJ2TMk2ejAuLg0,19168
-keras_hub/src/models/mobilenet/mobilenet_image_classifier.py,sha256=Oo3URtyqjfnmsyO9uncxOVHO9Giv607LBJ3UE8pWacU,3794
+keras_hub/src/models/mobilenet/mobilenet_backbone.py,sha256=Y950Yx4s5fTmVk7YTiMFiyqZLLuB75_iJaVbefznOwo,18776
+keras_hub/src/models/mobilenet/mobilenet_image_classifier.py,sha256=35Px2z1E_ATSZIYNb_bXjJ6Qimbd2rnPi04S99ycTNg,3759
 keras_hub/src/models/opt/__init__.py,sha256=DiiylcsbseSQ8te8KWZ6BTIaKYSzXHUPGBgFssFNGFY,825
 keras_hub/src/models/opt/opt_backbone.py,sha256=cbm9I7d3QlGD8l2W1eK8esqc5gm77tpwxg4t9nC-FtA,6460
 keras_hub/src/models/opt/opt_causal_lm.py,sha256=z6M8cQV-c8q7HmikNA9RuvsMMvQYF21-ZcC0nVGfnp8,11438
@@ -211,7 +211,7 @@ keras_hub/src/models/pali_gemma/pali_gemma_decoder_block.py,sha256=fXLO4uHtWYTuE
 keras_hub/src/models/pali_gemma/pali_gemma_image_converter.py,sha256=Wm1A-HuOMxesAHFbEpP5ZkPbdDaVW5CTTwkyFpI-WdI,990
 keras_hub/src/models/pali_gemma/pali_gemma_presets.py,sha256=cG5cV2bkiDJlKDiHX76BpnClsY5PcmLDezDg7emeiA4,2986
 keras_hub/src/models/pali_gemma/pali_gemma_tokenizer.py,sha256=7F1TQql3DEN517iVbNL60u6fQPimrGQvWBYh16ng8JU,3000
-keras_hub/src/models/pali_gemma/pali_gemma_vit.py,sha256=JUfJuyobcEb60jp3sIxlq12gIH_qsn97h4hsecimipQ,19092
+keras_hub/src/models/pali_gemma/pali_gemma_vit.py,sha256=GUMAuFcpoi0TxJk7LzsKp0Tt0c_83gx645cz26GqFzA,19271
 keras_hub/src/models/phi3/__init__.py,sha256=ENAOZhScWf9RbPmkiuICR5gr36ZMUn4AniLvJOrykj8,831
 keras_hub/src/models/phi3/phi3_attention.py,sha256=BcYApteLjbrCzube7jHVagc0mMpDCReRyvsQhQcJzY8,9828
 keras_hub/src/models/phi3/phi3_backbone.py,sha256=MvTE5bMmVpFHinZIEDBM1lfJFbgu4zg-0e-8_4hK-No,9470
@@ -223,11 +223,11 @@ keras_hub/src/models/phi3/phi3_presets.py,sha256=S7_gIqPxU5FQAEnAE_68UrfGGSLOMvo
 keras_hub/src/models/phi3/phi3_rotary_embedding.py,sha256=QVJIgpOw6iMicGrsPdW8eF84vV_stf0Tqm2qBJdsKH0,5597
 keras_hub/src/models/phi3/phi3_tokenizer.py,sha256=hlA-u2sTRYARDW3ABICPeiOYW1AJwr-5kvZk3EB5z7M,2577
 keras_hub/src/models/resnet/__init__.py,sha256=41gttaQ7gt_ZaqDa_GKuMPfIk5c88-GrdC1h9fBUTXc,843
-keras_hub/src/models/resnet/resnet_backbone.py,sha256=n9aKIpQcJCsAZrBiiN1vxUMHeQgYudRHdu_MsdRQZqw,33260
-keras_hub/src/models/resnet/resnet_image_classifier.py,sha256=I-dmx0O_ES3m3W5D4ICCux5zzDMZ2cM0vYGM9CDi5AE,5395
+keras_hub/src/models/resnet/resnet_backbone.py,sha256=Qu2MuPBNYasQDD4zeY2rnUUqiEYRXqjbeXilcUdimkA,32451
+keras_hub/src/models/resnet/resnet_image_classifier.py,sha256=4Ksxhp4kB93mbkjh7K-uKcCyEO4MtMazHN7VtUCL-wg,5362
 keras_hub/src/models/resnet/resnet_image_classifier_preprocessor.py,sha256=Vrs9NBZRL5fgDXXY27GZJg5xMa5_wovi8A2z8kFl2nc,1129
 keras_hub/src/models/resnet/resnet_image_converter.py,sha256=820drIU5Kkib7gC7T418mmrhsBHSkenfEiZ6-fkChv0,961
-keras_hub/src/models/resnet/resnet_presets.py,sha256=DZoufeJyrVDL4aHSztQNzZj8Cb_OGX53Fn0Ze4RuZCI,3550
+keras_hub/src/models/resnet/resnet_presets.py,sha256=6y8R-PviAnEyh-LFli9uMUNku4cJC9V7YqOd9V5PlV0,3550
 keras_hub/src/models/retinanet/__init__.py,sha256=lY7spwqXeGX_75qOHiSCff7FPvFCvRamJMF5ua9OWCg,585
 keras_hub/src/models/retinanet/anchor_generator.py,sha256=VQwgIAWh-6s28TU8MHFdl556U6h7rfF9B9iVI_zwI7c,7027
 keras_hub/src/models/retinanet/box_matcher.py,sha256=SvGn_6d5sfjq522UaHpxVCE2S5Nwml_aj5yAKApTNE4,11420
@@ -259,10 +259,10 @@ keras_hub/src/models/t5/t5_presets.py,sha256=2RT_NuJcqDdSeAsoSJXh5O_ax2H-s4YKTAo
 keras_hub/src/models/t5/t5_tokenizer.py,sha256=UnmZjiKhyb4AU7zALW3YAM_6_OGzYOVEGStBiw4ICvg,3103
 keras_hub/src/models/t5/t5_transformer_layer.py,sha256=wnu108InkHH9YMmFNTbmgIqcrKQQUxeJ7S1dcjUfBSY,5933
 keras_hub/src/models/vgg/__init__.py,sha256=lY7spwqXeGX_75qOHiSCff7FPvFCvRamJMF5ua9OWCg,585
-keras_hub/src/models/vgg/vgg_backbone.py,sha256=dMXIGypDQdLztvbHz0JgSdTGXXAZj11vLxG5oHk4ZNw,5479
-keras_hub/src/models/vgg/vgg_image_classifier.py,sha256=1bH6E46yHxN5tey2Mc62U3l4_5mTZ40U00bws-c6wqE,4106
+keras_hub/src/models/vgg/vgg_backbone.py,sha256=O6onZEduEPt1J4v2HFgtHsxu-SheqpUwY2pYoeLa6uE,5080
+keras_hub/src/models/vgg/vgg_image_classifier.py,sha256=cDcmHoHU1BZ211JakGPw3Z9lV22oMmK8J4-Ng8S07G0,4071
 keras_hub/src/models/vit_det/__init__.py,sha256=lY7spwqXeGX_75qOHiSCff7FPvFCvRamJMF5ua9OWCg,585
-keras_hub/src/models/vit_det/vit_det_backbone.py,sha256=Tyw3xTOW1rlHV-copzotzpaoPLWU8nA-LtViUGGgSlw,8541
+keras_hub/src/models/vit_det/vit_det_backbone.py,sha256=4b3CUk4zg8gjFJvDU-QJZP72CV8jqw3TnaoCzUC-vyo,8054
 keras_hub/src/models/vit_det/vit_layers.py,sha256=JeUzOT2jmSOoJ_OiHOfLSkkCUZ5mlK5Mfd21DwudRCQ,20436
 keras_hub/src/models/whisper/__init__.py,sha256=FI-xj6FwZDAAdCfKhOrE1_roQ8cXhD1gK4G6CLTvPQo,849
 keras_hub/src/models/whisper/whisper_audio_converter.py,sha256=JqtA2kLUMFKZ4FrI8g2piEjahE-0-F3Yp4qQXS1cYf4,8973
@@ -314,8 +314,8 @@ keras_hub/src/utils/tensor_utils.py,sha256=XpWORE8iUzHXv1E1akiYDep07ndZJRKvjsKVl
 keras_hub/src/utils/imagenet/__init__.py,sha256=AK2s8L-VARI5OmlT6G3vtlKIVyjwLfgVwXfxzhhSCq4,585
 keras_hub/src/utils/imagenet/imagenet_utils.py,sha256=0iHrAQbh5DCa9Dh7tJiQeJc7AGzNO7j0cFEWS2Of16w,39889
 keras_hub/src/utils/timm/__init__.py,sha256=lY7spwqXeGX_75qOHiSCff7FPvFCvRamJMF5ua9OWCg,585
-keras_hub/src/utils/timm/convert_resnet.py,sha256=hZNj_kpwSA9Jp3NRDHtCPzHFzRKKPnidKQUAoqcdENk,6810
-keras_hub/src/utils/timm/preset_loader.py,sha256=EgS5xBP3sWYiTgKmOAMmj3b3kRWcPnsWLieReLHZ178,2928
+keras_hub/src/utils/timm/convert_resnet.py,sha256=X2N9lk8sqRMzOMXkcIThAu6ZEtw8u8_Y4Kol82iTuW4,6417
+keras_hub/src/utils/timm/preset_loader.py,sha256=ac2PwGkfe-bikhQEFeIM25gDs3xk0E9SS5A1YEzZYQU,3602
 keras_hub/src/utils/transformers/__init__.py,sha256=lY7spwqXeGX_75qOHiSCff7FPvFCvRamJMF5ua9OWCg,585
 keras_hub/src/utils/transformers/convert_albert.py,sha256=7b9X1TLrWfHieoeX_K-EXTagkl4Rp9AfPjsPrwArBGY,8280
 keras_hub/src/utils/transformers/convert_bart.py,sha256=RXmPf_XUZrUyqDaOV9T7qVNEP4rAVR44oK1aRZI0v78,14996
@@ -328,7 +328,7 @@ keras_hub/src/utils/transformers/convert_mistral.py,sha256=4QStizMS6ESEPjSI-ls6j
 keras_hub/src/utils/transformers/convert_pali_gemma.py,sha256=BT5eX1QzbjCQCopbMstiejQQWQiB_N77bpD5FMUygEo,11234
 keras_hub/src/utils/transformers/preset_loader.py,sha256=9x9hLhDh_6PAHG5gay5rVoEVyt-gXTQGrnprjMLKvCM,3294
 keras_hub/src/utils/transformers/safetensor_utils.py,sha256=2O8lcCf9yIFt5xiRVOtF1ZkPb5pfhOfDJotBaanD9Zo,3547
-keras_hub_nightly-0.16.0.dev202409201942.dist-info/METADATA,sha256=veZ6kme7NK3BC_Hg4JywZPIyP66gWgsgas4PsvfXbkU,7061
-keras_hub_nightly-0.16.0.dev202409201942.dist-info/WHEEL,sha256=GV9aMThwP_4oNCtvEC2ec3qUYutgWeAzklro_0m4WJQ,91
-keras_hub_nightly-0.16.0.dev202409201942.dist-info/top_level.txt,sha256=N4J6piIWBKa38A4uV-CnIopnOEf8mHAbkNXafXm_CuA,10
-keras_hub_nightly-0.16.0.dev202409201942.dist-info/RECORD,,
+keras_hub_nightly-0.16.1.dev202409210335.dist-info/METADATA,sha256=XDJP6zqkNwmPIjLkOEAHp9hh8z1JLqO09nMcGLlnmtU,7061
+keras_hub_nightly-0.16.1.dev202409210335.dist-info/WHEEL,sha256=GV9aMThwP_4oNCtvEC2ec3qUYutgWeAzklro_0m4WJQ,91
+keras_hub_nightly-0.16.1.dev202409210335.dist-info/top_level.txt,sha256=N4J6piIWBKa38A4uV-CnIopnOEf8mHAbkNXafXm_CuA,10
+keras_hub_nightly-0.16.1.dev202409210335.dist-info/RECORD,,