keras-hub-nightly 0.16.1.dev202409270338__py3-none-any.whl → 0.16.1.dev202409290341__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.
@@ -1,16 +1,3 @@
1
- # Copyright 2024 The KerasHub Authors
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # https://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
1
  from keras_hub.src.api_export import keras_hub_export
15
2
  from keras_hub.src.models.densenet.densenet_backbone import DenseNetBackbone
16
3
  from keras_hub.src.models.densenet.densenet_image_converter import (
@@ -1,16 +1,3 @@
1
- # Copyright 2024 The KerasHub Authors
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # https://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
1
  from keras_hub.src.api_export import keras_hub_export
15
2
  from keras_hub.src.layers.preprocessing.resizing_image_converter import (
16
3
  ResizingImageConverter,
@@ -1,16 +1,3 @@
1
- # Copyright 2024 The KerasHub Authors
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # https://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
1
  """DenseNet preset configurations."""
15
2
 
16
3
  backbone_presets = {
@@ -24,29 +24,31 @@ class AnchorGenerator(keras.layers.Layer):
24
24
  for larger objects.
25
25
 
26
26
  Args:
27
- bounding_box_format (str): The format of the bounding boxes
27
+ bounding_box_format: str. The format of the bounding boxes
28
28
  to be generated. Expected to be a string like 'xyxy', 'xywh', etc.
29
- min_level (int): Minimum level of the output feature pyramid.
30
- max_level (int): Maximum level of the output feature pyramid.
31
- num_scales (int): Number of intermediate scales added on each level.
29
+ min_level: int. Minimum level of the output feature pyramid.
30
+ max_level: int. Maximum level of the output feature pyramid.
31
+ num_scales: int. Number of intermediate scales added on each level.
32
32
  For example, num_scales=2 adds one additional intermediate anchor
33
33
  scale [2^0, 2^0.5] on each level.
34
- aspect_ratios (list of float): Aspect ratios of anchors added on
34
+ aspect_ratios: List[float]. Aspect ratios of anchors added on
35
35
  each level. Each number indicates the ratio of width to height.
36
- anchor_size (float): Scale of size of the base anchor relative to the
36
+ anchor_size: float. Scale of size of the base anchor relative to the
37
37
  feature stride 2^level.
38
38
 
39
39
  Call arguments:
40
- images (Optional[Tensor]): An image tensor with shape `[B, H, W, C]` or
41
- `[H, W, C]`. If provided, its shape will be used to determine anchor
40
+ inputs: An image tensor with shape `[B, H, W, C]` or
41
+ `[H, W, C]`. Its shape will be used to determine anchor
42
42
  sizes.
43
43
 
44
44
  Returns:
45
45
  Dict: A dictionary mapping feature levels
46
- (e.g., 'P3', 'P4', etc.) to anchor boxes. Each entry contains a tensor
47
- of shape `(H/stride * W/stride * num_anchors_per_location, 4)`,
48
- where H and W are the height and width of the image, stride is 2^level,
49
- and num_anchors_per_location is `num_scales * len(aspect_ratios)`.
46
+ (e.g., 'P3', 'P4', etc.) to anchor boxes. Each entry contains a
47
+ tensor of shape
48
+ `(H/stride * W/stride * num_anchors_per_location, 4)`,
49
+ where H and W are the height and width of the image,
50
+ stride is 2^level, and num_anchors_per_location is
51
+ `num_scales * len(aspect_ratios)`.
50
52
 
51
53
  Example:
52
54
  ```python
@@ -81,8 +83,8 @@ class AnchorGenerator(keras.layers.Layer):
81
83
  self.anchor_size = anchor_size
82
84
  self.built = True
83
85
 
84
- def call(self, images):
85
- images_shape = ops.shape(images)
86
+ def call(self, inputs):
87
+ images_shape = ops.shape(inputs)
86
88
  if len(images_shape) == 4:
87
89
  image_shape = images_shape[1:-1]
88
90
  else:
@@ -147,8 +149,18 @@ class AnchorGenerator(keras.layers.Layer):
147
149
 
148
150
  def compute_output_shape(self, input_shape):
149
151
  multilevel_boxes_shape = {}
150
- for level in range(self.min_level, self.max_level + 1):
151
- multilevel_boxes_shape[f"P{level}"] = (None, None, 4)
152
+ if len(input_shape) == 4:
153
+ image_height, image_width = input_shape[1:-1]
154
+ else:
155
+ image_height, image_width = input_shape[:-1]
156
+
157
+ for i in range(self.min_level, self.max_level + 1):
158
+ multilevel_boxes_shape[f"P{i}"] = (
159
+ (image_height // 2 ** (i))
160
+ * (image_width // 2 ** (i))
161
+ * self.anchors_per_location,
162
+ 4,
163
+ )
152
164
  return multilevel_boxes_shape
153
165
 
154
166
  @property
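
The reworked `compute_output_shape` above makes the per-level anchor counts concrete: each level `i` contributes `(H // 2**i) * (W // 2**i) * anchors_per_location` boxes, where `anchors_per_location = num_scales * len(aspect_ratios)`. A minimal sketch of that arithmetic, assuming an illustrative 640x640 input and RetinaNet-style settings (`min_level=3`, `max_level=7`, `num_scales=3`, three aspect ratios) that are not fixed by this diff:

```python
# Per-level anchor counts implied by AnchorGenerator.compute_output_shape.
min_level, max_level = 3, 7
num_scales = 3
aspect_ratios = [0.5, 1.0, 2.0]
image_height = image_width = 640  # assumed input size

anchors_per_location = num_scales * len(aspect_ratios)  # 9
for level in range(min_level, max_level + 1):
    stride = 2**level
    boxes = (image_height // stride) * (image_width // stride) * anchors_per_location
    print(f"P{level}: ({boxes}, 4)")
# P3: (57600, 4), P4: (14400, 4), P5: (3600, 4), P6: (900, 4), P7: (225, 4)
```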
@@ -0,0 +1,373 @@
1
+ import keras
2
+
3
+
4
+ class FeaturePyramid(keras.layers.Layer):
5
+ """A Feature Pyramid Network (FPN) layer.
6
+
7
+ This implements the paper:
8
+ Tsung-Yi Lin, Piotr Dollar, Ross Girshick, Kaiming He, Bharath Hariharan,
9
+ and Serge Belongie. Feature Pyramid Networks for Object Detection.
10
+ (https://arxiv.org/pdf/1612.03144)
11
+
12
+ Feature Pyramid Networks (FPNs) are basic components that are added to an
13
+ existing feature extractor (CNN) to combine features at different scales.
14
+ For the basic FPN, the inputs are features `Ci` from different levels of a
15
+ CNN, which is usually the last block for each level, where the feature is
16
+ scaled from the image by a factor of `1/2^i`.
17
+
18
+ There is an output associated with each level in the basic FPN. The output
19
+ Pi at level `i` (corresponding to Ci) is given by performing a merge
20
+ operation on the outputs of:
21
+
22
+ 1) a lateral operation on Ci (usually a conv2D layer with kernel = 1 and
23
+ strides = 1)
24
+ 2) a top-down upsampling operation from Pi+1 (except for the top most level)
25
+
26
+ The final output of each level will also have a conv2D operation
27
+ (typically with kernel = 3 and strides = 1).
28
+
29
+ The inputs to the layer should be a dict whose keys match the
30
+ pyramid_levels, e.g. for `pyramid_levels` = [3,4,5], the expected input
31
+ dict should be `{P3:c3, P4:c4, P5:c5}`.
32
+
33
+ The output of the layer will have the same structure as the inputs: a dict
34
+ with a key and value for each level, plus extra coarser levels added
35
+ according to the `max_level` provided.
36
+
37
+ Args:
38
+ min_level: int. The minimum level of the feature pyramid.
39
+ max_level: int. The maximum level of the feature pyramid.
40
+ num_filters: int. The number of filters in each feature map.
41
+ activation: string or `keras.activations`. The activation function
42
+ to be used in network.
43
+ Defaults to `"relu"`.
44
+ kernel_initializer: `str` or `keras.initializers` initializer.
45
+ The kernel initializer for the convolution layers.
46
+ Defaults to `"VarianceScaling"`.
47
+ bias_initializer: `str` or `keras.initializers` initializer.
48
+ The bias initializer for the convolution layers.
49
+ Defaults to `"zeros"`.
50
+ batch_norm_momentum: float.
51
+ The momentum for the batch normalization layers.
52
+ Defaults to `0.99`.
53
+ batch_norm_epsilon: float.
54
+ The epsilon for the batch normalization layers.
55
+ Defaults to `0.001`.
56
+ kernel_regularizer: `str` or `keras.regularizers` regularizer.
57
+ The kernel regularizer for the convolution layers.
58
+ Defaults to `None`.
59
+ bias_regularizer: `str` or `keras.regularizers` regularizer.
60
+ The bias regularizer for the convolution layers.
61
+ Defaults to `None`.
62
+ use_batch_norm: bool. Whether to use batch normalization.
63
+ Defaults to `False`.
64
+ **kwargs: other keyword arguments passed to `keras.layers.Layer`,
65
+ including `name`, `trainable`, `dtype` etc.
66
+ """
67
+
68
+ def __init__(
69
+ self,
70
+ min_level,
71
+ max_level,
72
+ num_filters=256,
73
+ activation="relu",
74
+ kernel_initializer="VarianceScaling",
75
+ bias_initializer="zeros",
76
+ batch_norm_momentum=0.99,
77
+ batch_norm_epsilon=0.001,
78
+ kernel_regularizer=None,
79
+ bias_regularizer=None,
80
+ use_batch_norm=False,
81
+ **kwargs,
82
+ ):
83
+ super().__init__(**kwargs)
84
+ if min_level > max_level:
85
+ raise ValueError(
86
+ f"Minimum level ({min_level}) must be less than or equal to "
87
+ f"maximum level ({max_level})."
88
+ )
89
+ self.min_level = min_level
90
+ self.max_level = max_level
91
+ self.num_filters = num_filters
92
+ self.activation = keras.activations.get(activation)
93
+ self.kernel_initializer = keras.initializers.get(kernel_initializer)
94
+ self.bias_initializer = keras.initializers.get(bias_initializer)
95
+ self.batch_norm_momentum = batch_norm_momentum
96
+ self.batch_norm_epsilon = batch_norm_epsilon
97
+ self.use_batch_norm = use_batch_norm
98
+ if kernel_regularizer is not None:
99
+ self.kernel_regularizer = keras.regularizers.get(kernel_regularizer)
100
+ else:
101
+ self.kernel_regularizer = None
102
+ if bias_regularizer is not None:
103
+ self.bias_regularizer = keras.regularizers.get(bias_regularizer)
104
+ else:
105
+ self.bias_regularizer = None
106
+ self.data_format = keras.backend.image_data_format()
107
+ self.batch_norm_axis = -1 if self.data_format == "channels_last" else 1
108
+
109
+ def build(self, input_shapes):
110
+ input_shapes = {
111
+ (
112
+ input_name.split("_")[0]
113
+ if "shape" in input_name
114
+ else input_name
115
+ ): input_shapes[input_name]
116
+ for input_name in input_shapes
117
+ }
118
+ input_levels = [int(level[1]) for level in input_shapes]
119
+ backbone_max_level = min(max(input_levels), self.max_level)
120
+
121
+ # Build lateral layers
122
+ self.lateral_conv_layers = {}
123
+ for i in range(self.min_level, backbone_max_level + 1):
124
+ level = f"P{i}"
125
+ self.lateral_conv_layers[level] = keras.layers.Conv2D(
126
+ filters=self.num_filters,
127
+ kernel_size=1,
128
+ padding="same",
129
+ data_format=self.data_format,
130
+ kernel_initializer=self.kernel_initializer,
131
+ bias_initializer=self.bias_initializer,
132
+ kernel_regularizer=self.kernel_regularizer,
133
+ bias_regularizer=self.bias_regularizer,
134
+ dtype=self.dtype_policy,
135
+ name=f"lateral_conv_{level}",
136
+ )
137
+ self.lateral_conv_layers[level].build(input_shapes[level])
138
+
139
+ self.lateral_batch_norm_layers = {}
140
+ if self.use_batch_norm:
141
+ for i in range(self.min_level, backbone_max_level + 1):
142
+ level = f"P{i}"
143
+ self.lateral_batch_norm_layers[level] = (
144
+ keras.layers.BatchNormalization(
145
+ axis=self.batch_norm_axis,
146
+ momentum=self.batch_norm_momentum,
147
+ epsilon=self.batch_norm_epsilon,
148
+ name=f"lateral_norm_{level}",
149
+ )
150
+ )
151
+ self.lateral_batch_norm_layers[level].build(
152
+ (None, None, None, 256)
153
+ if self.data_format == "channels_last"
154
+ else (None, 256, None, None)
155
+ )
156
+
157
+ # Build output layers
158
+ self.output_conv_layers = {}
159
+ for i in range(self.min_level, backbone_max_level + 1):
160
+ level = f"P{i}"
161
+ self.output_conv_layers[level] = keras.layers.Conv2D(
162
+ filters=self.num_filters,
163
+ kernel_size=3,
164
+ padding="same",
165
+ data_format=self.data_format,
166
+ kernel_initializer=self.kernel_initializer,
167
+ bias_initializer=self.bias_initializer,
168
+ kernel_regularizer=self.kernel_regularizer,
169
+ bias_regularizer=self.bias_regularizer,
170
+ dtype=self.dtype_policy,
171
+ name=f"output_conv_{level}",
172
+ )
173
+ self.output_conv_layers[level].build(
174
+ (None, None, None, 256)
175
+ if self.data_format == "channels_last"
176
+ else (None, 256, None, None)
177
+ )
178
+
179
+ # Build coarser layers
180
+ for i in range(backbone_max_level + 1, self.max_level + 1):
181
+ level = f"P{i}"
182
+ self.output_conv_layers[level] = keras.layers.Conv2D(
183
+ filters=self.num_filters,
184
+ strides=2,
185
+ kernel_size=3,
186
+ padding="same",
187
+ data_format=self.data_format,
188
+ kernel_initializer=self.kernel_initializer,
189
+ bias_initializer=self.bias_initializer,
190
+ kernel_regularizer=self.kernel_regularizer,
191
+ bias_regularizer=self.bias_regularizer,
192
+ dtype=self.dtype_policy,
193
+ name=f"coarser_{level}",
194
+ )
195
+ self.output_conv_layers[level].build(
196
+ (None, None, None, 256)
197
+ if self.data_format == "channels_last"
198
+ else (None, 256, None, None)
199
+ )
200
+
201
+ # Build batch norm layers
202
+ self.output_batch_norms = {}
203
+ if self.use_batch_norm:
204
+ for i in range(self.min_level, self.max_level + 1):
205
+ level = f"P{i}"
206
+ self.output_batch_norms[level] = (
207
+ keras.layers.BatchNormalization(
208
+ axis=self.batch_norm_axis,
209
+ momentum=self.batch_norm_epsilon,
210
+ epsilon=self.batch_norm_epsilon,
211
+ name=f"output_norm_{level}",
212
+ )
213
+ )
214
+ self.output_batch_norms[level].build(
215
+ (None, None, None, 256)
216
+ if self.data_format == "channels_last"
217
+ else (None, 256, None, None)
218
+ )
219
+
220
+ # The same upsampling layer is used for all levels
221
+ self.top_down_op = keras.layers.UpSampling2D(
222
+ size=2,
223
+ data_format=self.data_format,
224
+ dtype=self.dtype_policy,
225
+ name="upsampling",
226
+ )
227
+ # The same merge layer is used for all levels
228
+ self.merge_op = keras.layers.Add(
229
+ dtype=self.dtype_policy, name="merge_op"
230
+ )
231
+
232
+ self.built = True
233
+
234
+ def call(self, inputs):
235
+ """
236
+ Inputs:
237
+ The input to the layer is expected to be a `Dict[Tensor]`,
238
+ containing the feature maps on top of which the FPN
239
+ will be added.
240
+
241
+ Outputs:
242
+ A dictionary of feature maps and added coarser levels based
243
+ on minimum and maximum levels provided to the layer.
244
+ """
245
+
246
+ output_features = {}
247
+
248
+ # Get the backbone max level
249
+ input_levels = [int(level[1]) for level in inputs]
250
+ backbone_max_level = min(max(input_levels), self.max_level)
251
+
252
+ for i in range(backbone_max_level, self.min_level - 1, -1):
253
+ level = f"P{i}"
254
+ output = self.lateral_conv_layers[level](inputs[level])
255
+ if i < backbone_max_level:
256
+ # Every level below the top-most one is merged with the upsampled
257
+ # output of the level above; the top-most output has nothing to merge.
258
+ upstream_output = self.top_down_op(output_features[f"P{i+1}"])
259
+ output = self.merge_op([output, upstream_output])
260
+ output_features[level] = (
261
+ self.lateral_batch_norm_layers[level](output)
262
+ if self.use_batch_norm
263
+ else output
264
+ )
265
+
266
+ # Post apply the output layers so that we don't leak them to the down
267
+ # stream level
268
+ for i in range(backbone_max_level, self.min_level - 1, -1):
269
+ level = f"P{i}"
270
+ output_features[level] = self.output_conv_layers[level](
271
+ output_features[level]
272
+ )
273
+
274
+ for i in range(backbone_max_level + 1, self.max_level + 1):
275
+ level = f"P{i}"
276
+ feats_in = output_features[f"P{i-1}"]
277
+ if i > backbone_max_level + 1:
278
+ feats_in = self.activation(feats_in)
279
+ output_features[level] = (
280
+ self.output_batch_norms[level](
281
+ self.output_conv_layers[level](feats_in)
282
+ )
283
+ if self.use_batch_norm
284
+ else self.output_conv_layers[level](feats_in)
285
+ )
286
+
287
+ return output_features
288
+
289
+ def get_config(self):
290
+ config = super().get_config()
291
+ config.update(
292
+ {
293
+ "min_level": self.min_level,
294
+ "max_level": self.max_level,
295
+ "num_filters": self.num_filters,
296
+ "use_batch_norm": self.use_batch_norm,
297
+ "activation": keras.activations.serialize(self.activation),
298
+ "kernel_initializer": keras.initializers.serialize(
299
+ self.kernel_initializer
300
+ ),
301
+ "bias_initializer": keras.initializers.serialize(
302
+ self.bias_initializer
303
+ ),
304
+ "batch_norm_momentum": self.batch_norm_momentum,
305
+ "batch_norm_epsilon": self.batch_norm_epsilon,
306
+ "kernel_regularizer": (
307
+ keras.regularizers.serialize(self.kernel_regularizer)
308
+ if self.kernel_regularizer is not None
309
+ else None
310
+ ),
311
+ "bias_regularizer": (
312
+ keras.regularizers.serialize(self.bias_regularizer)
313
+ if self.bias_regularizer is not None
314
+ else None
315
+ ),
316
+ }
317
+ )
318
+
319
+ return config
320
+
321
+ def compute_output_shape(self, input_shapes):
322
+ output_shape = {}
324
+ input_levels = [int(level[1]) for level in input_shapes]
325
+ backbone_max_level = min(max(input_levels), self.max_level)
326
+
327
+ for i in range(self.min_level, backbone_max_level + 1):
328
+ level = f"P{i}"
329
+ if self.data_format == "channels_last":
330
+ output_shape[level] = input_shapes[level][:-1] + (256,)
331
+ else:
332
+ output_shape[level] = (
333
+ input_shapes[level][0],
334
+ 256,
335
+ ) + input_shapes[level][1:3]
336
+
337
+ intermediate_shape = input_shapes[f"P{backbone_max_level}"]
338
+ intermediate_shape = (
339
+ (
340
+ intermediate_shape[0],
341
+ intermediate_shape[1] // 2,
342
+ intermediate_shape[2] // 2,
343
+ 256,
344
+ )
345
+ if self.data_format == "channels_last"
346
+ else (
347
+ intermediate_shape[0],
348
+ 256,
349
+ intermediate_shape[1] // 2,
350
+ intermediate_shape[2] // 2,
351
+ )
352
+ )
353
+
354
+ for i in range(backbone_max_level + 1, self.max_level + 1):
355
+ level = f"P{i}"
356
+ output_shape[level] = intermediate_shape
357
+ intermediate_shape = (
358
+ (
359
+ intermediate_shape[0],
360
+ intermediate_shape[1] // 2,
361
+ intermediate_shape[2] // 2,
362
+ 256,
363
+ )
364
+ if self.data_format == "channels_last"
365
+ else (
366
+ intermediate_shape[0],
367
+ 256,
368
+ intermediate_shape[1] // 2,
369
+ intermediate_shape[2] // 2,
370
+ )
371
+ )
372
+
373
+ return output_shape
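
For orientation, a minimal usage sketch of the new `FeaturePyramid` layer. The import path and constructor arguments come from this diff; the input keys follow the docstring (`{"P3": c3, "P4": c4, "P5": c5}`), while the spatial sizes and channel counts are illustrative and assume the default `channels_last` data format:

```python
import numpy as np

from keras_hub.src.models.retinanet.feature_pyramid import FeaturePyramid

# Dummy backbone features at strides 8, 16 and 32 for a 256x256 image.
inputs = {
    "P3": np.random.uniform(size=(1, 32, 32, 512)).astype("float32"),
    "P4": np.random.uniform(size=(1, 16, 16, 1024)).astype("float32"),
    "P5": np.random.uniform(size=(1, 8, 8, 2048)).astype("float32"),
}
fpn = FeaturePyramid(min_level=3, max_level=7)
outputs = fpn(inputs)
for level in sorted(outputs):
    print(level, outputs[level].shape)
# Every level P3..P7 comes out with `num_filters=256` channels; P6 and P7
# are the extra coarser levels added beyond the backbone's deepest feature.
```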
@@ -0,0 +1,270 @@
1
+ import keras
2
+ from keras import ops
3
+
4
+ from keras_hub.src.bounding_box.converters import _encode_box_to_deltas
5
+ from keras_hub.src.bounding_box.iou import compute_iou
6
+ from keras_hub.src.models.retinanet.anchor_generator import AnchorGenerator
7
+ from keras_hub.src.models.retinanet.box_matcher import BoxMatcher
8
+ from keras_hub.src.utils import tensor_utils
9
+
10
+
11
+ class RetinaNetLabelEncoder(keras.layers.Layer):
12
+ """Transforms the raw labels into targets for training.
13
+
14
+ RetinaNet is a single-stage object detection network that uses a feature
15
+ pyramid network and focal loss. This class is crucial for preparing the
16
+ ground truth data to match the network's anchor-based detection approach.
17
+
18
+ This class generates targets for a batch of samples which consists of input
19
+ images, bounding boxes for the objects present, and their class ids. It
20
+ matches ground truth boxes to anchor boxes based on IoU (Intersection over
21
+ Union) and encodes the box coordinates as offsets from the anchors.
22
+
23
+ Targets are always represented in 'center_yxwh' format for numerical
24
+ consistency during training, regardless of the input format.
25
+
26
+ Args:
27
+ bounding_box_format: str. The format of bounding boxes of input dataset.
28
+ Refer TODO: Add link to Keras Core Docs.
29
+ min_level: int. Minimum level of the output feature pyramid.
30
+ max_level: int. Maximum level of the output feature pyramid.
31
+ num_scales: int. Number of intermediate scales added on each level.
32
+ For example, num_scales=2 adds one additional intermediate anchor
33
+ scale [2^0, 2^0.5] on each level.
34
+ aspect_ratios: List[float]. Aspect ratios of anchors added on
35
+ each level. Each number indicates the ratio of width to height.
36
+ anchor_size: float. Scale of size of the base anchor relative to the
37
+ feature stride 2^level.
38
+ positive_threshold: float. The IoU threshold above which an anchor is
39
+ considered a positive match to a ground truth box.
40
+ Defaults to `0.5`.
41
+ negative_threshold: float. The IoU threshold below which an anchor is
42
+ considered a negative (background) match.
43
+ Defaults to `0.4`.
44
+ box_variance: List[float]. The scaling factors used to scale the
45
+ bounding box targets.
46
+ Defaults to `[0.1, 0.1, 0.2, 0.2]`.
47
+ background_class: int. The class ID used for the background class.
48
+ Defaults to `-1`.
49
+ ignore_class: int. The class ID used for the ignore class.
50
+ Defaults to `-2`.
51
+ box_matcher_match_values: List[int]. Values representing
52
+ matched results (e.g. positive, negative, or ignored match).
53
+ `len(match_values)` must equal `len(thresholds) + 1`.
54
+ Defaults to `[-1, -2, 1]`.
55
+ box_matcher_force_match_for_each_col: bool. If True, each column
56
+ (ground truth box) will be matched to at least one row (anchor box).
57
+ This guarantees that every ground truth box has at least one
58
+ matching anchor box.
59
+ Defaults to `False`.
60
+
61
+ Note: `tf.RaggedTensor` inputs are not supported.
62
+ """
63
+
64
+ def __init__(
65
+ self,
66
+ bounding_box_format,
67
+ min_level,
68
+ max_level,
69
+ num_scales,
70
+ aspect_ratios,
71
+ anchor_size,
72
+ positive_threshold=0.5,
73
+ negative_threshold=0.4,
74
+ box_variance=[0.1, 0.1, 0.2, 0.2],
75
+ background_class=-1.0,
76
+ ignore_class=-2.0,
77
+ box_matcher_match_values=[-1, -2, 1],
78
+ box_matcher_force_match_for_each_col=False,
79
+ **kwargs,
80
+ ):
81
+ super().__init__(**kwargs)
82
+ self.bounding_box_format = bounding_box_format
83
+ self.min_level = min_level
84
+ self.max_level = max_level
85
+ self.num_scales = num_scales
86
+ self.aspect_ratios = aspect_ratios
87
+ self.anchor_size = anchor_size
88
+ self.positive_threshold = positive_threshold
89
+ self.box_variance = box_variance
90
+ self.negative_threshold = negative_threshold
91
+ self.background_class = background_class
92
+ self.ignore_class = ignore_class
93
+
94
+ self.anchor_generator = AnchorGenerator(
95
+ bounding_box_format=bounding_box_format,
96
+ min_level=min_level,
97
+ max_level=max_level,
98
+ num_scales=num_scales,
99
+ aspect_ratios=aspect_ratios,
100
+ anchor_size=anchor_size,
101
+ )
102
+
103
+ self.box_matcher = BoxMatcher(
104
+ thresholds=[negative_threshold, positive_threshold],
105
+ match_values=box_matcher_match_values,
106
+ force_match_for_each_col=box_matcher_force_match_for_each_col,
107
+ )
108
+
109
+ def build(self, images_shape, gt_boxes_shape, gt_classes_shape):
110
+ self.built = True
111
+
112
+ def call(self, images, gt_boxes, gt_classes):
113
+ """Creates box and classification targets for a batch.
114
+
115
+ Args:
116
+ images: A Tensor. The input images argument should be
117
+ of shape `[B, H, W, C]` or `[B, C, H, W]`.
118
+ gt_boxes: A Tensor of shape `[B, num_boxes, 4]`.
119
+ gt_classes: A Tensor of shape `[B, num_boxes, num_classes]`.
120
+
121
+ Returns:
122
+ box_targets: A Tensor of shape `[batch_size, num_anchors, 4]`
123
+ containing the encoded box targets.
124
+ class_targets: A Tensor of shape `[batch_size, num_anchors, 1]`
125
+ containing the class targets for each anchor.
126
+ """
127
+
128
+ images_shape = ops.shape(images)
129
+ if len(images_shape) != 4:
130
+ raise ValueError(
131
+ "`RetinaNetLabelEncoder`'s `call()` method does not "
132
+ "support unbatched inputs for the `images` argument. "
133
+ f"Received `shape(images)={images_shape}`."
134
+ )
135
+ image_shape = images_shape[1:]
136
+
137
+ if len(ops.shape(gt_classes)) == 2:
138
+ gt_classes = ops.expand_dims(gt_classes, axis=-1)
139
+
140
+ anchor_boxes = self.anchor_generator(images)
141
+ anchor_boxes = ops.concatenate(list(anchor_boxes.values()), axis=0)
142
+
143
+ box_targets, class_targets = self._encode_sample(
144
+ gt_boxes, gt_classes, anchor_boxes, image_shape
145
+ )
146
+ box_targets = ops.reshape(
147
+ box_targets, (-1, ops.shape(box_targets)[1], 4)
148
+ )
149
+ return box_targets, class_targets
150
+
151
+ def _encode_sample(self, gt_boxes, gt_classes, anchor_boxes, image_shape):
152
+ """Creates box and classification targets for a batched sample.
153
+
154
+ Matches ground truth boxes to anchor boxes based on IOU.
155
+ 1. Calculates the pairwise IOU for the M `anchor_boxes` and N `gt_boxes`
156
+ to get a `(M, N)` shaped matrix.
157
+ 2. The ground truth box with the maximum IOU in each row is assigned to
158
+ the anchor box provided the IOU is greater than `match_iou`.
159
+ 3. If the maximum IOU in a row is less than `ignore_iou`, the anchor
160
+ box is assigned with the background class.
161
+ 4. The remaining anchor boxes that do not have any class assigned are
162
+ ignored during training.
163
+
164
+ Args:
165
+ gt_boxes: A Tensor of shape `[B, num_boxes, 4]`. Should be in
166
+ `bounding_box_format`.
167
+ gt_classes: A Tensor of shape `[B, num_boxes, num_classes, 1]`.
168
+ anchor_boxes: A Tensor with the shape `[total_anchors, 4]`
169
+ representing all the anchor boxes for a given input image shape,
170
+ where each anchor box is of the format `[x, y, width, height]`.
171
+ image_shape: Tuple indicating the image shape `[H, W, C]`.
172
+
173
+ Returns:
174
+ Encoded bounding boxes in the format of `center_yxwh` and
175
+ corresponding labels for each encoded bounding box.
176
+ """
177
+
178
+ iou_matrix = compute_iou(
179
+ anchor_boxes,
180
+ gt_boxes,
181
+ bounding_box_format=self.bounding_box_format,
182
+ image_shape=image_shape,
183
+ )
184
+
185
+ matched_gt_idx, matched_vals = self.box_matcher(iou_matrix)
186
+ matched_vals = ops.expand_dims(matched_vals, axis=-1)
187
+ positive_mask = ops.cast(ops.equal(matched_vals, 1), self.dtype)
188
+ ignore_mask = ops.cast(ops.equal(matched_vals, -2), self.dtype)
189
+
190
+ matched_gt_boxes = tensor_utils.target_gather(gt_boxes, matched_gt_idx)
191
+
192
+ matched_gt_boxes = ops.reshape(
193
+ matched_gt_boxes, (-1, ops.shape(matched_gt_boxes)[1], 4)
194
+ )
195
+
196
+ box_target = _encode_box_to_deltas(
197
+ anchors=anchor_boxes,
198
+ boxes=matched_gt_boxes,
199
+ anchor_format=self.bounding_box_format,
200
+ box_format=self.bounding_box_format,
201
+ variance=self.box_variance,
202
+ image_shape=image_shape,
203
+ )
204
+
205
+ matched_gt_cls_ids = tensor_utils.target_gather(
206
+ gt_classes, matched_gt_idx
207
+ )
208
+ cls_target = ops.where(
209
+ ops.not_equal(positive_mask, 1.0),
210
+ self.background_class,
211
+ matched_gt_cls_ids,
212
+ )
213
+ cls_target = ops.where(
214
+ ops.equal(ignore_mask, 1.0), self.ignore_class, cls_target
215
+ )
216
+ label = ops.concatenate(
217
+ [box_target, ops.cast(cls_target, box_target.dtype)], axis=-1
218
+ )
219
+
220
+ # In the case that a box in the corner of an image matches with an all
221
+ # -1 box that is outside the image, we should assign the box to the
222
+ # ignore class. There are rare cases where a -1 box can be matched,
223
+ # resulting in a NaN during training. The unit test passing all -1s to
224
+ # the label encoder ensures that we properly handle this edge-case.
225
+ label = ops.where(
226
+ ops.expand_dims(ops.any(ops.isnan(label), axis=-1), axis=-1),
227
+ self.ignore_class,
228
+ label,
229
+ )
230
+
231
+ return label[:, :, :4], label[:, :, 4]
232
+
233
+ def get_config(self):
234
+ config = super().get_config()
235
+ config.update(
236
+ {
237
+ "bounding_box_format": self.bounding_box_format,
238
+ "min_level": self.min_level,
239
+ "max_level": self.max_level,
240
+ "num_scales": self.num_scales,
241
+ "aspect_ratios": self.aspect_ratios,
242
+ "anchor_size": self.anchor_size,
243
+ "positive_threshold": self.positive_threshold,
244
+ "box_variance": self.box_variance,
245
+ "negative_threshold": self.negative_threshold,
246
+ "background_class": self.background_class,
247
+ "ignore_class": self.ignore_class,
248
+ }
249
+ )
250
+ return config
251
+
252
+ def compute_output_shape(
253
+ self, images_shape, gt_boxes_shape, gt_classes_shape
254
+ ):
255
+ min_level = self.anchor_generator.min_level
256
+ max_level = self.anchor_generator.max_level
257
+ batch_size, image_H, image_W = images_shape[:-1]
258
+
259
+ total_num_anchors = 0
260
+ for i in range(min_level, max_level + 1):
261
+ total_num_anchors += (
262
+ (image_H // 2 ** (i))
263
+ * (image_W // 2 ** (i))
264
+ * self.anchor_generator.anchors_per_location
265
+ )
266
+
267
+ return (batch_size, total_num_anchors, 4), (
268
+ batch_size,
269
+ total_num_anchors,
270
+ )
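
To show how the pieces fit together, a minimal sketch of calling the new `RetinaNetLabelEncoder`. The import path and constructor signature are taken from this diff; the anchor settings, image size, and ground truth values are illustrative assumptions:

```python
import numpy as np

from keras_hub.src.models.retinanet.retinanet_label_encoder import (
    RetinaNetLabelEncoder,
)

encoder = RetinaNetLabelEncoder(
    bounding_box_format="xyxy",
    min_level=3,
    max_level=7,
    num_scales=3,
    aspect_ratios=[0.5, 1.0, 2.0],
    anchor_size=4,
)
# Two images with one ground truth box each (absolute xyxy coordinates).
images = np.random.uniform(size=(2, 256, 256, 3)).astype("float32")
gt_boxes = np.array(
    [[[10.0, 10.0, 100.0, 100.0]], [[50.0, 60.0, 150.0, 170.0]]], "float32"
)
gt_classes = np.array([[2.0], [5.0]], "float32")

box_targets, class_targets = encoder(images, gt_boxes, gt_classes)
# box_targets: [2, total_anchors, 4] encoded deltas in center_yxwh space.
# class_targets: [2, total_anchors] with -1 for background and -2 for ignore.
```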
@@ -1,16 +1,3 @@
1
- # Copyright 2024 The KerasHub Authors
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # https://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
1
  """StableDiffusion3 preset configurations."""
15
2
 
16
3
  backbone_presets = {
@@ -13,6 +13,7 @@ from keras import tree
13
13
  from keras_hub.src.layers.modeling.reversible_embedding import (
14
14
  ReversibleEmbedding,
15
15
  )
16
+ from keras_hub.src.models.retinanet.feature_pyramid import FeaturePyramid
16
17
  from keras_hub.src.tokenizers.tokenizer import Tokenizer
17
18
  from keras_hub.src.utils.keras_utils import has_quantization_support
18
19
  from keras_hub.src.utils.tensor_utils import is_float_dtype
@@ -127,7 +128,10 @@ class TestCase(tf.test.TestCase, parameterized.TestCase):
127
128
 
128
129
  def call(self, x):
129
130
  if isinstance(x, dict):
130
- return self.layer(**x)
131
+ if isinstance(layer, FeaturePyramid):
132
+ return self.layer(x)
133
+ else:
134
+ return self.layer(**x)
131
135
  else:
132
136
  return self.layer(x)
133
137
 
@@ -147,7 +151,10 @@ class TestCase(tf.test.TestCase, parameterized.TestCase):
147
151
  layer = cls(**init_kwargs)
148
152
  if isinstance(input_data, dict):
149
153
  shapes = {k + "_shape": v.shape for k, v in input_data.items()}
150
- layer.build(**shapes)
154
+ if isinstance(layer, FeaturePyramid):
155
+ layer.build(shapes)
156
+ else:
157
+ layer.build(**shapes)
151
158
  else:
152
159
  layer.build(input_data.shape)
153
160
  run_build_asserts(layer)
@@ -158,7 +165,10 @@ class TestCase(tf.test.TestCase, parameterized.TestCase):
158
165
  )
159
166
  layer = cls(**init_kwargs)
160
167
  if isinstance(keras_tensor_inputs, dict):
161
- keras_tensor_outputs = layer(**keras_tensor_inputs)
168
+ if isinstance(layer, FeaturePyramid):
169
+ keras_tensor_outputs = layer(keras_tensor_inputs)
170
+ else:
171
+ keras_tensor_outputs = layer(**keras_tensor_inputs)
162
172
  else:
163
173
  keras_tensor_outputs = layer(keras_tensor_inputs)
164
174
  run_build_asserts(layer)
@@ -167,7 +177,10 @@ class TestCase(tf.test.TestCase, parameterized.TestCase):
167
177
  # Eager call test and compiled training test.
168
178
  layer = cls(**init_kwargs)
169
179
  if isinstance(input_data, dict):
170
- output_data = layer(**input_data)
180
+ if isinstance(layer, FeaturePyramid):
181
+ output_data = layer(input_data)
182
+ else:
183
+ output_data = layer(**input_data)
171
184
  else:
172
185
  output_data = layer(input_data)
173
186
  run_output_asserts(layer, output_data, eager=True)
@@ -305,8 +318,12 @@ class TestCase(tf.test.TestCase, parameterized.TestCase):
305
318
  output_data = layer(input_data)
306
319
  output_spec = layer.compute_output_spec(input_data)
307
320
  elif isinstance(input_data, dict):
308
- output_data = layer(**input_data)
309
- output_spec = layer.compute_output_spec(**input_data)
321
+ if isinstance(layer, FeaturePyramid):
322
+ output_data = layer(input_data)
323
+ output_spec = layer.compute_output_spec(input_data)
324
+ else:
325
+ output_data = layer(**input_data)
326
+ output_spec = layer.compute_output_spec(**input_data)
310
327
  else:
311
328
  output_data = layer(input_data)
312
329
  output_spec = layer.compute_output_spec(input_data)
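
These new branches exist because `FeaturePyramid` consumes its feature dict as a single positional input, whereas other dict-input layers expect the dict unpacked into keyword arguments. A small sketch of the convention the harness now follows for `FeaturePyramid`, with illustrative shapes:

```python
import numpy as np

from keras_hub.src.models.retinanet.feature_pyramid import FeaturePyramid

input_data = {
    "P3": np.zeros((1, 16, 16, 32), dtype="float32"),
    "P4": np.zeros((1, 8, 8, 64), dtype="float32"),
}
layer = FeaturePyramid(min_level=3, max_level=4)

# Dict-input FeaturePyramid layers are built from a single shapes dict...
shapes = {k + "_shape": v.shape for k, v in input_data.items()}
layer.build(shapes)

# ...and called with the whole dict as one positional argument.
outputs = layer(input_data)
```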
@@ -308,3 +308,109 @@ def any_equal(inputs, values, padding_mask):
308
308
  output = ops.logical_or(output, value_equality)
309
309
 
310
310
  return ops.logical_and(output, padding_mask)
311
+
312
+
313
+ def target_gather(
314
+ targets,
315
+ indices,
316
+ mask=None,
317
+ mask_val=0.0,
318
+ ):
319
+ """A utility function wrapping `ops.take`, which deals with:
320
+ 1) both batched and unbatched `targets`.
321
+ 2) when unbatched `targets` have empty rows, the result will be filled
322
+ with `mask_val`.
323
+ 3) target masking.
324
+
325
+ Args:
326
+ targets: `[N, ...]` or `[batch_size, N, ...]` Tensor representing
327
+ targets such as boxes, keypoints, etc.
328
+ indices: [M] or [batch_size, M] int32 Tensor representing indices within
329
+ `targets` to gather.
330
+ mask: `[M, ...]` or `[batch_size, M, ...]` boolean Tensor
331
+ representing the masking for each target. `True` means the
332
+ corresponding entity should be masked to `mask_val`, `False`
333
+ means the corresponding entity should be the target value.
334
+ Defaults to `None`.
335
+ mask_val: float. The value used to fill masked entries where
336
+ `mask` is `True`.
337
+ Defaults to `0.0`.
338
+
339
+ Returns:
340
+ targets: `[M, ...]` or `[batch_size, M, ...]` Tensor representing
341
+ selected targets.
342
+
343
+ Raises:
344
+ ValueError: If `targets` has a rank greater than 3.
345
+ """
346
+ targets_shape = list(targets.shape)
347
+ if len(targets_shape) > 3:
348
+ raise ValueError(
349
+ f"`target_gather` does not support `targets` with rank "
350
+ f"larger than 3, got {len(targets.shape)}"
351
+ )
352
+
353
+ def gather_unbatched(labels, match_indices, mask, mask_val):
354
+ """Gather based on unbatched labels and boxes."""
355
+ num_gt_boxes = labels.shape[0]
356
+
357
+ def assign_when_rows_empty():
358
+ if len(labels.shape) > 1:
359
+ mask_shape = [match_indices.shape[0], labels.shape[-1]]
360
+ else:
361
+ mask_shape = [match_indices.shape[0]]
362
+ return ops.cast(mask_val, labels.dtype) * ops.ones(
363
+ mask_shape, dtype=labels.dtype
364
+ )
365
+
366
+ def assign_when_rows_not_empty():
367
+ targets = ops.take(labels, match_indices, axis=0)
368
+ if mask is None:
369
+ return targets
370
+ else:
371
+ masked_targets = ops.cast(
372
+ mask_val, labels.dtype
373
+ ) * ops.ones_like(mask, dtype=labels.dtype)
374
+ return ops.where(mask, masked_targets, targets)
375
+
376
+ if num_gt_boxes > 0:
377
+ return assign_when_rows_not_empty()
378
+ else:
379
+ return assign_when_rows_empty()
380
+
381
+ def _gather_batched(labels, match_indices, mask, mask_val):
382
+ """Gather based on batched labels."""
383
+ batch_size = labels.shape[0]
384
+ if batch_size == 1:
385
+ if mask is not None:
386
+ result = gather_unbatched(
387
+ ops.squeeze(labels, axis=0),
388
+ ops.squeeze(match_indices, axis=0),
389
+ ops.squeeze(mask, axis=0),
390
+ mask_val,
391
+ )
392
+ else:
393
+ result = gather_unbatched(
394
+ ops.squeeze(labels, axis=0),
395
+ ops.squeeze(match_indices, axis=0),
396
+ None,
397
+ mask_val,
398
+ )
399
+ return ops.expand_dims(result, axis=0)
400
+ else:
401
+ targets = ops.take_along_axis(
402
+ labels, ops.expand_dims(match_indices, axis=-1), axis=1
403
+ )
404
+
405
+ if mask is None:
406
+ return targets
407
+ else:
408
+ masked_targets = ops.cast(
409
+ mask_val, labels.dtype
410
+ ) * ops.ones_like(mask, dtype=labels.dtype)
411
+ return ops.where(mask, masked_targets, targets)
412
+
413
+ if len(targets_shape) <= 2:
414
+ return gather_unbatched(targets, indices, mask, mask_val)
415
+ elif len(targets_shape) == 3:
416
+ return _gather_batched(targets, indices, mask, mask_val)
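
A small, self-contained sketch of the `target_gather` semantics described above, using hypothetical toy values (the import path is the module this hunk adds to):

```python
import numpy as np

from keras_hub.src.utils.tensor_utils import target_gather

# Batched case: one image with three candidate ground truth boxes.
boxes = np.array(
    [[[0.0, 0.0, 1.0, 1.0], [1.0, 1.0, 2.0, 2.0], [2.0, 2.0, 3.0, 3.0]]],
    dtype="float32",
)  # [batch_size=1, N=3, 4]
# For each of two anchors, the index of its matched box.
indices = np.array([[2, 0]], dtype="int32")  # [batch_size=1, M=2]

gathered = target_gather(boxes, indices)
# gathered[0] == [[2., 2., 3., 3.], [0., 0., 1., 1.]]
```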
@@ -1,16 +1,3 @@
1
- # Copyright 2024 The KerasHUB Authors
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # https://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
1
  import numpy as np
15
2
 
16
3
  from keras_hub.src.models.densenet.densenet_backbone import DenseNetBackbone
@@ -1,7 +1,7 @@
1
1
  from keras_hub.src.api_export import keras_hub_export
2
2
 
3
3
  # Unique source of truth for the version number.
4
- __version__ = "0.16.1.dev202409270338"
4
+ __version__ = "0.16.1.dev202409290341"
5
5
 
6
6
 
7
7
  @keras_hub_export("keras_hub.version")
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: keras-hub-nightly
3
- Version: 0.16.1.dev202409270338
3
+ Version: 0.16.1.dev202409290341
4
4
  Summary: Industry-strength Natural Language Processing extensions for Keras.
5
5
  Home-page: https://github.com/keras-team/keras-hub
6
6
  Author: Keras team
@@ -9,7 +9,7 @@ keras_hub/api/tokenizers/__init__.py,sha256=_f-r_cyUM2fjBB7iO84ThOdqqsAxHNIewJ2E
9
9
  keras_hub/api/utils/__init__.py,sha256=Gp1E6gG-RtKQS3PBEQEOz9PQvXkXaJ0ySGMqZ7myN7A,215
10
10
  keras_hub/src/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
11
11
  keras_hub/src/api_export.py,sha256=9pQZK27JObxWZ96QPLBp1OBsjWigh1iuV6RglPGMRk0,1499
12
- keras_hub/src/version_utils.py,sha256=lQWO4vCCgOyriN1VAz3HplTFidXjzWlTLLiS7pKSG4U,222
12
+ keras_hub/src/version_utils.py,sha256=GxYmS5YH-wnfPE281Ma_mqC_6TDvSlrBelngquLu2LQ,222
13
13
  keras_hub/src/bounding_box/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
14
14
  keras_hub/src/bounding_box/converters.py,sha256=a5po8DBm87oz2EXfi-0uEZHCMlCJPIb4-MaZIdYx3Dg,17865
15
15
  keras_hub/src/bounding_box/formats.py,sha256=YmskOz2BOSat7NaE__J9VfpSNGPJJR0znSzA4lp8MMI,3868
@@ -118,9 +118,9 @@ keras_hub/src/models/deberta_v3/relative_embedding.py,sha256=3WIQ1nWcEhfWF0U9DcK
118
118
  keras_hub/src/models/densenet/__init__.py,sha256=r7StyamnWeeZxOk9r4ZYNbS_YVhu9YGPyXhNxljvdPg,269
119
119
  keras_hub/src/models/densenet/densenet_backbone.py,sha256=dN9lUwKzO3E2HthNV2x54ozeBEQ0ilNs5uYHshFQpT0,6723
120
120
  keras_hub/src/models/densenet/densenet_image_classifier.py,sha256=oWjN_Rn5ffOF8WW8U_KJJFn-EcJbqyyuAbgmr0lp2gk,4742
121
- keras_hub/src/models/densenet/densenet_image_classifier_preprocessor.py,sha256=83OcsZp2RZ626YGQ99roR8RKArlink-6VsUqwlv7L_s,1148
122
- keras_hub/src/models/densenet/densenet_image_converter.py,sha256=oynHnALbasOWF0f4iYbJPyfYQ8_u0oSZAK8-zSdG7kM,973
123
- keras_hub/src/models/densenet/densenet_presets.py,sha256=RypneRdDz0OlsRidmjh03gWalre7MlrHNOce97PLEjw,2119
121
+ keras_hub/src/models/densenet/densenet_image_classifier_preprocessor.py,sha256=xDZbTw_h6pjLDzf8QmbDyMnMsFzgh-dPX1ldg9kddhg,563
122
+ keras_hub/src/models/densenet/densenet_image_converter.py,sha256=dJEMrevAL7F3OF6W-Xh7h0AZLtgUoa1BFTP963Bj3Ao,388
123
+ keras_hub/src/models/densenet/densenet_presets.py,sha256=GawLJOd_Kn_Kj_1ue7DYFLx7UPYvPGGOYKrNIqhQe2I,1534
124
124
  keras_hub/src/models/distil_bert/__init__.py,sha256=3Z0w-Mt3aOR0u9RGzjHQ7B3J3qBF2pGjupDGQ9yyzoc,303
125
125
  keras_hub/src/models/distil_bert/distil_bert_backbone.py,sha256=rnAf_GokB3wAeJwVZtgUKQO_bKJIa8RavhL_ykTJpNw,6440
126
126
  keras_hub/src/models/distil_bert/distil_bert_masked_lm.py,sha256=L0DvOl01MIwqc2f6H_E8si9qVUXPd0OKknJ5Rha33TA,4275
@@ -239,9 +239,11 @@ keras_hub/src/models/resnet/resnet_image_classifier_preprocessor.py,sha256=fM7gy
239
239
  keras_hub/src/models/resnet/resnet_image_converter.py,sha256=zO1cO76eYR70qQyGm5F9msiF7D08BprItvpvm8VOSuY,376
240
240
  keras_hub/src/models/resnet/resnet_presets.py,sha256=eYB6vrtoSd9xC2KzUToa3R9e5G6T-AyuFKZDOKOBbMI,2965
241
241
  keras_hub/src/models/retinanet/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
242
- keras_hub/src/models/retinanet/anchor_generator.py,sha256=4X7VuQmAzjqdOC6PhDKjieu_uY2TkZmtXjbce9GVK6g,6441
242
+ keras_hub/src/models/retinanet/anchor_generator.py,sha256=43NoI7djbRudH98hUm-9fw5OEGQNRXOUYzypIZhLYhE,6750
243
243
  keras_hub/src/models/retinanet/box_matcher.py,sha256=l820r1R-ByqiyVgmZ0YFjjz0njchDda-wItzLn1X84o,10834
244
+ keras_hub/src/models/retinanet/feature_pyramid.py,sha256=Z6-5VB49LXCQBhjFPATd7PBuEg-s3LU7F69WrTo_Lt4,14912
244
245
  keras_hub/src/models/retinanet/non_max_supression.py,sha256=jY_j4X24-tStb_Asld3jOo8xrXrThJ90XRbzwarRZZs,20936
246
+ keras_hub/src/models/retinanet/retinanet_label_encoder.py,sha256=rOEc4jpA7sw8kBRX7gdVzKoKwK8C-NoXbAnYONo5iNM,11217
245
247
  keras_hub/src/models/roberta/__init__.py,sha256=3ouSnKdLlMwoDDLVKD9cNtxam6f8XWgCyc0pwWJ0Zjo,263
246
248
  keras_hub/src/models/roberta/roberta_backbone.py,sha256=2eBSHuzFF5EJJPU0Ef3S3i396g70WCPtcJ7VLJM1guE,6339
247
249
  keras_hub/src/models/roberta/roberta_masked_lm.py,sha256=j2dFANRFHd1MNFP_REchljGWOcpOjCpdSya-WGdRzPA,4176
@@ -261,7 +263,7 @@ keras_hub/src/models/stable_diffusion_3/__init__.py,sha256=ZKYQuaRObyhKq8GVAHmoR
261
263
  keras_hub/src/models/stable_diffusion_3/flow_match_euler_discrete_scheduler.py,sha256=9SLbOpAv50q8yv8I6H4DHbsIgwNo8TJmwZfAH8Ew6Zw,2827
262
264
  keras_hub/src/models/stable_diffusion_3/mmdit.py,sha256=ntmxjDJtZbHDGVPPAnasVZyoOTp5bbMPhxM30SYmpoQ,25711
263
265
  keras_hub/src/models/stable_diffusion_3/stable_diffusion_3_backbone.py,sha256=9rWSG0C23_pwN1pymZbial3GX_UM4tmDLXtB4kTQ04w,22599
264
- keras_hub/src/models/stable_diffusion_3/stable_diffusion_3_presets.py,sha256=t74qsx-fc76DWoiyRCg-1qVQfv2Ac_Qds_-CdR6baXs,1243
266
+ keras_hub/src/models/stable_diffusion_3/stable_diffusion_3_presets.py,sha256=gfF5ZOhJx03IQTPnb2Nf65i3pNz-fQlhdAJ3DjKHHZ8,658
265
267
  keras_hub/src/models/stable_diffusion_3/stable_diffusion_3_text_to_image.py,sha256=XH4osHG9EE1sJpfj7rf0bCqrIHpeXaswFoEojWnE0pw,4419
266
268
  keras_hub/src/models/stable_diffusion_3/stable_diffusion_3_text_to_image_preprocessor.py,sha256=TB0KESt5dnFYiS292PbzB0LdiH23AD6aTSTGmQEuzGM,2742
267
269
  keras_hub/src/models/stable_diffusion_3/t5_encoder.py,sha256=oV7P1uwCKdGiD93zXq7kmqX0elMZQU4UvBa8wg6P1hs,5113
@@ -311,7 +313,7 @@ keras_hub/src/samplers/serialization.py,sha256=K6FC4AY1sfOLLIk2k4G783XWnQ_Rk3z1Q
311
313
  keras_hub/src/samplers/top_k_sampler.py,sha256=WSyrhmOCan55X2JYAnNWE88rkx66sXqdoerl87nOrDQ,2250
312
314
  keras_hub/src/samplers/top_p_sampler.py,sha256=9r29WdqBlrW_2TBma6QqkRps2Uit4a6iZPmq1Gsiuko,3400
313
315
  keras_hub/src/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
314
- keras_hub/src/tests/test_case.py,sha256=RkHdvyAfbWjqFZEkbW82k2L7da0ETGOIGSrzw9Wh_hs,25144
316
+ keras_hub/src/tests/test_case.py,sha256=pgjT5CkkkX4BTNfaDD6i-YChO6Ig3But66Ls4RxEymw,25937
315
317
  keras_hub/src/tokenizers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
316
318
  keras_hub/src/tokenizers/byte_pair_tokenizer.py,sha256=Wocarha6ZuzrfiWHPiQUPLLRLrDITyc0hQzjRupw4xA,23849
317
319
  keras_hub/src/tokenizers/byte_tokenizer.py,sha256=vjgrTT8FdtZVAlr0mU13alzADcUhtMrzgOs4lYeHvAQ,10648
@@ -326,11 +328,11 @@ keras_hub/src/utils/keras_utils.py,sha256=lrZuC8HL2lmQfbHaS_t1JUyJann_ji2iTYE0Fz
326
328
  keras_hub/src/utils/pipeline_model.py,sha256=33-0vIB9KGYh2mRtyjHxBPvgGZHDusRcRy-xjki3_gg,9024
327
329
  keras_hub/src/utils/preset_utils.py,sha256=sfEJm5ia1N5tD_7TWS4e4_Z-luPS1rNAifSVAlgfbis,27613
328
330
  keras_hub/src/utils/python_utils.py,sha256=N8nWeO3san4YnGkffRXG3Ix7VEIMTKSN21FX5TuL7G8,202
329
- keras_hub/src/utils/tensor_utils.py,sha256=_MJvIAw4j4dNRTR1xVeaZCRpbnYQm_i2fCsprcRVvSE,10776
331
+ keras_hub/src/utils/tensor_utils.py,sha256=JipeJUDnnvLuT-ToVQC0t9dmSzebwPG6XiZgEwGEGI4,14646
330
332
  keras_hub/src/utils/imagenet/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
331
333
  keras_hub/src/utils/imagenet/imagenet_utils.py,sha256=MvIvv1WJo51ZXBxy4S7t_DsN3ZMtJWlC4cmRvKM2kIA,39304
332
334
  keras_hub/src/utils/timm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
333
- keras_hub/src/utils/timm/convert_densenet.py,sha256=RqPlE0J7iXgcjyAPEfLLMGIKvmGs3QJQlYNTFrhOLKM,3927
335
+ keras_hub/src/utils/timm/convert_densenet.py,sha256=V-GRjWuDnlh3b1EMxqahwZ3GMwSgOa3v0HOfb2ZZ-d0,3342
334
336
  keras_hub/src/utils/timm/convert_resnet.py,sha256=ee8eTml0ffJKE8avzGoLFcpjPF63DsvoIUArAGa8Ngg,5832
335
337
  keras_hub/src/utils/timm/preset_loader.py,sha256=toecv57_A1QbmAehNZe4X1Pdf5xU7Ut4AvJc_ptdMPQ,3156
336
338
  keras_hub/src/utils/transformers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -345,7 +347,7 @@ keras_hub/src/utils/transformers/convert_mistral.py,sha256=kVhN9h1ZFVhwkNW8p3wnS
345
347
  keras_hub/src/utils/transformers/convert_pali_gemma.py,sha256=B1leeDw96Yvu81hYumf66hIid07k5NLqoeWAJgPnaLs,10649
346
348
  keras_hub/src/utils/transformers/preset_loader.py,sha256=GS44hZUuGQCtzsyn8z44ZpHdftd3DFemwV2hx2bQa-U,2738
347
349
  keras_hub/src/utils/transformers/safetensor_utils.py,sha256=rPK-Uw1CG0DX0d_UAD-r2cG9fw8GI8bvAlrcXfQ9g4c,3323
348
- keras_hub_nightly-0.16.1.dev202409270338.dist-info/METADATA,sha256=Im-TR8_hyiR4nbLkWbq8Dw-gX0pViH0f39L_Rpwe3Mk,7310
349
- keras_hub_nightly-0.16.1.dev202409270338.dist-info/WHEEL,sha256=GV9aMThwP_4oNCtvEC2ec3qUYutgWeAzklro_0m4WJQ,91
350
- keras_hub_nightly-0.16.1.dev202409270338.dist-info/top_level.txt,sha256=N4J6piIWBKa38A4uV-CnIopnOEf8mHAbkNXafXm_CuA,10
351
- keras_hub_nightly-0.16.1.dev202409270338.dist-info/RECORD,,
350
+ keras_hub_nightly-0.16.1.dev202409290341.dist-info/METADATA,sha256=x4KCDdTpn0-dCiiXij4SGFDFTt_e2WIBa0z3_8cUcnY,7310
351
+ keras_hub_nightly-0.16.1.dev202409290341.dist-info/WHEEL,sha256=GV9aMThwP_4oNCtvEC2ec3qUYutgWeAzklro_0m4WJQ,91
352
+ keras_hub_nightly-0.16.1.dev202409290341.dist-info/top_level.txt,sha256=N4J6piIWBKa38A4uV-CnIopnOEf8mHAbkNXafXm_CuA,10
353
+ keras_hub_nightly-0.16.1.dev202409290341.dist-info/RECORD,,