keras-hub-nightly 0.19.0.dev202501090358__py3-none-any.whl → 0.19.0.dev202501260345__py3-none-any.whl

This diff compares publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the versions as they appear in their respective public registries.
Files changed (24)
  1. keras_hub/src/metrics/bleu.py +3 -2
  2. keras_hub/src/models/basnet/basnet_backbone.py +1 -1
  3. keras_hub/src/models/basnet/basnet_presets.py +15 -1
  4. keras_hub/src/models/deeplab_v3/deeplab_v3_layers.py +3 -3
  5. keras_hub/src/models/densenet/densenet_backbone.py +3 -3
  6. keras_hub/src/models/flux/flux_text_to_image.py +1 -1
  7. keras_hub/src/models/pali_gemma/pali_gemma_presets.py +2 -2
  8. keras_hub/src/models/resnet/resnet_backbone.py +1 -1
  9. keras_hub/src/models/retinanet/feature_pyramid.py +5 -5
  10. keras_hub/src/models/stable_diffusion_3/stable_diffusion_3_inpaint.py +1 -2
  11. keras_hub/src/models/vit/vit_layers.py +1 -1
  12. keras_hub/src/tests/test_case.py +1 -1
  13. keras_hub/src/tokenizers/byte_tokenizer.py +1 -2
  14. keras_hub/src/tokenizers/sentence_piece_tokenizer_trainer.py +3 -0
  15. keras_hub/src/tokenizers/unicode_codepoint_tokenizer.py +1 -2
  16. keras_hub/src/tokenizers/word_piece_tokenizer_trainer.py +3 -0
  17. keras_hub/src/utils/timm/convert_densenet.py +6 -4
  18. keras_hub/src/utils/timm/convert_efficientnet.py +1 -1
  19. keras_hub/src/utils/timm/convert_resnet.py +1 -1
  20. keras_hub/src/version_utils.py +1 -1
  21. {keras_hub_nightly-0.19.0.dev202501090358.dist-info → keras_hub_nightly-0.19.0.dev202501260345.dist-info}/METADATA +1 -1
  22. {keras_hub_nightly-0.19.0.dev202501090358.dist-info → keras_hub_nightly-0.19.0.dev202501260345.dist-info}/RECORD +24 -24
  23. {keras_hub_nightly-0.19.0.dev202501090358.dist-info → keras_hub_nightly-0.19.0.dev202501260345.dist-info}/WHEEL +0 -0
  24. {keras_hub_nightly-0.19.0.dev202501090358.dist-info → keras_hub_nightly-0.19.0.dev202501260345.dist-info}/top_level.txt +0 -0
@@ -329,8 +329,9 @@ class Bleu(keras.metrics.Metric):
  return tf.squeeze(inputs, axis=-1)
  else:
  raise ValueError(
- f"{tensor_name} must be of rank {base_rank}, {base_rank+1} "
- f"or {base_rank+2}. Found rank: {inputs.shape.rank}"
+ f"{tensor_name} must be of rank {base_rank}, "
+ f"{base_rank + 1}, or {base_rank + 2}. "
+ f"Found rank: {inputs.shape.rank}"
  )

  y_true = validate_and_fix_rank(y_true, "y_true", 1)
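Note: the reworded message comes from a rank-validation helper in the Bleu metric. A minimal sketch of that kind of helper is shown below; the expand_dims branch and overall structure are assumptions for illustration, not code copied from the module:

    import tensorflow as tf

    def validate_and_fix_rank(inputs, tensor_name, base_rank=0):
        # Accept tensors of rank base_rank, base_rank + 1, or base_rank + 2
        # and normalize them to the rank the metric expects.
        rank = inputs.shape.rank
        if rank == base_rank:
            return tf.expand_dims(inputs, axis=0)  # assumed: add a batch axis
        elif rank == base_rank + 1:
            return inputs
        elif rank == base_rank + 2:
            return tf.squeeze(inputs, axis=-1)  # drop a trailing singleton axis
        else:
            raise ValueError(
                f"{tensor_name} must be of rank {base_rank}, "
                f"{base_rank + 1}, or {base_rank + 2}. "
                f"Found rank: {rank}"
            )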
@@ -219,7 +219,7 @@ def get_resnet_block(_resnet, block_num):
  else:
  x = _resnet.pyramid_outputs[extractor_levels[block_num - 1]]
  y = _resnet.get_layer(
- f"stack{block_num}_block{num_blocks[block_num]-1}_add"
+ f"stack{block_num}_block{num_blocks[block_num] - 1}_add"
  ).output
  return keras.models.Model(
  inputs=x,
@@ -1,3 +1,17 @@
  """BASNet model preset configurations."""

- basnet_presets = {}
+ basnet_presets = {
+ "basnet_duts": {
+ "metadata": {
+ "description": (
+ "BASNet model with a 34-layer ResNet backbone, pre-trained "
+ "on the DUTS image dataset at a 288x288 resolution. Model "
+ "training was performed by Hamid Ali "
+ "(https://github.com/hamidriasat/BASNet)."
+ ),
+ "params": 108886792,
+ "path": "basnet",
+ },
+ "kaggle_handle": "kaggle://keras/basnet/keras/base1",
+ },
+ }
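Note: once the preset is registered, it should be loadable by name through the standard from_preset API. A minimal, untested sketch (assuming the BASNet task is exposed as keras_hub.models.BASNetImageSegmenter and that the Kaggle handle above has been published):

    import keras_hub

    # Pull the newly added preset by name; weights resolve through the
    # "kaggle://keras/basnet/keras/base1" handle declared above.
    segmenter = keras_hub.models.BASNetImageSegmenter.from_preset("basnet_duts")
    segmenter.summary()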
@@ -88,13 +88,13 @@ class SpatialPyramidPooling(keras.layers.Layer):
  dilation_rate=dilation_rate,
  use_bias=False,
  data_format=self.data_format,
- name=f"aspp_conv_{i+2}",
+ name=f"aspp_conv_{i + 2}",
  ),
  keras.layers.BatchNormalization(
- axis=self.channel_axis, name=f"aspp_bn_{i+2}"
+ axis=self.channel_axis, name=f"aspp_bn_{i + 2}"
  ),
  keras.layers.Activation(
- self.activation, name=f"aspp_activation_{i+2}"
+ self.activation, name=f"aspp_activation_{i + 2}"
  ),
  ]
  )
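Note: most of the remaining source hunks in this release are the same mechanical change, spaces added around binary operators inside f-string replacement fields (e.g. {i+2} becomes {i + 2}), likely reflecting an updated auto-formatter configuration. The rendered strings, and therefore the generated layer names, are unchanged; a quick check:

    i = 0
    # Only the source formatting differs; the produced layer name is identical.
    assert f"aspp_conv_{i+2}" == f"aspp_conv_{i + 2}" == "aspp_conv_2"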
@@ -81,14 +81,14 @@ class DenseNetBackbone(FeaturePyramidBackbone):
  channel_axis,
  stackwise_num_repeats[stack_index],
  growth_rate,
- name=f"stack{stack_index+1}",
+ name=f"stack{stack_index + 1}",
  )
  pyramid_outputs[f"P{index}"] = x
  x = apply_transition_block(
  x,
  channel_axis,
  compression_ratio,
- name=f"transition{stack_index+1}",
+ name=f"transition{stack_index + 1}",
  )

  x = apply_dense_block(
@@ -140,7 +140,7 @@ def apply_dense_block(x, channel_axis, num_repeats, growth_rate, name=None):

  for i in range(num_repeats):
  x = apply_conv_block(
- x, channel_axis, growth_rate, name=f"{name}_block{i+1}"
+ x, channel_axis, growth_rate, name=f"{name}_block{i + 1}"
  )
  return x

@@ -81,7 +81,7 @@ class FluxTextToImage(TextToImage):

  def fit(self, *args, **kwargs):
  raise NotImplementedError(
- "Currently, `fit` is not supported for " "`FluxTextToImage`."
+ "Currently, `fit` is not supported for `FluxTextToImage`."
  )

  def generate_step(
@@ -5,7 +5,7 @@ backbone_presets = {
  "pali_gemma_3b_mix_224": {
  "metadata": {
  "description": (
- "image size 224, mix fine tuned, text sequence " "length is 256"
+ "image size 224, mix fine tuned, text sequence length is 256"
  ),
  "params": 2923335408,
  "path": "pali_gemma",
@@ -45,7 +45,7 @@ backbone_presets = {
  "pali_gemma_3b_896": {
  "metadata": {
  "description": (
- "image size 896, pre trained, text sequence length " "is 512"
+ "image size 896, pre trained, text sequence length is 512"
  ),
  "params": 2927759088,
  "path": "pali_gemma",
@@ -177,7 +177,7 @@ class ResNetBackbone(FeaturePyramidBackbone):
  use_bias=False,
  padding="same",
  dtype=dtype,
- name=f"conv{conv_index+1}_conv",
+ name=f"conv{conv_index + 1}_conv",
  )(x)

  if not use_pre_activation:
@@ -209,9 +209,9 @@ class FeaturePyramid(keras.layers.Layer):
  )
  if i == backbone_max_level + 1 and self.use_p5:
  self.output_conv_layers[level].build(
- (None, None, None, input_shapes[f"P{i-1}"][-1])
+ (None, None, None, input_shapes[f"P{i - 1}"][-1])
  if self.data_format == "channels_last"
- else (None, input_shapes[f"P{i-1}"][1], None, None)
+ else (None, input_shapes[f"P{i - 1}"][1], None, None)
  )
  else:
  self.output_conv_layers[level].build(
@@ -277,7 +277,7 @@ class FeaturePyramid(keras.layers.Layer):
  if i < backbone_max_level:
  # for the top most output, it doesn't need to merge with any
  # upper stream outputs
- upstream_output = self.top_down_op(output_features[f"P{i+1}"])
+ upstream_output = self.top_down_op(output_features[f"P{i + 1}"])
  output = self.merge_op([output, upstream_output])
  output_features[level] = (
  self.lateral_batch_norm_layers[level](output)
@@ -296,9 +296,9 @@ class FeaturePyramid(keras.layers.Layer):
  for i in range(backbone_max_level + 1, self.max_level + 1):
  level = f"P{i}"
  feats_in = (
- inputs[f"P{i-1}"]
+ inputs[f"P{i - 1}"]
  if i == backbone_max_level + 1 and self.use_p5
- else output_features[f"P{i-1}"]
+ else output_features[f"P{i - 1}"]
  )
  if i > backbone_max_level + 1:
  feats_in = self.activation(feats_in)
@@ -82,8 +82,7 @@ class StableDiffusion3Inpaint(Inpaint):

  def fit(self, *args, **kwargs):
  raise NotImplementedError(
- "Currently, `fit` is not supported for "
- "`StableDiffusion3Inpaint`."
+ "Currently, `fit` is not supported for `StableDiffusion3Inpaint`."
  )

  def generate_step(
@@ -351,7 +351,7 @@ class ViTEncoder(keras.layers.Layer):
  attention_dropout=self.attention_dropout,
  layer_norm_epsilon=self.layer_norm_epsilon,
  dtype=self.dtype_policy,
- name=f"tranformer_block_{i+1}",
+ name=f"tranformer_block_{i + 1}",
  )
  encoder_block.build((None, None, self.hidden_dim))
  self.encoder_layers.append(encoder_block)
@@ -479,7 +479,7 @@ class TestCase(tf.test.TestCase, parameterized.TestCase):
  # Check name maps to classname.
  name = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", cls.__name__)
  name = re.sub("([a-z])([A-Z])", r"\1_\2", name).lower()
- self.assertRegexpMatches(backbone.name, name)
+ self.assertRegex(backbone.name, name)

  # Check mixed precision.
  if run_mixed_precision_check:
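Note: assertRegexpMatches has been a deprecated alias of assertRegex since Python 3.2 and was removed in Python 3.12, so the test helper now uses the supported name. A standalone illustration:

    import unittest

    class NameTest(unittest.TestCase):
        def test_backbone_name(self):
            # assertRegex is the supported spelling; assertRegexpMatches no
            # longer exists on Python 3.12+.
            self.assertRegex("res_net_backbone_1", "res_net_backbone")

    if __name__ == "__main__":
        unittest.main()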
@@ -150,8 +150,7 @@ class ByteTokenizer(tokenizer.Tokenizer):
  ):
  if not is_int_dtype(dtype):
  raise ValueError(
- "Output dtype must be an integer type. "
- f"Received: dtype={dtype}"
+ f"Output dtype must be an integer type. Received: dtype={dtype}"
  )

  # Check normalization_form.
@@ -1,5 +1,7 @@
  import io

+ from keras_hub.src.utils.tensor_utils import assert_tf_libs_installed
+
  try:
  import sentencepiece as spm
  import tensorflow as tf
@@ -77,6 +79,7 @@ def compute_sentence_piece_proto(
  tf.Tensor([ 4 8 12 5 9 14 5 6 13 4 7 10 11 6 13],
  shape=(15,), dtype=int32)
  """
+ assert_tf_libs_installed("compute_sentence_piece_proto")

  if spm is None:
  raise ImportError(
@@ -203,8 +203,7 @@ class UnicodeCodepointTokenizer(tokenizer.Tokenizer):
  ) -> None:
  if not is_int_dtype(dtype):
  raise ValueError(
- "Output dtype must be an integer type. "
- f"Received: dtype={dtype}"
+ f"Output dtype must be an integer type. Received: dtype={dtype}"
  )

  # Check normalization_form.
@@ -1,5 +1,6 @@
  from keras_hub.src.api_export import keras_hub_export
  from keras_hub.src.tokenizers.word_piece_tokenizer import pretokenize
+ from keras_hub.src.utils.tensor_utils import assert_tf_libs_installed

  try:
  import tensorflow as tf
@@ -117,6 +118,8 @@ def compute_word_piece_vocabulary(
  inputs.map(tokenizer.tokenize)
  ```
  """ # noqa: E501
+ assert_tf_libs_installed("compute_word_piece_vocabulary")
+
  # Read data files.
  if not isinstance(data, (list, tf.data.Dataset)):
  raise ValueError(
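Note: both tokenizer trainers now call assert_tf_libs_installed(...) up front, so users without the optional TensorFlow dependencies get a clear error at the entry point rather than a confusing failure later. The helper lives in keras_hub.src.utils.tensor_utils; the sketch below shows roughly what such a guard does (the message and the exact packages checked are assumptions, not the library's code):

    # Hypothetical sketch of an import guard like assert_tf_libs_installed.
    def assert_tf_libs_installed(symbol_name):
        try:
            import tensorflow  # noqa: F401
            import tensorflow_text  # noqa: F401
        except ImportError:
            raise ImportError(
                f"`{symbol_name}` requires the `tensorflow` and "
                "`tensorflow-text` packages. Please install them to use "
                "this function."
            )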
@@ -59,9 +59,11 @@ def convert_weights(backbone, loader, timm_config):
  num_stacks = len(backbone.stackwise_num_repeats)
  for stack_index in range(num_stacks):
  for block_idx in range(backbone.stackwise_num_repeats[stack_index]):
- keras_name = f"stack{stack_index+1}_block{block_idx+1}"
+ keras_name = f"stack{stack_index + 1}_block{block_idx + 1}"
  hf_name = (
- f"features.denseblock{stack_index+1}.denselayer{block_idx+1}"
+ "features."
+ f"denseblock{stack_index + 1}"
+ f".denselayer{block_idx + 1}"
  )
  port_batch_normalization(f"{keras_name}_1_bn", f"{hf_name}.norm1")
  port_conv2d(f"{keras_name}_1_conv", f"{hf_name}.conv1")
@@ -69,8 +71,8 @@ def convert_weights(backbone, loader, timm_config):
  port_conv2d(f"{keras_name}_2_conv", f"{hf_name}.conv2")

  for stack_index in range(num_stacks - 1):
- keras_transition_name = f"transition{stack_index+1}"
- hf_transition_name = f"features.transition{stack_index+1}"
+ keras_transition_name = f"transition{stack_index + 1}"
+ hf_transition_name = f"features.transition{stack_index + 1}"
  port_batch_normalization(
  f"{keras_transition_name}_bn", f"{hf_transition_name}.norm"
  )
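Note: the converter hunks above only restructure how the Keras and timm layer names are built; the generated name pairs are unchanged. For illustration, with a hypothetical DenseNet-121-style configuration the block loop yields pairs such as the following (the repeat counts are an assumption used only for this example):

    # Assumed DenseNet-121-style stack configuration, for illustration only.
    stackwise_num_repeats = [6, 12, 24, 16]
    pairs = []
    for stack_index in range(len(stackwise_num_repeats)):
        for block_idx in range(stackwise_num_repeats[stack_index]):
            keras_name = f"stack{stack_index + 1}_block{block_idx + 1}"
            hf_name = (
                "features."
                f"denseblock{stack_index + 1}"
                f".denselayer{block_idx + 1}"
            )
            pairs.append((keras_name, hf_name))
    # First pair: ("stack1_block1", "features.denseblock1.denselayer1")
    print(pairs[0])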
@@ -268,7 +268,7 @@ def convert_weights(backbone, loader, timm_config):
  # 97 is the start of the lowercase alphabet.
  letter_identifier = chr(block_idx + 97)

- keras_block_prefix = f"block{stack_index+1}{letter_identifier}_"
+ keras_block_prefix = f"block{stack_index + 1}{letter_identifier}_"
  hf_block_prefix = f"blocks.{stack_index}.{block_idx}."

  if block_type == "v1":
@@ -89,7 +89,7 @@ def convert_weights(backbone, loader, timm_config):
  for block_idx in range(backbone.stackwise_num_blocks[stack_index]):
  if version == "v1":
  keras_name = f"stack{stack_index}_block{block_idx}"
- hf_name = f"layer{stack_index+1}.{block_idx}"
+ hf_name = f"layer{stack_index + 1}.{block_idx}"
  else:
  keras_name = f"stack{stack_index}_block{block_idx}"
  hf_name = f"stages.{stack_index}.blocks.{block_idx}"
@@ -1,7 +1,7 @@
  from keras_hub.src.api_export import keras_hub_export

  # Unique source of truth for the version number.
- __version__ = "0.19.0.dev202501090358"
+ __version__ = "0.19.0.dev202501260345"


  @keras_hub_export("keras_hub.version")
@@ -1,6 +1,6 @@
  Metadata-Version: 2.2
  Name: keras-hub-nightly
- Version: 0.19.0.dev202501090358
+ Version: 0.19.0.dev202501260345
  Summary: Industry-strength Natural Language Processing extensions for Keras.
  Home-page: https://github.com/keras-team/keras-hub
  Author: Keras team
@@ -9,7 +9,7 @@ keras_hub/api/tokenizers/__init__.py,sha256=mtJgQy1spfQnPAkeLoeinsT_W9iCWHlJXwzc
  keras_hub/api/utils/__init__.py,sha256=Gp1E6gG-RtKQS3PBEQEOz9PQvXkXaJ0ySGMqZ7myN7A,215
  keras_hub/src/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  keras_hub/src/api_export.py,sha256=9pQZK27JObxWZ96QPLBp1OBsjWigh1iuV6RglPGMRk0,1499
- keras_hub/src/version_utils.py,sha256=1azFcDsz3l9ou6a0Z5UvGUYOSfX9oH5PRsuYxd9JBI8,222
+ keras_hub/src/version_utils.py,sha256=tTFXFFeAG8omIjQguMXQRXJIRBjW6nmc21CUsBB4mP8,222
  keras_hub/src/bounding_box/__init__.py,sha256=7i6KnGupN4AVivR_dFjQyuuTbI0GkHy8d-aMXeqZdU8,95
  keras_hub/src/bounding_box/converters.py,sha256=UUp1hwegpDZyIo8sh9TLNy1v6JjwmvwzL6wmHFMAtbk,21916
  keras_hub/src/bounding_box/formats.py,sha256=YmskOz2BOSat7NaE__J9VfpSNGPJJR0znSzA4lp8MMI,3868
@@ -43,7 +43,7 @@ keras_hub/src/layers/preprocessing/random_deletion.py,sha256=x23nRo0ir2J4Ps42i9X
  keras_hub/src/layers/preprocessing/random_swap.py,sha256=w2z7yNQsII5g4sEFi4GXfgxIc1S6UUt3a8YWZew_f4U,9504
  keras_hub/src/layers/preprocessing/start_end_packer.py,sha256=lY2K937z6JucxNe7VknynhhjrcUfFigU6mqIdv2gS-Y,7973
  keras_hub/src/metrics/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- keras_hub/src/metrics/bleu.py,sha256=N4hnlCIFk558nZAHxGlzLYBx6gwpWS3Wvw1iFM69xiA,13665
+ keras_hub/src/metrics/bleu.py,sha256=pnid5azpAxO6vKEfUtAby3nH29OGbwYKgVGOGeoaA3I,13694
  keras_hub/src/metrics/edit_distance.py,sha256=kjhe8uNjvv8aN49RyrKAbNi7a8_OlB8fMza0J_CfNQg,6353
  keras_hub/src/metrics/perplexity.py,sha256=dDUQcfE5JbAruG3spEkgue6IjHcynqgmGpJAqWg22Tw,6139
  keras_hub/src/metrics/rouge_base.py,sha256=Pt2DUznhTTeR-fX1nQ_wSbPtmuTgxQTvrGpu8LRVapE,6264
@@ -87,10 +87,10 @@ keras_hub/src/models/bart/bart_seq_2_seq_lm_preprocessor.py,sha256=3_e-ULIcm_3DK
  keras_hub/src/models/bart/bart_tokenizer.py,sha256=Q7IXmIwXzhPSN427oQRyF9ufoExQGS184Yo_4boaOZo,2811
  keras_hub/src/models/basnet/__init__.py,sha256=4N6XvIUYYJl5xtoaL3_9fawUX_qP3WmTYNEEU7tn8Gw,253
  keras_hub/src/models/basnet/basnet.py,sha256=JA58Q9lmygdSOm5MUaPAlaL6B8XnmqCcRaGrk9c8P3Q,4287
- keras_hub/src/models/basnet/basnet_backbone.py,sha256=t_52WW6jetONS7AnPf9YsiMLDqOjVwjNuayQEv6ZAk4,13503
+ keras_hub/src/models/basnet/basnet_backbone.py,sha256=P-jogkYIu9j7_28fl2RFQRMl87BXz1wcY_LtIrxBy1E,13505
  keras_hub/src/models/basnet/basnet_image_converter.py,sha256=DwzAwtZeggYw_qyRQ-Abnnm885Wobv3wClxRzOTscI0,342
  keras_hub/src/models/basnet/basnet_preprocessor.py,sha256=uM504utaXODSqR5zpKnopRuaV_l84zCg06RkNoNSKIs,510
- keras_hub/src/models/basnet/basnet_presets.py,sha256=z6tR2q_EvYnUmGfsWIWYfmR_8gvWYPH3QmtpAu_T8f8,63
+ keras_hub/src/models/basnet/basnet_presets.py,sha256=GQx-ijM1bqYRoz6_vXczKuCSZsfgmvyRoSvtTQBKres,561
  keras_hub/src/models/bert/__init__.py,sha256=K_UmCqDgOFFvXgzjXRn5oG0WWi53rAsQMOmUrsiBe1k,245
  keras_hub/src/models/bert/bert_backbone.py,sha256=o8GXUpoKPXLpfFzx5u9wI_3rZJeabPfYJEYSI09Clos,8069
  keras_hub/src/models/bert/bert_masked_lm.py,sha256=8gb1g8h5VFVLmKNEPfLe26z7SOlFnzf9R9okK3rp8AU,4045
@@ -135,11 +135,11 @@ keras_hub/src/models/deeplab_v3/__init__.py,sha256=FHAUPM4a1DJj4EsNTbYEd1riNq__u
  keras_hub/src/models/deeplab_v3/deeplab_v3_backbone.py,sha256=dH7HHu_NAnE-HP6ivOL7fFLQZHt_MWmehlMccLljhPc,7764
  keras_hub/src/models/deeplab_v3/deeplab_v3_image_converter.py,sha256=mRkH3HdhpV0fCcQcVXEvIX7SNk-bAMb3SAHzgK-FD5c,371
  keras_hub/src/models/deeplab_v3/deeplab_v3_image_segmeter_preprocessor.py,sha256=hR9S6lNYamY0EBDBo3e1qTCiwtftmLXrN-UYuzfw5Io,581
- keras_hub/src/models/deeplab_v3/deeplab_v3_layers.py,sha256=qmEiolOOriLAojXB67xXW9IOo717kaCGeDVZJLaGY98,7834
+ keras_hub/src/models/deeplab_v3/deeplab_v3_layers.py,sha256=mz9nG55gdXSTDE96AXgeTCwUFB95DIpTuqrvWIt5Lco,7840
  keras_hub/src/models/deeplab_v3/deeplab_v3_presets.py,sha256=ZKYY8A7mV2QvwXwjDUd9xAbVHo58-Hgj_IqNUbuyCIU,625
  keras_hub/src/models/deeplab_v3/deeplab_v3_segmenter.py,sha256=pubi30sPJKLOpz9fRQff2FZt_53KBvwf2uyaJ5YL7J8,3726
  keras_hub/src/models/densenet/__init__.py,sha256=r7StyamnWeeZxOk9r4ZYNbS_YVhu9YGPyXhNxljvdPg,269
- keras_hub/src/models/densenet/densenet_backbone.py,sha256=5QawyB4EhyaXpmm8l_QUYveU7kEet3jRD3s94XAz8Tw,6738
+ keras_hub/src/models/densenet/densenet_backbone.py,sha256=f2nfsXyXQert2aYHq-L-JZtp8inq1fs1K47rzZQ9nTI,6744
  keras_hub/src/models/densenet/densenet_image_classifier.py,sha256=ye-Ix3oU42pfsDoh-h1PG4di1kzldO0ZO7Nj304p_X4,544
  keras_hub/src/models/densenet/densenet_image_classifier_preprocessor.py,sha256=xDZbTw_h6pjLDzf8QmbDyMnMsFzgh-dPX1ldg9kddhg,563
  keras_hub/src/models/densenet/densenet_image_converter.py,sha256=DoxYlJVZ9uaabFhVjWOmzvhONoc8KNcQj2vQ6Z1AUpU,354
@@ -186,7 +186,7 @@ keras_hub/src/models/flux/flux_layers.py,sha256=wevcAEbayBD8bVm-21FBi2LQ13pZzB99
  keras_hub/src/models/flux/flux_maths.py,sha256=2pnHW8HW7V2JZ8HIrUwE-UU4klpFQaOkoAvG5nWVfyY,7502
  keras_hub/src/models/flux/flux_model.py,sha256=K92PyeFHIp8SwXuxhv__XCEaQ2wqSW1jOb97I4S24Rw,8991
  keras_hub/src/models/flux/flux_presets.py,sha256=z7C_FbI1_F5YETXuWpc7Yh_0w-5N0eBQy6Oks_X9W88,54
- keras_hub/src/models/flux/flux_text_to_image.py,sha256=mI_QxOzjXl3b5s7Q1LZemceCdeboqPD5ilEPEEyer40,4169
+ keras_hub/src/models/flux/flux_text_to_image.py,sha256=Rf5dD2EhG0bE8Gyg9sqaA8YEexS1kdraofIkxiZDjvc,4166
  keras_hub/src/models/flux/flux_text_to_image_preprocessor.py,sha256=Fs9jr97QtmRUbRRz1kITpkuhDM2GoV3n0XSFC-qQA14,2252
  keras_hub/src/models/gemma/__init__.py,sha256=rVzOJMJ39bgVlT8UdC0t8PlN2c237GKTBmfHIsbPuOQ,251
  keras_hub/src/models/gemma/gemma_attention.py,sha256=1CVN5z9GKoU8TuNMih2_MweDkpd98xSqdic9F8xIBE8,8317
@@ -257,7 +257,7 @@ keras_hub/src/models/pali_gemma/pali_gemma_causal_lm.py,sha256=AViEs6YltUqWnIVo7
  keras_hub/src/models/pali_gemma/pali_gemma_causal_lm_preprocessor.py,sha256=F57y0fZ0wYYxfGIjfrJc1W9uQpViYFx5bvFjj5CqUbI,4814
  keras_hub/src/models/pali_gemma/pali_gemma_decoder_block.py,sha256=24ABQ1vGlppV-KfWh0YqJjzM_Lu2GIwvyJ4X2XXie_A,5616
  keras_hub/src/models/pali_gemma/pali_gemma_image_converter.py,sha256=5yM_jUtrFsWIieiwfFBoP7mtPmQAwywkeLKbd7fhmzk,371
- keras_hub/src/models/pali_gemma/pali_gemma_presets.py,sha256=O648iwzs0wooiQCfDQ-n0wOtzIOEDGXRSwSb_Brx2Ck,8985
+ keras_hub/src/models/pali_gemma/pali_gemma_presets.py,sha256=Ka1ChUUSKw-yY2th3QtmNtkeXt0krYfwhkHrScioMls,8979
  keras_hub/src/models/pali_gemma/pali_gemma_tokenizer.py,sha256=ljTiADHo0Ok88q-jVzwJIle2C8xcxnudLTsBLzIySaM,2415
  keras_hub/src/models/pali_gemma/pali_gemma_vit.py,sha256=ViPKfGksbxBGJ3iS3M_KWxRc8Ie4LF7rWWUKDiqECJE,18285
  keras_hub/src/models/phi3/__init__.py,sha256=zIbf1MU-ks91mEkjTRJAsk51N3BBnXDF2JM1vO-13PQ,245
@@ -271,7 +271,7 @@ keras_hub/src/models/phi3/phi3_presets.py,sha256=sb2ce7Gq1OikFEf2KIYG69rFKHYKj8q
  keras_hub/src/models/phi3/phi3_rotary_embedding.py,sha256=wqiRn8nETNcLc5Vsm_d_8s11Ro6ibWZbWvODdLqIOo4,5013
  keras_hub/src/models/phi3/phi3_tokenizer.py,sha256=bOPH14wTVVHJHq8mgzXLjsgvKMNhfO8eayevAPpjYVA,1992
  keras_hub/src/models/resnet/__init__.py,sha256=C5UqlQ6apm8WSp1bnrxB6Bi3BGaknxRQs-r3b2wpaGA,257
- keras_hub/src/models/resnet/resnet_backbone.py,sha256=3acTjdWbnos8l_TPxYLgoV3Y4V_vJ_o1AqGhiQu459k,31274
+ keras_hub/src/models/resnet/resnet_backbone.py,sha256=Q7nlqcTXZzjqd0e-DsjHC4ok58yOX7qxseotym3uZpM,31276
  keras_hub/src/models/resnet/resnet_image_classifier.py,sha256=nf35EKDzvBkfhHsK-s6Ks0nbhvKO7HEOYZm94YckyWE,510
  keras_hub/src/models/resnet/resnet_image_classifier_preprocessor.py,sha256=fM7gyQ0qB-RRuI4USJkRD6q9-HVfuC71e-BLTo-UhHQ,543
  keras_hub/src/models/resnet/resnet_image_converter.py,sha256=fgTxihJznGFss-y3Z-jp0JE3X1gaaB2y-f2KMwrT8Pk,342
@@ -279,7 +279,7 @@ keras_hub/src/models/resnet/resnet_presets.py,sha256=cryfXlC_FSEN_jrexKIh5aVbzp8
  keras_hub/src/models/retinanet/__init__.py,sha256=veWIFvMN6151M69l7FvTcI-IIEe_8dLmNO5NLOszQ1c,275
  keras_hub/src/models/retinanet/anchor_generator.py,sha256=0OgKSW3OKmbc0cOPHF6FYTAzn7fcHklg665PGSwAaDM,6504
  keras_hub/src/models/retinanet/box_matcher.py,sha256=l820r1R-ByqiyVgmZ0YFjjz0njchDda-wItzLn1X84o,10834
- keras_hub/src/models/retinanet/feature_pyramid.py,sha256=VxLcOEjJSXIDu30oMcZEYdVlpHaOP3IutZNwh0N3uHQ,17604
+ keras_hub/src/models/retinanet/feature_pyramid.py,sha256=hbdrj6X-D2SlwOp2h1WcBlTdSAlLmFK43X7OrkJRoMA,17614
  keras_hub/src/models/retinanet/non_max_supression.py,sha256=PMOLlRw-EnyEmhlUhJjEbHf1xXiplN95pUxQbiJQbN4,20996
  keras_hub/src/models/retinanet/prediction_head.py,sha256=xWHt21-SS2t7vCmTONlR1lSbJXhml5jx68V8MGbGybg,7863
  keras_hub/src/models/retinanet/retinanet_backbone.py,sha256=BJBPJLxpOCOU0Br7b4JsgCZBHQHLAhxLqo9BHNIsl1g,5659
@@ -317,7 +317,7 @@ keras_hub/src/models/stable_diffusion_3/flow_match_euler_discrete_scheduler.py,s
  keras_hub/src/models/stable_diffusion_3/mmdit.py,sha256=0gq2tcIqcbiGKKDDj3vrRsF67U3qE9g706XPs2BfCOY,40979
  keras_hub/src/models/stable_diffusion_3/stable_diffusion_3_backbone.py,sha256=w8lsMampk34M9xQi96mEnXmkaKQqFQtoFTW8zP7ilEA,24078
  keras_hub/src/models/stable_diffusion_3/stable_diffusion_3_image_to_image.py,sha256=oQcVCWOwrdUTrr_JNekoMqdSlKYMGz5tG6v8uD25lTc,5479
- keras_hub/src/models/stable_diffusion_3/stable_diffusion_3_inpaint.py,sha256=aZMIC-GYjLhdU_yM7fJEznApCo1zwRAgwQbW0tCW0xY,6399
+ keras_hub/src/models/stable_diffusion_3/stable_diffusion_3_inpaint.py,sha256=t4uw920Jn1k80air3WRGimKf71aMVu6q73oWFH348vk,6384
  keras_hub/src/models/stable_diffusion_3/stable_diffusion_3_presets.py,sha256=x7Ez4L955MJE4ABtBy-63YpU9XpR0Ro8QWPzYYJs1yE,2167
  keras_hub/src/models/stable_diffusion_3/stable_diffusion_3_text_to_image.py,sha256=Yt-UIatVKANjjKFCFEj1rIHhOrt8hqefKKQJIAWcTLc,4567
  keras_hub/src/models/stable_diffusion_3/stable_diffusion_3_text_to_image_preprocessor.py,sha256=m5PdVSgTcYuqd7jOQ8wD4PAnMa7wY2WdhwpK3hdydhM,2756
@@ -344,7 +344,7 @@ keras_hub/src/models/vit/vit_backbone.py,sha256=kGmRZO4u-1q4PBcbhJbiWVIEVYAcp2H4
  keras_hub/src/models/vit/vit_image_classifier.py,sha256=lMVxiD1_6drx7XQ7P7YzlqnFP7kT1zlMe84f-T3SDQI,6332
  keras_hub/src/models/vit/vit_image_classifier_preprocessor.py,sha256=wu6YcBlXMWB9sKCPvmNdGBZKTLQt_HyHWS6P9nyDwsk,504
  keras_hub/src/models/vit/vit_image_converter.py,sha256=5xVF04BzMcdTDc6aErAYj3_BuGmVd3zoJMcH1ho4T0g,2561
- keras_hub/src/models/vit/vit_layers.py,sha256=s4j3n3qnJnv6W9AdUkNsO3Vsi_BhxEGECYkaLVCU6XY,13238
+ keras_hub/src/models/vit/vit_layers.py,sha256=Zsz-ARPY49S1nXLUtpFwtPfw31D-vCtKesEo_2JIKPA,13240
  keras_hub/src/models/vit/vit_presets.py,sha256=zZhxUleOom1ie3gn0Mi-_xhhdFEEsnqSQyKADV2L38k,4479
  keras_hub/src/models/vit_det/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  keras_hub/src/models/vit_det/vit_det_backbone.py,sha256=DOZ5J7c1t5PAZ6y0pMmBoQTMOUup7UoUrYVfCs69ltY,7697
@@ -380,16 +380,16 @@ keras_hub/src/samplers/serialization.py,sha256=K6FC4AY1sfOLLIk2k4G783XWnQ_Rk3z1Q
  keras_hub/src/samplers/top_k_sampler.py,sha256=WSyrhmOCan55X2JYAnNWE88rkx66sXqdoerl87nOrDQ,2250
  keras_hub/src/samplers/top_p_sampler.py,sha256=9r29WdqBlrW_2TBma6QqkRps2Uit4a6iZPmq1Gsiuko,3400
  keras_hub/src/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- keras_hub/src/tests/test_case.py,sha256=oGWoUhlKgjVMNIjvUVnQR-k5iKvodztHsFMOs669Trw,27402
+ keras_hub/src/tests/test_case.py,sha256=JVD1srV8SgVvCLRUvFsKAYH-NeNyZTavveBw0bHsgWQ,27394
  keras_hub/src/tokenizers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  keras_hub/src/tokenizers/byte_pair_tokenizer.py,sha256=WeUlHMAf5y_MUjFIfVhEcFoOZu-z4kkSj-Dq-pegM9w,24052
- keras_hub/src/tokenizers/byte_tokenizer.py,sha256=c1a41eVuLzGmBtscQ0RxPIqFi41m_604KJ9fdpPR7Sc,10437
+ keras_hub/src/tokenizers/byte_tokenizer.py,sha256=GPIKaddXugbfckfhodADsBpaYb72DgFMs_xfXHnK4qU,10418
  keras_hub/src/tokenizers/sentence_piece_tokenizer.py,sha256=nOqkpa2nHitITpdowPHdwxiN87e8huLW8Dt2gozVnhI,9350
- keras_hub/src/tokenizers/sentence_piece_tokenizer_trainer.py,sha256=LhUxwcaDKt5V58DBzK9Sh4D-hOL80SHGpL4LavWbq74,4642
+ keras_hub/src/tokenizers/sentence_piece_tokenizer_trainer.py,sha256=caqgV9N4lH97zBviFPdpwo_O95AaJBEJLQv6Icq3Hs8,4774
  keras_hub/src/tokenizers/tokenizer.py,sha256=v0Ka5ayrBwpsGBlkIadXK-b4RsMTbhV6BZrvKullbxY,9722
- keras_hub/src/tokenizers/unicode_codepoint_tokenizer.py,sha256=KxuVsUx3ntGsuqaQ-gnFWFfoVLsl5Hag7rBk6xfq-fQ,13572
+ keras_hub/src/tokenizers/unicode_codepoint_tokenizer.py,sha256=hRv_XxoPIPDpHfO0ZttSOv_M89sMaFpvmllojvKz_ac,13553
  keras_hub/src/tokenizers/word_piece_tokenizer.py,sha256=vP6AZgbzsRiuPCt3W_n94nsF7XiERnagWcH_rqJHtVU,19943
- keras_hub/src/tokenizers/word_piece_tokenizer_trainer.py,sha256=Zz1SGgArykxBVWnS5YV-ViqyMOrw3j3i_i_jto96zCg,6610
+ keras_hub/src/tokenizers/word_piece_tokenizer_trainer.py,sha256=cylrs02ZrYQ1TuZr9oyS3NrVbDwGctA3VXbIh1pFJMQ,6743
  keras_hub/src/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  keras_hub/src/utils/keras_utils.py,sha256=0yKIfFuO_IqAH8vHbG3ncRmCVKg__xRGfQtLYWZ8YuA,1695
  keras_hub/src/utils/pipeline_model.py,sha256=jgzB6NQPSl0KOu08N-TazfOnXnUJbZjH2EXXhx25Ftg,9084
@@ -399,9 +399,9 @@ keras_hub/src/utils/tensor_utils.py,sha256=YVJesN91bk-OzJXY1mOKBppuY8noBU7zhPQNX
  keras_hub/src/utils/imagenet/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  keras_hub/src/utils/imagenet/imagenet_utils.py,sha256=MvIvv1WJo51ZXBxy4S7t_DsN3ZMtJWlC4cmRvKM2kIA,39304
  keras_hub/src/utils/timm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- keras_hub/src/utils/timm/convert_densenet.py,sha256=V-GRjWuDnlh3b1EMxqahwZ3GMwSgOa3v0HOfb2ZZ-d0,3342
- keras_hub/src/utils/timm/convert_efficientnet.py,sha256=wkOKTLS_N_VKy1CQQGjSlD_TPSOOmCMMXQvbjravN6g,17098
- keras_hub/src/utils/timm/convert_resnet.py,sha256=ee8eTml0ffJKE8avzGoLFcpjPF63DsvoIUArAGa8Ngg,5832
+ keras_hub/src/utils/timm/convert_densenet.py,sha256=fu8HBIQis5o3ib2tyI2qnmYScVrVIQySok8vTfa1qJ8,3393
+ keras_hub/src/utils/timm/convert_efficientnet.py,sha256=SgEIlyyinS04qoQpEgh3WazHq544zNUCCpfmWh3EjSs,17100
+ keras_hub/src/utils/timm/convert_resnet.py,sha256=8JFkVtdpy5z9h83LJ97rD-a8FRejXPZvMNksNuStqjM,5834
  keras_hub/src/utils/timm/convert_vgg.py,sha256=MT5jGnLrzenPpe66Af_Lp1IdR9KGtsSrcmn6_UPqHvQ,2419
  keras_hub/src/utils/timm/preset_loader.py,sha256=cdZDjthZdTD2myMOenQar4ACyi7VTuIzNRg24LuqS-4,3374
  keras_hub/src/utils/transformers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -417,7 +417,7 @@ keras_hub/src/utils/transformers/convert_pali_gemma.py,sha256=B1leeDw96Yvu81hYum
  keras_hub/src/utils/transformers/convert_vit.py,sha256=9SUZ9utNJhW_5cj3acMn9cRy47u2eIcDsrhmzj77o9k,5187
  keras_hub/src/utils/transformers/preset_loader.py,sha256=DgGJXbTSB9Na8FIR-YWWVqQPOFxHwWrGm41EwcS_EFs,3797
  keras_hub/src/utils/transformers/safetensor_utils.py,sha256=CYUHyA4y-B61r7NDnCsFb4t_UmSwZ1k9L-8gzEd6KRg,3339
- keras_hub_nightly-0.19.0.dev202501090358.dist-info/METADATA,sha256=ywWExWZy14kzevtOFQZcdFDiqRJ2I72oWaeiFbjpZZE,7498
- keras_hub_nightly-0.19.0.dev202501090358.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
- keras_hub_nightly-0.19.0.dev202501090358.dist-info/top_level.txt,sha256=N4J6piIWBKa38A4uV-CnIopnOEf8mHAbkNXafXm_CuA,10
- keras_hub_nightly-0.19.0.dev202501090358.dist-info/RECORD,,
+ keras_hub_nightly-0.19.0.dev202501260345.dist-info/METADATA,sha256=RfHDb52lzO--RroElhD5NYIPT2rH7y1D1K8WhSHlphA,7498
+ keras_hub_nightly-0.19.0.dev202501260345.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
+ keras_hub_nightly-0.19.0.dev202501260345.dist-info/top_level.txt,sha256=N4J6piIWBKa38A4uV-CnIopnOEf8mHAbkNXafXm_CuA,10
+ keras_hub_nightly-0.19.0.dev202501260345.dist-info/RECORD,,