keras-hub-nightly 0.16.1.dev202410160341__py3-none-any.whl → 0.16.1.dev202410180341__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
--- a/keras_hub/src/models/causal_lm.py
+++ b/keras_hub/src/models/causal_lm.py
@@ -274,6 +274,7 @@ class CausalLM(Task):
         inputs,
         max_length=None,
         stop_token_ids="auto",
+        strip_prompt=False,
     ):
         """Generate text given prompt `inputs`.
 
@@ -309,6 +310,9 @@ class CausalLM(Task):
                 specify a list of token id's the model should stop on. Note that
                 sequences of tokens will each be interpreted as a stop token,
                 multi-token stop sequences are not supported.
+            strip_prompt: Optional. By default, generate() returns the full prompt
+                followed by its completion generated by the model. If this option
+                is set to True, only the newly generated text is returned.
         """
         # Setup our three main passes.
         # 1. Optionally preprocessing strings to dense integer tensors.
@@ -339,6 +343,33 @@ class CausalLM(Task):
         def generate(x):
             return generate_function(x, stop_token_ids=stop_token_ids)
 
+        def strip_prompt_function(x, prompt):
+            # This function removes the prompt from the generated
+            # response, in a batch-friendly fashion.
+            y = {}
+            prompt_mask = prompt["padding_mask"]
+            seq_len = prompt_mask.shape[1]
+
+            # We need to shift every output sequence by the size of the prompt.
+            shifts = -ops.sum(ops.cast(prompt_mask, "int"), axis=1) % seq_len
+            ix = ops.arange(seq_len, dtype="int")
+            ix = ops.expand_dims(ix, axis=0) - ops.expand_dims(shifts, axis=1)
+
+            # This produces the desired shift (in fact a rollover).
+            def roll_sequence(seq):
+                return ops.take_along_axis(seq, ix, axis=1)
+
+            # The shifting rolls the content over so the prompt is at the end of
+            # the sequence and the generated text is at the beginning. We mask
+            # it to retain the generated text only.
+            y["padding_mask"] = ops.logical_xor(
+                roll_sequence(prompt_mask), roll_sequence(x["padding_mask"])
+            )
+            # we assume the mask is enough and there is no need to zero-out the values
+            y["token_ids"] = roll_sequence(x["token_ids"])
+
+            return y
+
         def postprocess(x):
             return self.preprocessor.generate_postprocess(x)
 
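The roll-then-XOR trick above is dense, so here is a toy walk-through with NumPy standing in for `keras.ops` (mask values are illustrative): each row is rotated left by its prompt length, which parks the prompt at the end of the sequence, and XOR-ing the rolled prompt mask with the rolled output mask leaves ones exactly on the generated tokens.

```
import numpy as np

# Toy batch, seq_len=6: prompts occupy 2 and 3 positions; both rows then
# generate tokens up to 5 filled positions in total.
prompt_mask = np.array([[1, 1, 0, 0, 0, 0],
                        [1, 1, 1, 0, 0, 0]], dtype=bool)
output_mask = np.array([[1, 1, 1, 1, 1, 0],
                        [1, 1, 1, 1, 1, 0]], dtype=bool)

seq_len = prompt_mask.shape[1]
shifts = -prompt_mask.sum(axis=1) % seq_len          # per-row rotation amount
ix = np.arange(seq_len)[None, :] - shifts[:, None]   # negative indices wrap

rolled_prompt = np.take_along_axis(prompt_mask, ix, axis=1)
rolled_output = np.take_along_axis(output_mask, ix, axis=1)
print(np.logical_xor(rolled_prompt, rolled_output).astype(int))
# [[1 1 1 0 0 0]   <- the 3 generated tokens, rolled to the front
#  [1 1 0 0 0 0]]  <- the 2 generated tokens, rolled to the front
```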
@@ -347,7 +378,12 @@ class CausalLM(Task):
 
         if self.preprocessor is not None:
             inputs = [preprocess(x) for x in inputs]
-        outputs = [generate(x) for x in inputs]
+
+        if strip_prompt:
+            outputs = [strip_prompt_function(generate(x), x) for x in inputs]
+        else:
+            outputs = [generate(x) for x in inputs]
+
         if self.preprocessor is not None:
             outputs = [postprocess(x) for x in outputs]
 
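Taken together, these hunks give `generate()` an opt-in to drop the echoed prompt. A minimal usage sketch (the preset name is illustrative; any CausalLM preset works the same way):

```
import keras_hub

causal_lm = keras_hub.models.CausalLM.from_preset("gemma_2b_en")
# Default behavior: the output starts with the prompt itself.
full = causal_lm.generate("Tell me a joke.", max_length=64)
# With the new flag, only the model's completion is returned.
completion = causal_lm.generate(
    "Tell me a joke.", max_length=64, strip_prompt=True
)
```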
--- a/keras_hub/src/models/deeplab_v3/deeplab_v3_presets.py
+++ b/keras_hub/src/models/deeplab_v3/deeplab_v3_presets.py
@@ -1,7 +1,7 @@
 """DeepLabV3 preset configurations."""
 
 backbone_presets = {
-    "deeplabv3_plus_resnet50_pascalvoc": {
+    "deeplab_v3_plus_resnet50_pascalvoc": {
         "metadata": {
             "description": (
                 "DeepLabV3+ model with ResNet50 as image encoder and trained on "
@@ -10,9 +10,9 @@ backbone_presets = {
             ),
             "params": 39190656,
             "official_name": "DeepLabV3",
-            "path": "deeplabv3",
+            "path": "deeplab_v3",
             "model_card": "https://arxiv.org/abs/1802.02611",
         },
-        "kaggle_handle": "kaggle://keras/deeplabv3/keras/deeplabv3_plus_resnet50_pascalvoc/3",
+        "kaggle_handle": "kaggle://keras/deeplabv3plus/keras/deeplab_v3_plus_resnet50_pascalvoc/3",
     },
 }
--- a/keras_hub/src/models/densenet/densenet_presets.py
+++ b/keras_hub/src/models/densenet/densenet_presets.py
@@ -12,7 +12,7 @@ backbone_presets = {
             "path": "densenet",
             "model_card": "https://arxiv.org/abs/1608.06993",
         },
-        "kaggle_handle": "kaggle://keras/densenet/keras/densenet_121_imagenet/1",
+        "kaggle_handle": "kaggle://keras/densenet/keras/densenet_121_imagenet/2",
     },
     "densenet_169_imagenet": {
         "metadata": {
@@ -25,7 +25,7 @@ backbone_presets = {
             "path": "densenet",
             "model_card": "https://arxiv.org/abs/1608.06993",
         },
-        "kaggle_handle": "kaggle://keras/densenet/keras/densenet_169_imagenet/1",
+        "kaggle_handle": "kaggle://keras/densenet/keras/densenet_169_imagenet/2",
     },
     "densenet_201_imagenet": {
         "metadata": {
@@ -38,6 +38,6 @@ backbone_presets = {
             "path": "densenet",
             "model_card": "https://arxiv.org/abs/1608.06993",
         },
-        "kaggle_handle": "kaggle://keras/densenet/keras/densenet_201_imagenet/1",
+        "kaggle_handle": "kaggle://keras/densenet/keras/densenet_201_imagenet/2",
     },
 }
--- a/keras_hub/src/models/gemma/gemma_backbone.py
+++ b/keras_hub/src/models/gemma/gemma_backbone.py
@@ -224,7 +224,7 @@ class GemmaBackbone(Backbone):
 
         Example:
         ```
-        # Feel free to change the mesh shape to balance data and model parallel
+        # Feel free to change the mesh shape to balance data and model parallelism
        mesh = keras.distribution.DeviceMesh(
            shape=(1, 8), axis_names=('batch', 'model'),
            devices=keras.distribution.list_devices())
@@ -232,11 +232,19 @@ class GemmaBackbone(Backbone):
             mesh, model_parallel_dim_name="model")
 
         distribution = keras.distribution.ModelParallel(
-            mesh, layout_map, batch_dim_name='batch')
+            layout_map=layout_map, batch_dim_name='batch')
         with distribution.scope():
             gemma_model = keras_hub.models.GemmaCausalLM.from_preset()
         ```
 
+        To see how the layout map was applied, load the model then run (for one decoder block):
+        ```
+        embedding_layer = gemma_model.backbone.get_layer("token_embedding")
+        decoder_block_1 = gemma_model.backbone.get_layer('decoder_block_1')
+        for variable in embedding_layer.weights + decoder_block_1.weights:
+            print(f'{variable.path:<58} {str(variable.shape):<16} {str(variable.value.sharding.spec)}')
+        ```
+
         Args:
             device_mesh: The `keras.distribution.DeviceMesh` instance for
                 distribution.
@@ -246,7 +254,7 @@ class GemmaBackbone(Backbone):
                 the data should be partition on.
         Return:
             `keras.distribution.LayoutMap` that contains the sharding spec
-            of all the model weights.
+            for all the model weights.
         """
         # The weight path and shape of the Gemma backbone is like below (for 2G)
         # token_embedding/embeddings, (256128, 2048), 524550144
--- a/keras_hub/src/models/llama/llama_backbone.py
+++ b/keras_hub/src/models/llama/llama_backbone.py
@@ -175,3 +175,121 @@ class LlamaBackbone(Backbone):
             }
         )
         return config
+
+    @staticmethod
+    def get_layout_map(
+        device_mesh,
+        model_parallel_dim_name="model",
+        data_parallel_dim_name="batch",
+    ):
+        """Get a `keras.distribution.LayoutMap` for model parallel distribution.
+
+        The returned `LayoutMap` contains the sharding spec for the Llama
+        backbone weights, so that you can use it to distribute weights across
+        the accelerators.
+
+        Example:
+        ```
+        # Feel free to change the mesh shape to balance data and model parallelism
+        mesh = keras.distribution.DeviceMesh(
+            shape=(1, 8),
+            axis_names=('batch', 'model'),
+            devices=keras.distribution.list_devices(),
+        )
+        layout_map = LlamaBackbone.get_layout_map(
+            mesh,
+            model_parallel_dim_name="model",
+        )
+
+        distribution = keras.distribution.ModelParallel(
+            layout_map=layout_map,
+            batch_dim_name='batch',
+        )
+
+        with distribution.scope():
+            llama_model = keras_hub.models.LlamaCausalLM.from_preset()
+        ```
+
+        To see how the layout map was applied, load the model then run (for one decoder block):
+        ```
+        embedding_layer = llama_model.backbone.get_layer("token_embedding")
+        decoder_block_1 = llama_model.backbone.get_layer('transformer_layer_0')
+        for variable in embedding_layer.weights + decoder_block_1.weights:
+            print(f'{variable.path:<58} {str(variable.shape):<16} {str(variable.value.sharding.spec)}')
+        ```
+
+        Args:
+            device_mesh: The `keras.distribution.DeviceMesh` instance for
+                distribution.
+            model_parallel_dim_name: The axis name of the device mesh, where
+                the weights should be partition on.
+            data_parallel_dim_name: The axis name of the device mesh, where
+                the data should be partition on.
+        Return:
+            `keras.distribution.LayoutMap` that contains the sharding spec
+            for all the model weights.
+        """
+        # The weight path and shape of the Llama backbone is like below
+        # token_embedding/embeddings (128256, 2048)
+        # repeat block for decoder
+        # transformer_layer_0/self_attention/query/kernel (2048, 32, 64)
+        # transformer_layer_0/self_attention/key/kernel (2048, 8, 64)
+        # transformer_layer_0/self_attention/value/kernel (2048, 8, 64)
+        # transformer_layer_0/self_attention/attention_output/kernel (32, 64, 2048)
+        # transformer_layer_0/self_attention_layernorm/scale (2048,)
+        # transformer_layer_0/feedforward_intermediate_dense/kernel (2048, 8192)
+        # transformer_layer_0/feedforward_gate_dense/kernel (2048, 8192)
+        # transformer_layer_0/feedforward_output_dense/kernel (8192, 2048)
+        # transformer_layer_0/feedforward_layernorm/scale (2048,)
+
+        if not isinstance(device_mesh, keras.distribution.DeviceMesh):
+            raise ValueError(
+                "Invalid device_mesh type. Expected `keras.distribution.Device`,"
+                f" got {type(device_mesh)}"
+            )
+        if model_parallel_dim_name not in device_mesh.axis_names:
+            raise ValueError(
+                f"{model_parallel_dim_name} is not found in the "
+                f"device_mesh.axis_names. {device_mesh.axis_name=}"
+            )
+        if data_parallel_dim_name not in device_mesh.axis_names:
+            raise ValueError(
+                f"{data_parallel_dim_name} is not found in the "
+                f"device_mesh.axis_names. {device_mesh.axis_name=}"
+            )
+        # Note that it is possible to further config the mesh to be 3D, eg
+        # (data, seq, model). We leave it as 2D for now for simplicity.
+        data_dim = data_parallel_dim_name
+        model_dim = model_parallel_dim_name
+        # The sharding config is based on the Gemma team training config.
+        # See https://arxiv.org/abs/2403.08295
+        layout_map = keras.distribution.LayoutMap(device_mesh)
+        layout_map["token_embedding/embeddings"] = (model_dim, data_dim)
+        layout_map[
+            "transformer_layer.*self_attention.*(query|key|value).kernel"
+        ] = (
+            model_dim,
+            data_dim,
+            None,
+        )
+        layout_map["transformer_layer.*attention_output.kernel"] = (
+            model_dim,
+            None,
+            data_dim,
+        )
+        layout_map[
+            "transformer_layer.*feedforward_intermediate_dense.kernel"
+        ] = (
+            data_dim,
+            model_dim,
+        )
+        layout_map["transformer_layer.*feedforward_gate_dense.kernel"] = (
+            data_dim,
+            model_dim,
+        )
+        layout_map["transformer_layer.*feedforward_output_dense.kernel"] = (
+            model_dim,
+            data_dim,
+        )
+
+        return layout_map
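The `layout_map` keys above are regular expressions matched against variable paths, so one rule covers every decoder layer. A quick way to sanity-check a rule (a sketch assuming a JAX backend with eight visible accelerators; indexing a `LayoutMap` with a concrete path returns the layout of the first matching rule):

```
import keras
import keras_hub

mesh = keras.distribution.DeviceMesh(
    shape=(1, 8),
    axis_names=("batch", "model"),
    devices=keras.distribution.list_devices(),  # assumes 8 devices
)
layout_map = keras_hub.models.LlamaBackbone.get_layout_map(mesh)
# The query/key/value rule shards this kernel as ('model', 'batch', None).
print(layout_map["transformer_layer_7/self_attention/query/kernel"])
```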
--- a/keras_hub/src/models/llama/llama_causal_lm.py
+++ b/keras_hub/src/models/llama/llama_causal_lm.py
@@ -42,7 +42,9 @@ class LlamaCausalLM(CausalLM):
         self.preprocessor = preprocessor
 
         # === Functional Model ===
-        inputs = backbone.inputs
+        # This must be "backbone.input" i.e. the full input structure,
+        # rather than "backbone.inputs" which is the flattened list of inputs.
+        inputs = backbone.input
         hidden_states = backbone(inputs)
         outputs = backbone.token_embedding(hidden_states, reverse=True)
         super().__init__(
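The same `backbone.inputs` to `backbone.input` fix recurs below for Mistral, PaliGemma, and Phi-3. The distinction is plain Keras functional-model behavior, sketched here on a hypothetical two-input model: `.input` preserves the structure the model was built with (here a dict), while `.inputs` is always a flat list.

```
import keras

token_ids = keras.Input(shape=(None,), dtype="int32", name="token_ids")
padding_mask = keras.Input(shape=(None,), dtype="int32", name="padding_mask")
x = keras.layers.Embedding(100, 8)(token_ids)
# Zero out embeddings at padded positions so both inputs are used.
outputs = x * keras.ops.expand_dims(keras.ops.cast(padding_mask, "float32"), -1)
model = keras.Model(
    inputs={"token_ids": token_ids, "padding_mask": padding_mask},
    outputs=outputs,
)
print(type(model.input))   # <class 'dict'>  -- the original structure
print(type(model.inputs))  # <class 'list'>  -- flattened
```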
--- a/keras_hub/src/models/mistral/mistral_causal_lm.py
+++ b/keras_hub/src/models/mistral/mistral_causal_lm.py
@@ -42,7 +42,9 @@ class MistralCausalLM(CausalLM):
         self.preprocessor = preprocessor
 
         # === Functional Model ===
-        inputs = backbone.inputs
+        # This must be "backbone.input" i.e. the full input structure,
+        # rather than "backbone.inputs" which is the flattened list of inputs.
+        inputs = backbone.input
         hidden_states = backbone(inputs)
         outputs = backbone.token_embedding(hidden_states, reverse=True)
         super().__init__(
--- a/keras_hub/src/models/pali_gemma/pali_gemma_backbone.py
+++ b/keras_hub/src/models/pali_gemma/pali_gemma_backbone.py
@@ -61,8 +61,6 @@ class PaliGemmaBackbone(Backbone):
         vit_classifier_activation: activation function. The activation that
             is used for final output classification in the vision transformer.
         vit_name: string. The name used for vision transformer layers.
-        include_rescaling: bool. If true, the image input will be rescaled from
-            the range `[0, 255]`, to the range `[0, 1]`.
         layer_norm_epsilon: float. The epsilon value user for every layer norm
             in all transformer blocks.
         dropout: float. Dropout probability for the Transformer decoder blocks.
@@ -121,7 +119,6 @@ class PaliGemmaBackbone(Backbone):
         vit_pooling=None,
         vit_classifier_activation=None,
         vit_name=None,
-        include_rescaling=True,
         layer_norm_epsilon=1e-6,
         dropout=0,
         dtype=None,
@@ -145,7 +142,6 @@ class PaliGemmaBackbone(Backbone):
         vit_intermediate_dim = vit_intermediate_dim or 4304
         self.vit_encoder = PaliGemmaVit(
             image_size=image_size,
-            include_rescaling=include_rescaling,
             patch_size=vit_patch_size,
             num_heads=vit_num_heads,
             hidden_dim=vit_hidden_dim,
@@ -215,7 +211,6 @@ class PaliGemmaBackbone(Backbone):
         # === Config ===
         self.vocabulary_size = vocabulary_size
         self.image_size = image_size
-        self.include_rescaling = include_rescaling
         self.num_layers = num_layers
         self.num_query_heads = num_query_heads
         self.num_key_value_heads = num_key_value_heads
@@ -242,7 +237,6 @@ class PaliGemmaBackbone(Backbone):
             {
                 "vocabulary_size": self.vocabulary_size,
                 "image_size": self.image_size,
-                "include_rescaling": self.include_rescaling,
                 "num_layers": self.num_layers,
                 "num_query_heads": self.num_query_heads,
                 "num_key_value_heads": self.num_key_value_heads,
--- a/keras_hub/src/models/pali_gemma/pali_gemma_causal_lm.py
+++ b/keras_hub/src/models/pali_gemma/pali_gemma_causal_lm.py
@@ -110,7 +110,9 @@ class PaliGemmaCausalLM(CausalLM):
         self.backbone = backbone
 
         # === Functional Model ===
-        inputs = backbone.inputs
+        # This must be "backbone.input" i.e. the full input structure,
+        # rather than "backbone.inputs" which is the flattened list of inputs.
+        inputs = backbone.input
         hidden_state = backbone(inputs=inputs)
         outputs = backbone.token_embedding(hidden_state, reverse=True)
         outputs = outputs[:, backbone.image_sequence_length :, :]
--- a/keras_hub/src/models/pali_gemma/pali_gemma_presets.py
+++ b/keras_hub/src/models/pali_gemma/pali_gemma_presets.py
@@ -12,7 +12,7 @@ backbone_presets = {
             "path": "pali_gemma",
             "model_card": "https://www.kaggle.com/models/google/paligemma",
         },
-        "kaggle_handle": "kaggle://keras/paligemma/keras/pali_gemma_3b_mix_224/2",
+        "kaggle_handle": "kaggle://keras/paligemma/keras/pali_gemma_3b_mix_224/3",
     },
     "pali_gemma_3b_mix_448": {
         "metadata": {
@@ -24,7 +24,7 @@ backbone_presets = {
             "path": "pali_gemma",
             "model_card": "https://www.kaggle.com/models/google/paligemma",
         },
-        "kaggle_handle": "kaggle://keras/paligemma/keras/pali_gemma_3b_mix_448/2",
+        "kaggle_handle": "kaggle://keras/paligemma/keras/pali_gemma_3b_mix_448/3",
     },
     "pali_gemma_3b_224": {
         "metadata": {
@@ -36,7 +36,7 @@ backbone_presets = {
             "path": "pali_gemma",
             "model_card": "https://www.kaggle.com/models/google/paligemma",
         },
-        "kaggle_handle": "kaggle://keras/paligemma/keras/pali_gemma_3b_224/2",
+        "kaggle_handle": "kaggle://keras/paligemma/keras/pali_gemma_3b_224/3",
     },
     "pali_gemma_3b_448": {
         "metadata": {
@@ -48,7 +48,7 @@ backbone_presets = {
             "path": "pali_gemma",
             "model_card": "https://www.kaggle.com/models/google/paligemma",
         },
-        "kaggle_handle": "kaggle://keras/paligemma/keras/pali_gemma_3b_448/2",
+        "kaggle_handle": "kaggle://keras/paligemma/keras/pali_gemma_3b_448/3",
     },
     "pali_gemma_3b_896": {
         "metadata": {
@@ -60,6 +60,6 @@ backbone_presets = {
             "path": "pali_gemma",
             "model_card": "https://www.kaggle.com/models/google/paligemma",
         },
-        "kaggle_handle": "kaggle://keras/paligemma/keras/pali_gemma_3b_896/2",
+        "kaggle_handle": "kaggle://keras/paligemma/keras/pali_gemma_3b_896/3",
     },
 }
--- a/keras_hub/src/models/pali_gemma/pali_gemma_vit.py
+++ b/keras_hub/src/models/pali_gemma/pali_gemma_vit.py
@@ -410,8 +410,6 @@ class PaliGemmaVit(keras.Model):
     Args:
         image_size: int. The height/width of the image. Both height and width is
             expected to be the same.
-        include_rescaling: bool. If true, the image input will be rescaled from
-            the range `[0, 255]`, to the range `[0, 1]`.
         patch_size: int. The size of each square patch in the input image.
         num_heads: int. The number of attention heads for the vision(image)
             transformer encoder.
@@ -452,7 +450,6 @@ class PaliGemmaVit(keras.Model):
         num_layers,
         intermediate_dim,
         num_classes,
-        include_rescaling=True,
         pooling=None,
         classifier_activation=None,
         dtype=None,
@@ -463,14 +460,6 @@ class PaliGemmaVit(keras.Model):
             shape=(image_size, image_size, 3), name="images"
         )
         x = image_input  # Intermediate result.
-        # TODO we have moved this rescaling to preprocessing layers for most
-        # models. We should consider removing it here, though it would break
-        # compatibility.
-        if include_rescaling:
-            rescaling = keras.layers.Rescaling(
-                scale=1.0 / 127.5, offset=-1.0, name="rescaling"
-            )
-            x = rescaling(image_input)
         x = PaliGemmaVitEncoder(
             hidden_dim=hidden_dim,
             num_layers=num_layers,
@@ -520,7 +509,6 @@ class PaliGemmaVit(keras.Model):
         self.pooling = pooling
         self.num_classes = num_classes
         self.image_size = image_size
-        self.include_rescaling = include_rescaling
         self.patch_size = patch_size
         self.classifier_activation = keras.activations.get(
             classifier_activation
@@ -549,7 +537,6 @@ class PaliGemmaVit(keras.Model):
                     self.classifier_activation
                 ),
                 "image_size": self.image_size,
-                "include_rescaling": self.include_rescaling,
                 "patch_size": self.patch_size,
             }
         )
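Note that the removed layer actually mapped pixels to `[-1, 1]`, not `[0, 1]` as the deleted docstring claimed: `(x / 127.5) - 1` sends 0 to -1 and 255 to 1. Anyone feeding raw `[0, 255]` images straight to the ViT now needs the equivalent rescaling in their own preprocessing, e.g.:

```
import keras
import numpy as np

# Standalone equivalent of the removed layer: (x / 127.5) - 1.0
rescale = keras.layers.Rescaling(scale=1.0 / 127.5, offset=-1.0)
print(rescale(np.array([0.0, 127.5, 255.0])))  # -> [-1.0, 0.0, 1.0]
```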
--- a/keras_hub/src/models/phi3/phi3_causal_lm.py
+++ b/keras_hub/src/models/phi3/phi3_causal_lm.py
@@ -41,7 +41,9 @@ class Phi3CausalLM(CausalLM):
         self.preprocessor = preprocessor
 
         # === Functional Model ===
-        inputs = backbone.inputs
+        # This must be "backbone.input" i.e. the full input structure,
+        # rather than "backbone.inputs" which is the flattened list of inputs.
+        inputs = backbone.input
         hidden_states = backbone(inputs)
         outputs = backbone.token_embedding(hidden_states, reverse=True)
         super().__init__(
--- a/keras_hub/src/models/preprocessor.py
+++ b/keras_hub/src/models/preprocessor.py
@@ -71,6 +71,22 @@ class Preprocessor(PreprocessingLayer):
     def image_converter(self, value):
         self._image_converter = value
 
+    @property
+    def image_size(self):
+        """Shortcut to get/set the image size of the image converter."""
+        if self.image_converter is None:
+            return None
+        return self.image_converter.image_size
+
+    @image_size.setter
+    def image_size(self, value):
+        if self.image_converter is None:
+            raise ValueError(
+                "Cannot set `image_size` on preprocessor if `image_converter` "
+                " is `None`."
+            )
+        self.image_converter.image_size = value
+
     def get_config(self):
         config = super().get_config()
         if self.tokenizer:
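A short sketch of the new shortcut (the preset name is illustrative; any preprocessor with an image converter behaves the same):

```
import keras_hub

preprocessor = keras_hub.models.ImageClassifierPreprocessor.from_preset(
    "resnet_50_imagenet"
)
print(preprocessor.image_size)        # e.g. (224, 224), read from the converter
preprocessor.image_size = (512, 512)  # forwarded to preprocessor.image_converter
```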
--- a/keras_hub/src/models/resnet/resnet_presets.py
+++ b/keras_hub/src/models/resnet/resnet_presets.py
@@ -12,7 +12,7 @@ backbone_presets = {
             "path": "resnet",
             "model_card": "https://arxiv.org/abs/2110.00476",
         },
-        "kaggle_handle": "kaggle://keras/resnetv1/keras/resnet_18_imagenet/1",
+        "kaggle_handle": "kaggle://keras/resnetv1/keras/resnet_18_imagenet/2",
     },
     "resnet_50_imagenet": {
         "metadata": {
@@ -25,7 +25,7 @@ backbone_presets = {
             "path": "resnet",
             "model_card": "https://arxiv.org/abs/2110.00476",
         },
-        "kaggle_handle": "kaggle://keras/resnetv1/keras/resnet_50_imagenet/1",
+        "kaggle_handle": "kaggle://keras/resnetv1/keras/resnet_50_imagenet/2",
     },
     "resnet_101_imagenet": {
         "metadata": {
@@ -38,7 +38,7 @@ backbone_presets = {
             "path": "resnet",
             "model_card": "https://arxiv.org/abs/2110.00476",
         },
-        "kaggle_handle": "kaggle://keras/resnetv1/keras/resnet_101_imagenet/1",
+        "kaggle_handle": "kaggle://keras/resnetv1/keras/resnet_101_imagenet/2",
     },
     "resnet_152_imagenet": {
         "metadata": {
@@ -51,7 +51,7 @@ backbone_presets = {
             "path": "resnet",
             "model_card": "https://arxiv.org/abs/2110.00476",
         },
-        "kaggle_handle": "kaggle://keras/resnetv1/keras/resnet_152_imagenet/1",
+        "kaggle_handle": "kaggle://keras/resnetv1/keras/resnet_152_imagenet/2",
     },
     "resnet_v2_50_imagenet": {
         "metadata": {
@@ -64,7 +64,7 @@ backbone_presets = {
             "path": "resnet",
             "model_card": "https://arxiv.org/abs/2110.00476",
         },
-        "kaggle_handle": "kaggle://keras/resnetv2/keras/resnet_v2_50_imagenet/1",
+        "kaggle_handle": "kaggle://keras/resnetv2/keras/resnet_v2_50_imagenet/2",
     },
     "resnet_v2_101_imagenet": {
         "metadata": {
@@ -77,6 +77,6 @@ backbone_presets = {
             "path": "resnet",
             "model_card": "https://arxiv.org/abs/2110.00476",
         },
-        "kaggle_handle": "kaggle://keras/resnetv2/keras/resnet_v2_101_imagenet/1",
+        "kaggle_handle": "kaggle://keras/resnetv2/keras/resnet_v2_101_imagenet/2",
     },
 }
--- a/keras_hub/src/models/task.py
+++ b/keras_hub/src/models/task.py
@@ -280,7 +280,7 @@ class Task(PipelineModel):
 
         def highlight_number(x):
             if x is None:
-                f"[color(45)]{x}[/]"
+                return f"[color(45)]{x}[/]"
             return f"[color(34)]{x:,}[/]"  # Format number with commas.
 
         def highlight_symbol(x):
@@ -339,7 +339,10 @@ class Task(PipelineModel):
             add_layer(layer, info)
         elif isinstance(layer, ImageConverter):
             info = "Image size: "
-            info += highlight_shape(layer.image_size)
+            image_size = layer.image_size
+            if image_size is None:
+                image_size = (None, None)
+            info += highlight_shape(image_size)
             add_layer(layer, info)
         elif isinstance(layer, AudioConverter):
             info = "Audio shape: "
--- a/keras_hub/src/tests/test_case.py
+++ b/keras_hub/src/tests/test_case.py
@@ -569,6 +569,15 @@ class TestCase(tf.test.TestCase, parameterized.TestCase):
         ds = tf.data.Dataset.from_tensor_slices(train_data).batch(batch_size)
         x, y, sw = keras.utils.unpack_x_y_sample_weight(train_data)
 
+        # Test: the tree struct output by the
+        # preprocessor must match what model expects.
+        preprocessed_data = preprocessor(*train_data)[0]
+        tree.assert_same_structure(
+            preprocessed_data,
+            task._inputs_struct,
+            check_types=False,
+        )
+
         # Test predict.
         output = task.predict(x)
         if expected_output_shape is not None:
--- a/keras_hub/src/utils/timm/preset_loader.py
+++ b/keras_hub/src/utils/timm/preset_loader.py
@@ -53,10 +53,11 @@ class TimmPresetLoader(PresetLoader):
 
     def load_image_converter(self, cls, **kwargs):
         pretrained_cfg = self.config.get("pretrained_cfg", None)
-        if not pretrained_cfg:
+        if not pretrained_cfg or "input_size" not in pretrained_cfg:
             return None
         # This assumes the same basic setup for all timm preprocessing, We may
         # need to extend this as we cover more model types.
+        input_size = pretrained_cfg["input_size"]
         mean = pretrained_cfg["mean"]
         std = pretrained_cfg["std"]
         scale = [1.0 / 255.0 / s for s in std]
@@ -65,6 +66,7 @@ class TimmPresetLoader(PresetLoader):
         if interpolation not in ("bilinear", "nearest", "bicubic"):
             interpolation = "bilinear"  # Unsupported interpolation type.
         return cls(
+            image_size=input_size[1:],
             scale=scale,
             offset=offset,
             interpolation=interpolation,
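The new `image_size=input_size[1:]` relies on timm's convention that `pretrained_cfg["input_size"]` is channels-first; a sketch with illustrative values:

```
# timm stores input_size as (channels, height, width).
pretrained_cfg = {
    "input_size": (3, 224, 224),
    "mean": (0.485, 0.456, 0.406),
    "std": (0.229, 0.224, 0.225),
    "interpolation": "bicubic",
}
image_size = pretrained_cfg["input_size"][1:]  # -> (224, 224), i.e. (height, width)
```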
--- a/keras_hub/src/version_utils.py
+++ b/keras_hub/src/version_utils.py
@@ -1,7 +1,7 @@
 from keras_hub.src.api_export import keras_hub_export
 
 # Unique source of truth for the version number.
-__version__ = "0.16.1.dev202410160341"
+__version__ = "0.16.1.dev202410180341"
 
 
 @keras_hub_export("keras_hub.version")
--- a/keras_hub_nightly-0.16.1.dev202410160341.dist-info/METADATA
+++ b/keras_hub_nightly-0.16.1.dev202410180341.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: keras-hub-nightly
-Version: 0.16.1.dev202410160341
+Version: 0.16.1.dev202410180341
 Summary: Industry-strength Natural Language Processing extensions for Keras.
 Home-page: https://github.com/keras-team/keras-hub
 Author: Keras team
--- a/keras_hub_nightly-0.16.1.dev202410160341.dist-info/RECORD
+++ b/keras_hub_nightly-0.16.1.dev202410180341.dist-info/RECORD
@@ -9,7 +9,7 @@ keras_hub/api/tokenizers/__init__.py,sha256=_f-r_cyUM2fjBB7iO84ThOdqqsAxHNIewJ2E
 keras_hub/api/utils/__init__.py,sha256=Gp1E6gG-RtKQS3PBEQEOz9PQvXkXaJ0ySGMqZ7myN7A,215
 keras_hub/src/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 keras_hub/src/api_export.py,sha256=9pQZK27JObxWZ96QPLBp1OBsjWigh1iuV6RglPGMRk0,1499
-keras_hub/src/version_utils.py,sha256=fxvdcthdquC4Lv7XrXXJHPaBhpk1Vu7DIa8GgHGnOhE,222
+keras_hub/src/version_utils.py,sha256=u4lRruStAMtT-Vd1NqG2WDfk_hI_aY5rRtlyOLK2wBo,222
 keras_hub/src/bounding_box/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 keras_hub/src/bounding_box/converters.py,sha256=a5po8DBm87oz2EXfi-0uEZHCMlCJPIb4-MaZIdYx3Dg,17865
 keras_hub/src/bounding_box/formats.py,sha256=YmskOz2BOSat7NaE__J9VfpSNGPJJR0znSzA4lp8MMI,3868
@@ -50,7 +50,7 @@ keras_hub/src/metrics/rouge_l.py,sha256=JlZhMBV6wS_6zMd57pkTc6yxHkEJT9fVQMlPZKek
 keras_hub/src/metrics/rouge_n.py,sha256=JoFtmgjF4Ic263ny6bfD6vMHKreH9le3HnOOxemupRc,3620
 keras_hub/src/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 keras_hub/src/models/backbone.py,sha256=2OZx6WAx2q9JK2yue5BoUUipIBjpOJRVNnMjXLVDLRk,11185
-keras_hub/src/models/causal_lm.py,sha256=zGUamLuL2HlTgummUhfnA8Uoe4QMsGGLD4uJazxJe-Y,15079
+keras_hub/src/models/causal_lm.py,sha256=0Asl5v86jm4PnqCoQv7u4Sy8NfMoUvrQxnxQSTQLKog,16721
 keras_hub/src/models/causal_lm_preprocessor.py,sha256=YY7VJZicdmnjDSWi9g4_pEpd5bdJK166GlWcapvokF0,6663
 keras_hub/src/models/feature_pyramid_backbone.py,sha256=clEW-TTQSVJ_5qFNdDF0iABkin1p_xlBUFjJrC7T0IA,2247
 keras_hub/src/models/image_classifier.py,sha256=yt6cjhPfqs8A_eWXBsXdXFzn-aRgH2rVHUq7Zu7CyK8,7804
@@ -61,10 +61,10 @@ keras_hub/src/models/image_to_image.py,sha256=z2TfFh9DiaEj9u6hEY8May3B0etxhptttg
 keras_hub/src/models/inpaint.py,sha256=8TTusRRS7ntPoAd0BsuhEZjedtaoljI4ZbgKQ_bnF34,20411
 keras_hub/src/models/masked_lm.py,sha256=uXO_dE_hILlOC9jNr6oK6IHi9IGUqLyNGvr6nMt8Rk0,3576
 keras_hub/src/models/masked_lm_preprocessor.py,sha256=g8vrnyYwqdnSw5xppROM1Gzo_jmMWKYZoQCsKdfrFKk,5656
-keras_hub/src/models/preprocessor.py,sha256=3CWLsMpQC77w7GzM3fU3Jf-G62ldJjufKyzPVvnGdeI,7970
+keras_hub/src/models/preprocessor.py,sha256=KqUJrF24h_6h2CnkuyneqOioCa1Sd3ZA0qzq3BdLqUA,8496
 keras_hub/src/models/seq_2_seq_lm.py,sha256=w0gX-5YZjatfvAJmFAgSHyqS_BLqc8FF8DPLGK8mrgI,1864
 keras_hub/src/models/seq_2_seq_lm_preprocessor.py,sha256=HUHRbWRG5SF1pPpotGzBhXlrMh4pLFxgAoFk05FIrB4,9687
-keras_hub/src/models/task.py,sha256=VN-CClNw3EB5Byb7HyyI3CqaS140od7-dmQInmYFSKg,14414
+keras_hub/src/models/task.py,sha256=06ISrWbn7ab-H1uszIPogpt6PuM90xiXKvwrAIEsC-o,14570
 keras_hub/src/models/text_classifier.py,sha256=VBDvQUHTpJPqKp7A4VAtm35FOmJ3yMo0DW6GdX67xG0,4159
 keras_hub/src/models/text_classifier_preprocessor.py,sha256=EoWp-GHnaLnAKTdAzDmC-soAV92ATF3QozdubdV2WXI,4722
 keras_hub/src/models/text_to_image.py,sha256=7s6rB1To46A7l9ItqRw3Pe4DGRm7YnqbHJ-RyNAlLPE,12973
@@ -122,14 +122,14 @@ keras_hub/src/models/deeplab_v3/deeplab_v3_backbone.py,sha256=WyFhuLcjFPFVuNL09b
 keras_hub/src/models/deeplab_v3/deeplab_v3_image_converter.py,sha256=mRkH3HdhpV0fCcQcVXEvIX7SNk-bAMb3SAHzgK-FD5c,371
 keras_hub/src/models/deeplab_v3/deeplab_v3_image_segmeter_preprocessor.py,sha256=hR9S6lNYamY0EBDBo3e1qTCiwtftmLXrN-UYuzfw5Io,581
 keras_hub/src/models/deeplab_v3/deeplab_v3_layers.py,sha256=qmEiolOOriLAojXB67xXW9IOo717kaCGeDVZJLaGY98,7834
-keras_hub/src/models/deeplab_v3/deeplab_v3_presets.py,sha256=tfTOz0H_XX1o-0oM7O3j7OyKxPDIesrV8FMO4IfbbBk,702
+keras_hub/src/models/deeplab_v3/deeplab_v3_presets.py,sha256=jdSycE_H2Dm1z2WHYu0WtpEJBMiAoioHgJL1gMEGLDI,709
 keras_hub/src/models/deeplab_v3/deeplab_v3_segmenter.py,sha256=tiMDcCFANHMUx3IVtW3r1P_JTazgPPsbW4IktIytKEU,3650
 keras_hub/src/models/densenet/__init__.py,sha256=r7StyamnWeeZxOk9r4ZYNbS_YVhu9YGPyXhNxljvdPg,269
 keras_hub/src/models/densenet/densenet_backbone.py,sha256=dN9lUwKzO3E2HthNV2x54ozeBEQ0ilNs5uYHshFQpT0,6723
 keras_hub/src/models/densenet/densenet_image_classifier.py,sha256=ptuV6PwgoUpmrSPqX7-a85IpWsElwcCv_G5IVkP9E_Q,530
 keras_hub/src/models/densenet/densenet_image_classifier_preprocessor.py,sha256=xDZbTw_h6pjLDzf8QmbDyMnMsFzgh-dPX1ldg9kddhg,563
 keras_hub/src/models/densenet/densenet_image_converter.py,sha256=DoxYlJVZ9uaabFhVjWOmzvhONoc8KNcQj2vQ6Z1AUpU,354
-keras_hub/src/models/densenet/densenet_presets.py,sha256=2emOQuwcWo2i1MhvXwi081S55Z7vHkQ0r6h6Z9Yn68Q,1531
+keras_hub/src/models/densenet/densenet_presets.py,sha256=QoluKQJnV391K6hoIX5X8UquD8f647u_8Ygta-UxmwE,1531
 keras_hub/src/models/distil_bert/__init__.py,sha256=3Z0w-Mt3aOR0u9RGzjHQ7B3J3qBF2pGjupDGQ9yyzoc,303
 keras_hub/src/models/distil_bert/distil_bert_backbone.py,sha256=rnAf_GokB3wAeJwVZtgUKQO_bKJIa8RavhL_ykTJpNw,6440
 keras_hub/src/models/distil_bert/distil_bert_masked_lm.py,sha256=L0DvOl01MIwqc2f6H_E8si9qVUXPd0OKknJ5Rha33TA,4275
@@ -164,7 +164,7 @@ keras_hub/src/models/falcon/falcon_tokenizer.py,sha256=2B5vmpakj_iVB7T95_8OVreJb
 keras_hub/src/models/falcon/falcon_transformer_decoder.py,sha256=QqIK6v97uBXZFBG3qS6O8HrP9_93uOFzvHQgOiMO2eY,8125
 keras_hub/src/models/gemma/__init__.py,sha256=rVzOJMJ39bgVlT8UdC0t8PlN2c237GKTBmfHIsbPuOQ,251
 keras_hub/src/models/gemma/gemma_attention.py,sha256=1CVN5z9GKoU8TuNMih2_MweDkpd98xSqdic9F8xIBE8,8317
-keras_hub/src/models/gemma/gemma_backbone.py,sha256=EttzmZHWXWl95__09reVFJxT__MtUSUtJAw15-Dao28,12914
+keras_hub/src/models/gemma/gemma_backbone.py,sha256=P5srrrqIrFIBF84KCKKl9vKyYiq0CxjhdcVk76PKVTQ,13377
 keras_hub/src/models/gemma/gemma_causal_lm.py,sha256=BNBoQIf0HoqCooalYsWE-28v5BGUNvL9YdUB8_NSkBU,16770
 keras_hub/src/models/gemma/gemma_causal_lm_preprocessor.py,sha256=bpKkEurWIfa6Kp9s4pz84-sBDSA6ZFNHP8nXG1fFQrg,2912
 keras_hub/src/models/gemma/gemma_decoder_block.py,sha256=tpBfH86Q48EvV0COkd1g2FJg9zHp7ktZBjegs3ehOYo,7588
@@ -187,8 +187,8 @@ keras_hub/src/models/gpt_neo_x/gpt_neo_x_decoder.py,sha256=xSLDgavOhhm3SZc18VN60
 keras_hub/src/models/gpt_neo_x/gpt_neo_x_tokenizer.py,sha256=aKso-8yGrynn3tZ5xm2egcXIBQo3__sWZDBtjmS3ZgU,1991
 keras_hub/src/models/llama/__init__.py,sha256=svVZjGi71R3lVbq0AdbqlXj909mr3Rp9EPXdiO0w0G0,251
 keras_hub/src/models/llama/llama_attention.py,sha256=HzTWtvTjfN_j0vA9-ComstHpI81tzUrJU3RSSvSCaI4,7194
-keras_hub/src/models/llama/llama_backbone.py,sha256=6tkTvAwhFZjnHFIzQbUYlgByMt2qQE2F3sfBluVhON0,6703
-keras_hub/src/models/llama/llama_causal_lm.py,sha256=JyTiCt1mxvf6QNxhjCjAW-aopTL4teS1EHTb_K-RGrs,13109
+keras_hub/src/models/llama/llama_backbone.py,sha256=ElMjhfyTwXcChQPcrKo3bZozeRhzGyCXqOWA_siQFj8,11687
+keras_hub/src/models/llama/llama_causal_lm.py,sha256=9bP4-XDCMgsZuH1ILIMzmwq2Fyy6vkk1Vsht-lMGCNo,13258
 keras_hub/src/models/llama/llama_causal_lm_preprocessor.py,sha256=VTboOMiRBoxHrwP343upLUTsv3AG65r2H8h_PNPVphE,3047
 keras_hub/src/models/llama/llama_decoder.py,sha256=6iERIblED0ZB5w_EUlHks4UvMnsrWONdO_Xdz2OzhWM,8623
 keras_hub/src/models/llama/llama_layernorm.py,sha256=LfRbePHUJs00Ptf7dvNaw3Aj9n1xBMBpE_rS5zzsYMo,1050
@@ -203,7 +203,7 @@ keras_hub/src/models/llama3/llama3_tokenizer.py,sha256=J-KxRc08vGs4olFw_4mtJs0W_
 keras_hub/src/models/mistral/__init__.py,sha256=vjBlzcrIsFSwJKnfwfTNMKstIEKGFTE3kVcdAdfwlnE,263
 keras_hub/src/models/mistral/mistral_attention.py,sha256=HCkUIc2DVIlYC5hhwomENlqLOsKTvbCKF0lx0_OBAyA,7862
 keras_hub/src/models/mistral/mistral_backbone.py,sha256=x4BfyfWTCUXcjPSxdPSl8QITXgzUg1oJlAQt2acZfv4,7245
-keras_hub/src/models/mistral/mistral_causal_lm.py,sha256=OQ3IbdkVlNIXsByZ5ClJoCs0PA86AbMeG6UESbnlfE8,13085
+keras_hub/src/models/mistral/mistral_causal_lm.py,sha256=gEGUnB6yOib9G71n5Em5X8TPOllJW53UXlUCNJkL_ZU,13234
 keras_hub/src/models/mistral/mistral_causal_lm_preprocessor.py,sha256=_4qq-uKktfIg_i081ZWjZGEIYZpedBwtBGpchQQ-qEk,3079
 keras_hub/src/models/mistral/mistral_layer_norm.py,sha256=nimMZ5CTPK8v9eflfrGuzqmv-2vd2rGlPvcHOMwYZyg,1063
 keras_hub/src/models/mistral/mistral_presets.py,sha256=gucgdaFAiU-vRDS1g9zWGHjbDF_jaCiljPibCF4yVqY,1329
@@ -226,18 +226,18 @@ keras_hub/src/models/opt/opt_causal_lm_preprocessor.py,sha256=xHfslVMOZlAIj2V2jI
 keras_hub/src/models/opt/opt_presets.py,sha256=J1IJ5VRcZZ6UZJSLrxpbWXw39YmbRd_WQujX1a6dxHo,2329
 keras_hub/src/models/opt/opt_tokenizer.py,sha256=oDHeed4xf07tm14hj_C78BkzMuuRwRP2cRHmqYnObrs,2557
 keras_hub/src/models/pali_gemma/__init__.py,sha256=uODWTlttOOchcTLpiYHCEWMXnDxIz8ZVIeYFQN2bd8o,288
-keras_hub/src/models/pali_gemma/pali_gemma_backbone.py,sha256=srZyBsA5tulO_Fb03g9FE-vaw2j9ftfxnAy4P8cYB6o,10916
-keras_hub/src/models/pali_gemma/pali_gemma_causal_lm.py,sha256=qlcBnFtPgKIRtdHgA4rrhiktBJq4h_uV-HriuuRBVwc,11196
+keras_hub/src/models/pali_gemma/pali_gemma_backbone.py,sha256=Rrl7nof_gAZL2Nge1cFymCsRdwqxQjwmEEhucGspUr0,10586
+keras_hub/src/models/pali_gemma/pali_gemma_causal_lm.py,sha256=AViEs6YltUqWnIVo7J02JkXcanBgLSdwZwF56TVr8gc,11345
 keras_hub/src/models/pali_gemma/pali_gemma_causal_lm_preprocessor.py,sha256=F57y0fZ0wYYxfGIjfrJc1W9uQpViYFx5bvFjj5CqUbI,4814
 keras_hub/src/models/pali_gemma/pali_gemma_decoder_block.py,sha256=Q_sPAULiSo_ZJeXklZjCLhvOMXk8MrPZhEXtL5yNOiI,5175
 keras_hub/src/models/pali_gemma/pali_gemma_image_converter.py,sha256=5yM_jUtrFsWIieiwfFBoP7mtPmQAwywkeLKbd7fhmzk,371
-keras_hub/src/models/pali_gemma/pali_gemma_presets.py,sha256=4D6qfWuxJtY-tyo31gxAaUlhV6wF7BhL1_FgiPmTQT0,2401
+keras_hub/src/models/pali_gemma/pali_gemma_presets.py,sha256=yLLuPwhIDE7HuMNJwLw1_yhHGz3w3mvYCxVcgAtSydc,2401
 keras_hub/src/models/pali_gemma/pali_gemma_tokenizer.py,sha256=ljTiADHo0Ok88q-jVzwJIle2C8xcxnudLTsBLzIySaM,2415
-keras_hub/src/models/pali_gemma/pali_gemma_vit.py,sha256=wP1UtW0WnlRmga-JQRxWTfAZNt_q-vaF1Qy4siJDpyY,18685
+keras_hub/src/models/pali_gemma/pali_gemma_vit.py,sha256=UpmymNkwuN9iuTV2I4M6lvHnlqpZIDmPb5pAADKs-Vg,18029
 keras_hub/src/models/phi3/__init__.py,sha256=zIbf1MU-ks91mEkjTRJAsk51N3BBnXDF2JM1vO-13PQ,245
 keras_hub/src/models/phi3/phi3_attention.py,sha256=dN8QwwTP9TxPBDv0MCvObLF3nHm1H6xbYr3T1K0nmg8,9243
 keras_hub/src/models/phi3/phi3_backbone.py,sha256=fY-OY2ZrqxDHglYjTM0OCacBdEQHwj-XNmU0MnXL7iU,8885
-keras_hub/src/models/phi3/phi3_causal_lm.py,sha256=a1TVyDgEv3Sd66Cf7xfa28dESGrUX1bM7pHAw1QfTaw,8240
+keras_hub/src/models/phi3/phi3_causal_lm.py,sha256=kMMq7fQ8hlb_mLO_nU1lGVqILayulVvzzZgl2EvY9_k,8389
 keras_hub/src/models/phi3/phi3_causal_lm_preprocessor.py,sha256=gNx1k-n7d0XDwpNbcZiO9yLkwdXYCvwGyA3b0QCnPAE,3043
 keras_hub/src/models/phi3/phi3_decoder.py,sha256=1raVexz1TkpqvMwW1Zs08KSxTs9gDc6VWUKJ9sM1VFY,9587
 keras_hub/src/models/phi3/phi3_layernorm.py,sha256=Oqu81tGd97Lzx3kG1QEtZ0S6gbfn3GLgRzY8UWGJRBo,1049
@@ -249,7 +249,7 @@ keras_hub/src/models/resnet/resnet_backbone.py,sha256=mqVdGUj8YtjZ3zIhAQXgNqu8Sq
 keras_hub/src/models/resnet/resnet_image_classifier.py,sha256=nf35EKDzvBkfhHsK-s6Ks0nbhvKO7HEOYZm94YckyWE,510
 keras_hub/src/models/resnet/resnet_image_classifier_preprocessor.py,sha256=fM7gyQ0qB-RRuI4USJkRD6q9-HVfuC71e-BLTo-UhHQ,543
 keras_hub/src/models/resnet/resnet_image_converter.py,sha256=fgTxihJznGFss-y3Z-jp0JE3X1gaaB2y-f2KMwrT8Pk,342
-keras_hub/src/models/resnet/resnet_presets.py,sha256=fqyA7rXB6IwD_x7TMq40RyArzjdDbD4jLxH5OaPjWIs,2947
+keras_hub/src/models/resnet/resnet_presets.py,sha256=FwQuCH9IZM0c7eRnbqxviQcfypbA_lg0-yVvnsGY1Dc,2947
 keras_hub/src/models/retinanet/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 keras_hub/src/models/retinanet/anchor_generator.py,sha256=43NoI7djbRudH98hUm-9fw5OEGQNRXOUYzypIZhLYhE,6750
 keras_hub/src/models/retinanet/box_matcher.py,sha256=l820r1R-ByqiyVgmZ0YFjjz0njchDda-wItzLn1X84o,10834
@@ -333,7 +333,7 @@ keras_hub/src/samplers/serialization.py,sha256=K6FC4AY1sfOLLIk2k4G783XWnQ_Rk3z1Q
 keras_hub/src/samplers/top_k_sampler.py,sha256=WSyrhmOCan55X2JYAnNWE88rkx66sXqdoerl87nOrDQ,2250
 keras_hub/src/samplers/top_p_sampler.py,sha256=9r29WdqBlrW_2TBma6QqkRps2Uit4a6iZPmq1Gsiuko,3400
 keras_hub/src/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-keras_hub/src/tests/test_case.py,sha256=9-yV87k508ciVOJUBiypd8HmnDTHVtWU2m_RbOHMv5Q,26005
+keras_hub/src/tests/test_case.py,sha256=KMFdQoTqAGotj8Pt8AxXjTJ_f0qwavIGUh-iqN1nQvA,26304
 keras_hub/src/tokenizers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 keras_hub/src/tokenizers/byte_pair_tokenizer.py,sha256=fGFp3WgPNYGTztpSGMl0kKFjn1bCeZB71lSJfT1eqEE,24052
 keras_hub/src/tokenizers/byte_tokenizer.py,sha256=vjgrTT8FdtZVAlr0mU13alzADcUhtMrzgOs4lYeHvAQ,10648
@@ -355,7 +355,7 @@ keras_hub/src/utils/timm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZ
 keras_hub/src/utils/timm/convert_densenet.py,sha256=V-GRjWuDnlh3b1EMxqahwZ3GMwSgOa3v0HOfb2ZZ-d0,3342
 keras_hub/src/utils/timm/convert_resnet.py,sha256=ee8eTml0ffJKE8avzGoLFcpjPF63DsvoIUArAGa8Ngg,5832
 keras_hub/src/utils/timm/convert_vgg.py,sha256=MT5jGnLrzenPpe66Af_Lp1IdR9KGtsSrcmn6_UPqHvQ,2419
-keras_hub/src/utils/timm/preset_loader.py,sha256=2GJI2YeKGVovtDqc930uGta12yiyuCL9YrsTyGhqt9Y,3094
+keras_hub/src/utils/timm/preset_loader.py,sha256=PBqmnEj-fash_-GH-_ulb9YYaHAIESlOsI3wXCwKGRo,3221
 keras_hub/src/utils/transformers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 keras_hub/src/utils/transformers/convert_albert.py,sha256=VdKclZpCxtDWq3UbUUQZf4fR9DJK_JYZ73B4O_G9skg,7695
 keras_hub/src/utils/transformers/convert_bart.py,sha256=Tk4h9Md9rwN5wjQbGIVrC7qzDpF8kI8qm-FKL8HlUok,14411
@@ -368,7 +368,7 @@ keras_hub/src/utils/transformers/convert_mistral.py,sha256=kVhN9h1ZFVhwkNW8p3wnS
 keras_hub/src/utils/transformers/convert_pali_gemma.py,sha256=B1leeDw96Yvu81hYumf66hIid07k5NLqoeWAJgPnaLs,10649
 keras_hub/src/utils/transformers/preset_loader.py,sha256=GS44hZUuGQCtzsyn8z44ZpHdftd3DFemwV2hx2bQa-U,2738
 keras_hub/src/utils/transformers/safetensor_utils.py,sha256=rPK-Uw1CG0DX0d_UAD-r2cG9fw8GI8bvAlrcXfQ9g4c,3323
-keras_hub_nightly-0.16.1.dev202410160341.dist-info/METADATA,sha256=_n3hoMkRkYt2kxNV1cSZdgnbKiWeRCtcq0OjX67QcV8,7458
-keras_hub_nightly-0.16.1.dev202410160341.dist-info/WHEEL,sha256=GV9aMThwP_4oNCtvEC2ec3qUYutgWeAzklro_0m4WJQ,91
-keras_hub_nightly-0.16.1.dev202410160341.dist-info/top_level.txt,sha256=N4J6piIWBKa38A4uV-CnIopnOEf8mHAbkNXafXm_CuA,10
-keras_hub_nightly-0.16.1.dev202410160341.dist-info/RECORD,,
+keras_hub_nightly-0.16.1.dev202410180341.dist-info/METADATA,sha256=I3yyfeBrwqtrZvH5HpEzkgeAs0iwD05aUdSbwxlTKy0,7458
+keras_hub_nightly-0.16.1.dev202410180341.dist-info/WHEEL,sha256=OVMc5UfuAQiSplgO0_WdW7vXVGAt9Hdd6qtN4HotdyA,91
+keras_hub_nightly-0.16.1.dev202410180341.dist-info/top_level.txt,sha256=N4J6piIWBKa38A4uV-CnIopnOEf8mHAbkNXafXm_CuA,10
+keras_hub_nightly-0.16.1.dev202410180341.dist-info/RECORD,,
--- a/keras_hub_nightly-0.16.1.dev202410160341.dist-info/WHEEL
+++ b/keras_hub_nightly-0.16.1.dev202410180341.dist-info/WHEEL
@@ -1,5 +1,5 @@
 Wheel-Version: 1.0
-Generator: setuptools (75.1.0)
+Generator: setuptools (75.2.0)
 Root-Is-Purelib: true
 Tag: py3-none-any
 
5