keras-hub-nightly 0.16.1.dev202410080341__py3-none-any.whl → 0.16.1.dev202410100339__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (29)
  1. keras_hub/api/layers/__init__.py +3 -0
  2. keras_hub/api/models/__init__.py +11 -0
  3. keras_hub/src/layers/preprocessing/image_converter.py +2 -1
  4. keras_hub/src/models/image_to_image.py +411 -0
  5. keras_hub/src/models/inpaint.py +513 -0
  6. keras_hub/src/models/mix_transformer/__init__.py +12 -0
  7. keras_hub/src/models/mix_transformer/mix_transformer_classifier.py +4 -0
  8. keras_hub/src/models/mix_transformer/mix_transformer_classifier_preprocessor.py +16 -0
  9. keras_hub/src/models/mix_transformer/mix_transformer_image_converter.py +8 -0
  10. keras_hub/src/models/mix_transformer/mix_transformer_layers.py +9 -5
  11. keras_hub/src/models/mix_transformer/mix_transformer_presets.py +151 -0
  12. keras_hub/src/models/preprocessor.py +4 -4
  13. keras_hub/src/models/stable_diffusion_3/mmdit.py +308 -177
  14. keras_hub/src/models/stable_diffusion_3/stable_diffusion_3_backbone.py +87 -55
  15. keras_hub/src/models/stable_diffusion_3/stable_diffusion_3_image_to_image.py +171 -0
  16. keras_hub/src/models/stable_diffusion_3/stable_diffusion_3_inpaint.py +194 -0
  17. keras_hub/src/models/stable_diffusion_3/stable_diffusion_3_presets.py +1 -1
  18. keras_hub/src/models/stable_diffusion_3/stable_diffusion_3_text_to_image.py +13 -8
  19. keras_hub/src/models/task.py +1 -1
  20. keras_hub/src/models/text_to_image.py +89 -36
  21. keras_hub/src/tests/test_case.py +3 -1
  22. keras_hub/src/tokenizers/tokenizer.py +7 -7
  23. keras_hub/src/utils/preset_utils.py +7 -7
  24. keras_hub/src/utils/timm/preset_loader.py +1 -3
  25. keras_hub/src/version_utils.py +1 -1
  26. {keras_hub_nightly-0.16.1.dev202410080341.dist-info → keras_hub_nightly-0.16.1.dev202410100339.dist-info}/METADATA +1 -1
  27. {keras_hub_nightly-0.16.1.dev202410080341.dist-info → keras_hub_nightly-0.16.1.dev202410100339.dist-info}/RECORD +29 -22
  28. {keras_hub_nightly-0.16.1.dev202410080341.dist-info → keras_hub_nightly-0.16.1.dev202410100339.dist-info}/WHEEL +0 -0
  29. {keras_hub_nightly-0.16.1.dev202410080341.dist-info → keras_hub_nightly-0.16.1.dev202410100339.dist-info}/top_level.txt +0 -0
keras_hub/src/models/text_to_image.py

@@ -56,6 +56,11 @@ class TextToImage(Task):
         # Default compilation.
         self.compile()
 
+    @property
+    def support_negative_prompts(self):
+        """Whether the model supports `negative_prompts` key in `generate()`."""
+        return True
+
     @property
     def latent_shape(self):
         return tuple(self.backbone.latent_shape)
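
The new property gives `generate()` a single switch to key off. A minimal sketch of opting out in a subclass that has no notion of negative prompts (`MyTextToImage` is hypothetical, not part of this diff):

```python
from keras_hub.src.models.text_to_image import TextToImage


class MyTextToImage(TextToImage):
    @property
    def support_negative_prompts(self):
        # generate() will then skip filling in dummy "" negative prompts.
        return False
```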
@@ -171,9 +176,26 @@ class TextToImage(Task):
         This function converts all inputs to tensors, adds a batch dimension if
         necessary, and returns an iterable "dataset like" object (either an
         actual `tf.data.Dataset` or a list with a single batch element).
+
+        The input format must be one of the following:
+        - A single string
+        - A list of strings
+        - A dict with "prompts" and/or "negative_prompts" keys
+        - A tf.data.Dataset with "prompts" and/or "negative_prompts" keys
+
+        The output will be a dict with "prompts" and/or "negative_prompts" keys.
         """
         if tf and isinstance(inputs, tf.data.Dataset):
-            return inputs.as_numpy_iterator(), False
+            _inputs = {
+                "prompts": inputs.map(
+                    lambda x: x["prompts"]
+                ).as_numpy_iterator()
+            }
+            if self.support_negative_prompts:
+                _inputs["negative_prompts"] = inputs.map(
+                    lambda x: x["negative_prompts"]
+                ).as_numpy_iterator()
+            return _inputs, False
 
         def normalize(x):
             if isinstance(x, str):
@@ -182,13 +204,24 @@ class TextToImage(Task):
                 return x[tf.newaxis], True
             return x, False
 
+        def get_dummy_prompts(x):
+            dummy_prompts = [""] * len(x)
+            if tf and isinstance(x, tf.Tensor):
+                return tf.convert_to_tensor(dummy_prompts)
+            else:
+                return dummy_prompts
+
         if isinstance(inputs, dict):
             for key in inputs:
                 inputs[key], input_is_scalar = normalize(inputs[key])
         else:
             inputs, input_is_scalar = normalize(inputs)
+            inputs = {"prompts": inputs}
 
-        return inputs, input_is_scalar
+        if self.support_negative_prompts and "negative_prompts" not in inputs:
+            inputs["negative_prompts"] = get_dummy_prompts(inputs["prompts"])
+
+        return [inputs], input_is_scalar
 
     def _normalize_generate_outputs(self, outputs, input_is_scalar):
         """Normalize user output from the generate function.
@@ -199,12 +232,11 @@ class TextToImage(Task):
         """
 
         def normalize(x):
-            outputs = ops.clip(ops.divide(ops.add(x, 1.0), 2.0), 0.0, 1.0)
+            outputs = ops.concatenate(x, axis=0)
+            outputs = ops.clip(ops.divide(ops.add(outputs, 1.0), 2.0), 0.0, 1.0)
             outputs = ops.cast(ops.round(ops.multiply(outputs, 255.0)), "uint8")
-            outputs = ops.convert_to_numpy(outputs)
-            if input_is_scalar:
-                outputs = outputs[0]
-            return outputs
+            outputs = ops.squeeze(outputs, 0) if input_is_scalar else outputs
+            return ops.convert_to_numpy(outputs)
 
         if isinstance(outputs[0], dict):
             normalized = {}
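
The value mapping the rewritten `normalize()` applies is unchanged; only the batch concatenation and the squeeze-before-convert ordering are new. A plain-numpy sketch of the mapping (not code from this package):

```python
import numpy as np

x = np.array([[-1.0, 0.0, 1.0]])           # decoded outputs in [-1, 1]
x = np.clip((x + 1.0) / 2.0, 0.0, 1.0)     # rescale to [0, 1]
x = np.round(x * 255.0).astype("uint8")    # [[0, 128, 255]]
```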
@@ -216,23 +248,40 @@ class TextToImage(Task):
     def generate(
         self,
         inputs,
-        negative_inputs,
         num_steps,
         guidance_scale,
         seed=None,
     ):
-        """Generate image based on the provided `inputs` and `negative_inputs`.
+        """Generate image based on the provided `inputs`.
+
+        Typically, `inputs` contains a text description (known as a prompt) used
+        to guide the image generation.
+
+        Some models support a `negative_prompts` key, which helps steer the
+        model away from generating certain styles and elements. To enable this,
+        pass `prompts` and `negative_prompts` as a dict:
+
+        ```python
+        text_to_image.generate(
+            {
+                "prompts": "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k",
+                "negative_prompts": "green color",
+            }
+        )
+        ```
 
         If `inputs` are a `tf.data.Dataset`, outputs will be generated
         "batch-by-batch" and concatenated. Otherwise, all inputs will be
         processed as batches.
 
         Args:
-            inputs: python data, tensor data, or a `tf.data.Dataset`.
-            negative_inputs: python data, tensor data, or a `tf.data.Dataset`.
-                Unlike `inputs`, these are used as negative inputs to guide the
-                generation. If not provided, it defaults to `""` for each input
-                in `inputs`.
+            inputs: python data, tensor data, or a `tf.data.Dataset`. The format
+                must be one of the following:
+                - A single string
+                - A list of strings
+                - A dict with "prompts" and/or "negative_prompts" keys
+                - A `tf.data.Dataset` with "prompts" and/or "negative_prompts"
+                    keys
             num_steps: int. The number of diffusion steps to take.
             guidance_scale: float. The classifier free guidance scale defined in
                 [Classifier-Free Diffusion Guidance](
@@ -251,32 +300,36 @@ class TextToImage(Task):
         generate_function = self.make_generate_function()
 
         def preprocess(x):
-            return self.preprocessor.generate_preprocess(x)
+            if self.preprocessor is not None:
+                return self.preprocessor.generate_preprocess(x)
+            else:
+                return x
+
+        def generate(x):
+            token_ids = x[0] if self.support_negative_prompts else x
+
+            # Initialize latents.
+            if isinstance(token_ids, dict):
+                arbitrary_key = list(token_ids.keys())[0]
+                batch_size = ops.shape(token_ids[arbitrary_key])[0]
+            else:
+                batch_size = ops.shape(token_ids)[0]
+            latent_shape = (batch_size,) + self.latent_shape[1:]
+            latents = random.normal(latent_shape, dtype="float32", seed=seed)
+
+            return generate_function(latents, x, num_steps, guidance_scale)
 
         # Normalize and preprocess inputs.
         inputs, input_is_scalar = self._normalize_generate_inputs(inputs)
-        if negative_inputs is None:
-            negative_inputs = [""] * len(inputs)
-        negative_inputs, _ = self._normalize_generate_inputs(negative_inputs)
-
-        if self.preprocessor is not None:
-            inputs = preprocess(inputs)
-            negative_inputs = preprocess(negative_inputs)
-        if isinstance(inputs, dict):
-            batch_size = len(inputs[list(inputs.keys())[0]])
+        if self.support_negative_prompts:
+            token_ids = [preprocess(x["prompts"]) for x in inputs]
+            negative_token_ids = [
+                preprocess(x["negative_prompts"]) for x in inputs
+            ]
+            inputs = [x for x in zip(token_ids, negative_token_ids)]
         else:
-            batch_size = len(inputs)
-
-        # Initialize random latents.
-        latent_shape = (batch_size,) + self.latent_shape[1:]
-        latents = random.normal(latent_shape, dtype="float32", seed=seed)
+            inputs = [preprocess(x["prompts"]) for x in inputs]
 
         # Text-to-image.
-        outputs = generate_function(
-            latents,
-            inputs,
-            negative_inputs,
-            num_steps,
-            guidance_scale,
-        )
+        outputs = [generate(x) for x in inputs]
         return self._normalize_generate_outputs(outputs, input_is_scalar)
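
Putting the pieces together, a hedged usage sketch of the new call convention (the preset name and the `num_steps`/`guidance_scale` values follow the keras-hub docs for Stable Diffusion 3 and are not taken from this diff):

```python
import keras_hub

text_to_image = keras_hub.models.TextToImage.from_preset(
    "stable_diffusion_3_medium", dtype="float16"
)

# Plain prompts: negative prompts default to "" for each input.
images = text_to_image.generate(
    "Astronaut in a jungle, cold color palette, detailed, 8k",
    num_steps=28,
    guidance_scale=7.0,
)

# Dict input: explicit negative prompts steer generation away from a style.
images = text_to_image.generate(
    {
        "prompts": "a photograph of a cat",
        "negative_prompts": "low quality, blurry",
    },
    num_steps=28,
    guidance_scale=7.0,
)
```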
keras_hub/src/tests/test_case.py

@@ -388,6 +388,8 @@ class TestCase(tf.test.TestCase, parameterized.TestCase):
         cls,
         init_kwargs,
         input_data,
+        atol=0.000001,
+        rtol=0.000001,
     ):
         """Save and load a model from disk and assert output is unchanged."""
         model = cls(**init_kwargs)
@@ -401,7 +403,7 @@ class TestCase(tf.test.TestCase, parameterized.TestCase):
 
         # Check that output matches.
         restored_output = restored_model(input_data)
-        self.assertAllClose(model_output, restored_output)
+        self.assertAllClose(model_output, restored_output, atol=atol, rtol=rtol)
 
     def run_backbone_test(
         self,
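
With the new parameters, a test can loosen the comparison for models whose save/load round-trip is not bit-exact. A sketch (`MyBackbone`, `init_kwargs`, and `input_data` are hypothetical placeholders):

```python
class MyBackboneTest(TestCase):
    def test_saved_model(self):
        self.run_model_saving_test(
            cls=MyBackbone,            # hypothetical model class
            init_kwargs=init_kwargs,   # hypothetical constructor kwargs
            input_data=input_data,     # hypothetical sample batch
            atol=1e-5,  # looser than the 1e-6 defaults above
            rtol=1e-5,
        )
```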
keras_hub/src/tokenizers/tokenizer.py

@@ -66,7 +66,7 @@ class Tokenizer(PreprocessingLayer):
     backbone_cls = None
 
     def __init__(self, *args, **kwargs):
-        self.config_name = kwargs.pop("config_name", TOKENIZER_CONFIG_FILE)
+        self.config_file = kwargs.pop("config_file", TOKENIZER_CONFIG_FILE)
         super().__init__(*args, **kwargs)
         self.file_assets = None
 
@@ -178,7 +178,7 @@ class Tokenizer(PreprocessingLayer):
         config = super().get_config()
         config.update(
             {
-                "config_name": self.config_name,
+                "config_file": self.config_file,
             }
         )
         return config
@@ -199,11 +199,11 @@ class Tokenizer(PreprocessingLayer):
     def load_preset_assets(self, preset):
         asset_path = None
         for asset in self.file_assets:
-            subdir = self.config_name.split(".")[0]
+            subdir = self.config_file.split(".")[0]
             preset_path = os.path.join(ASSET_DIR, subdir, asset)
             asset_path = get_file(preset, preset_path)
-        tokenizer_config_name = os.path.dirname(asset_path)
-        self.load_assets(tokenizer_config_name)
+        tokenizer_config_file = os.path.dirname(asset_path)
+        self.load_assets(tokenizer_config_file)
 
     @classproperty
     def presets(cls):
@@ -214,7 +214,7 @@ class Tokenizer(PreprocessingLayer):
     def from_preset(
         cls,
         preset,
-        config_name=TOKENIZER_CONFIG_FILE,
+        config_file=TOKENIZER_CONFIG_FILE,
         **kwargs,
     ):
         """Instantiate a `keras_hub.models.Tokenizer` from a model preset.
@@ -260,4 +260,4 @@ class Tokenizer(PreprocessingLayer):
         backbone_cls = loader.check_backbone_class()
         if cls.backbone_cls != backbone_cls:
             cls = find_subclass(preset, cls, backbone_cls)
-        return loader.load_tokenizer(cls, config_name, **kwargs)
+        return loader.load_tokenizer(cls, config_file, **kwargs)
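
The rename is purely mechanical: `config_name` becomes `config_file` in `__init__`, `get_config`, asset loading, and `from_preset`. A sketch of the kwarg as a caller would now pass it (the preset name is illustrative; "tokenizer.json" mirrors the `TOKENIZER_CONFIG_FILE` default):

```python
import keras_hub

tokenizer = keras_hub.tokenizers.Tokenizer.from_preset(
    "bert_base_en",                # illustrative preset name
    config_file="tokenizer.json",  # the renamed kwarg (was config_name)
)
```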
keras_hub/src/utils/preset_utils.py

@@ -578,7 +578,7 @@ class PresetLoader:
         """Load the backbone model from the preset."""
         raise NotImplementedError
 
-    def load_tokenizer(self, cls, config_name=TOKENIZER_CONFIG_FILE, **kwargs):
+    def load_tokenizer(self, cls, config_file=TOKENIZER_CONFIG_FILE, **kwargs):
         """Load a tokenizer layer from the preset."""
         raise NotImplementedError
 
@@ -609,7 +609,7 @@ class PresetLoader:
         return cls(**kwargs)
 
     def load_preprocessor(
-        self, cls, config_name=PREPROCESSOR_CONFIG_FILE, **kwargs
+        self, cls, config_file=PREPROCESSOR_CONFIG_FILE, **kwargs
     ):
         """Load a preprocessor layer from the preset.
 
@@ -632,8 +632,8 @@ class KerasPresetLoader(PresetLoader):
         backbone.load_weights(get_file(self.preset, MODEL_WEIGHTS_FILE))
         return backbone
 
-    def load_tokenizer(self, cls, config_name=TOKENIZER_CONFIG_FILE, **kwargs):
-        tokenizer_config = load_json(self.preset, config_name)
+    def load_tokenizer(self, cls, config_file=TOKENIZER_CONFIG_FILE, **kwargs):
+        tokenizer_config = load_json(self.preset, config_file)
         tokenizer = load_serialized_object(tokenizer_config, **kwargs)
         if hasattr(tokenizer, "load_preset_assets"):
             tokenizer.load_preset_assets(self.preset)
@@ -678,13 +678,13 @@ class KerasPresetLoader(PresetLoader):
         return task
 
     def load_preprocessor(
-        self, cls, config_name=PREPROCESSOR_CONFIG_FILE, **kwargs
+        self, cls, config_file=PREPROCESSOR_CONFIG_FILE, **kwargs
     ):
         # If there is no `preprocessing.json` or it's for the wrong class,
         # delegate to the super class loader.
-        if not check_file_exists(self.preset, config_name):
+        if not check_file_exists(self.preset, config_file):
             return super().load_preprocessor(cls, **kwargs)
-        preprocessor_json = load_json(self.preset, config_name)
+        preprocessor_json = load_json(self.preset, config_file)
         if not issubclass(check_config_class(preprocessor_json), cls):
             return super().load_preprocessor(cls, **kwargs)
         # We found a `preprocessing.json` with a complete config for our class.
keras_hub/src/utils/timm/preset_loader.py

@@ -50,11 +50,10 @@ class TimmPresetLoader(PresetLoader):
 
     def load_image_converter(self, cls, **kwargs):
         pretrained_cfg = self.config.get("pretrained_cfg", None)
-        if not pretrained_cfg or "input_size" not in pretrained_cfg:
+        if not pretrained_cfg:
             return None
         # This assumes the same basic setup for all timm preprocessing. We may
         # need to extend this as we cover more model types.
-        input_size = pretrained_cfg["input_size"]
         mean = pretrained_cfg["mean"]
         std = pretrained_cfg["std"]
         scale = [1.0 / 255.0 / s for s in std]
@@ -63,7 +62,6 @@ class TimmPresetLoader(PresetLoader):
         if interpolation not in ("bilinear", "nearest", "bicubic"):
             interpolation = "bilinear"  # Unsupported interpolation type.
         return cls(
-            image_size=input_size[1:],
             scale=scale,
             offset=offset,
             interpolation=interpolation,
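
Dropping `input_size` means timm-converted presets no longer force a fixed `image_size` on the converter; only the normalization survives. For reference, the `(x / 255 - mean) / std` convention folds into the converter's `x * scale + offset` form as below (a sketch; the `offset` line sits outside the hunks shown here and is an assumption):

```python
mean = [0.485, 0.456, 0.406]  # typical ImageNet stats from pretrained_cfg
std = [0.229, 0.224, 0.225]
scale = [1.0 / 255.0 / s for s in std]        # as in the hunk above
offset = [-m / s for m, s in zip(mean, std)]  # assumed companion line
```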
keras_hub/src/version_utils.py

@@ -1,7 +1,7 @@
 from keras_hub.src.api_export import keras_hub_export
 
 # Unique source of truth for the version number.
-__version__ = "0.16.1.dev202410080341"
+__version__ = "0.16.1.dev202410100339"
 
 
 @keras_hub_export("keras_hub.version")
{keras_hub_nightly-0.16.1.dev202410080341.dist-info → keras_hub_nightly-0.16.1.dev202410100339.dist-info}/METADATA

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: keras-hub-nightly
-Version: 0.16.1.dev202410080341
+Version: 0.16.1.dev202410100339
 Summary: Industry-strength Natural Language Processing extensions for Keras.
 Home-page: https://github.com/keras-team/keras-hub
 Author: Keras team
{keras_hub_nightly-0.16.1.dev202410080341.dist-info → keras_hub_nightly-0.16.1.dev202410100339.dist-info}/RECORD

@@ -1,15 +1,15 @@
 keras_hub/__init__.py,sha256=QGdXyHgYt6cMUAP1ebxwc6oR86dE0dkMxNy2eOCQtFo,855
 keras_hub/api/__init__.py,sha256=spMxsgqzjpeuC8rY4WP-2kAZ2qwwKRSbFwddXgUjqQE,524
 keras_hub/api/bounding_box/__init__.py,sha256=T8R_X7BPm0et1xaZq8565uJmid7dylsSFSj4V-rGuFQ,1097
-keras_hub/api/layers/__init__.py,sha256=XImD0tHdnDR1a7q3u-Pw-VRMASi9sDtrV6hr2beVYTw,2331
+keras_hub/api/layers/__init__.py,sha256=jQbYVdHrjC0PnJVa3myfJbAmRef9KGwrTgPDaTsWPJw,2439
 keras_hub/api/metrics/__init__.py,sha256=So8Ec-lOcTzn_UUMmAdzDm8RKkPu2dbRUm2px8gpUEI,381
-keras_hub/api/models/__init__.py,sha256=sMfVpa2N90cG7qjkwSEI_x3uCvZNwQqFbedn5wcUzbE,14311
+keras_hub/api/models/__init__.py,sha256=Z3GLSmrvds7-XDtkcB9gq_sNZ5zJ_3Rwl_xoZ-dTX14,14786
 keras_hub/api/samplers/__init__.py,sha256=n-_SEXxr2LNUzK2FqVFN7alsrkx1P_HOVTeLZKeGCdE,730
 keras_hub/api/tokenizers/__init__.py,sha256=_f-r_cyUM2fjBB7iO84ThOdqqsAxHNIewJ2EBDlM0cA,2524
 keras_hub/api/utils/__init__.py,sha256=Gp1E6gG-RtKQS3PBEQEOz9PQvXkXaJ0ySGMqZ7myN7A,215
 keras_hub/src/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 keras_hub/src/api_export.py,sha256=9pQZK27JObxWZ96QPLBp1OBsjWigh1iuV6RglPGMRk0,1499
-keras_hub/src/version_utils.py,sha256=ZcW3wGP8G9ckkrN4UDSpLre640ME6s_nJGCdK-nY_JI,222
+keras_hub/src/version_utils.py,sha256=ta3Ul-3W63rzG1EOq-JStAvR39K8vw23TKBmNmLl2MQ,222
 keras_hub/src/bounding_box/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 keras_hub/src/bounding_box/converters.py,sha256=a5po8DBm87oz2EXfi-0uEZHCMlCJPIb4-MaZIdYx3Dg,17865
 keras_hub/src/bounding_box/formats.py,sha256=YmskOz2BOSat7NaE__J9VfpSNGPJJR0znSzA4lp8MMI,3868
@@ -34,7 +34,7 @@ keras_hub/src/layers/modeling/transformer_encoder.py,sha256=howjIXH_vgBOKaXaIa7m
 keras_hub/src/layers/modeling/transformer_layer_utils.py,sha256=FuznrW33iG50B-VDN8R1RjuA5JG72yNMJ1TBgWLxR0E,3487
 keras_hub/src/layers/preprocessing/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 keras_hub/src/layers/preprocessing/audio_converter.py,sha256=YGh_kQw65a1Z6S5zzSNVP-ChyLYHq3-eOYpOS53xIN8,4156
-keras_hub/src/layers/preprocessing/image_converter.py,sha256=zlg6VKQWjKDCojJnI9VfK4Rt88QE29XjpDewZQNT8IE,10166
+keras_hub/src/layers/preprocessing/image_converter.py,sha256=j8SdL-pFOrWIGIV_HwlABUPhdcSOZXYhPRlvFCukAU8,10180
 keras_hub/src/layers/preprocessing/masked_lm_mask_generator.py,sha256=itxWq3FHYlR0I7jKarQlSKbSmRLl9ut_UTSP3ZDwP0A,8162
 keras_hub/src/layers/preprocessing/multi_segment_packer.py,sha256=ZNqnUFnc9Af122Q7T6YyUoXgIdU9AgIJfsvR1UrCjFU,12068
 keras_hub/src/layers/preprocessing/preprocessing_layer.py,sha256=WyX41b9Ev_YJ5uVQVOAqD0PQasMOPDoyDjl_PkzkAkE,687
@@ -57,15 +57,17 @@ keras_hub/src/models/image_classifier.py,sha256=yt6cjhPfqs8A_eWXBsXdXFzn-aRgH2rV
 keras_hub/src/models/image_classifier_preprocessor.py,sha256=YdewYfMPVHI7gdhbBI-zVcy4NSfg0bhiOHTmGEKoOYI,2668
 keras_hub/src/models/image_segmenter.py,sha256=C1bzIO59pG58iist5GLn_qnlotDpcAVxPV_8a68BkAc,2876
 keras_hub/src/models/image_segmenter_preprocessor.py,sha256=IMmVJWBc0VZ1-5jLmFmmwQ3q_oQnhIfCE9A6nS1ss8Q,3743
+keras_hub/src/models/image_to_image.py,sha256=z2TfFh9DiaEj9u6hEY8May3B0etxhptttg6Bx6bbopM,16452
+keras_hub/src/models/inpaint.py,sha256=8TTusRRS7ntPoAd0BsuhEZjedtaoljI4ZbgKQ_bnF34,20411
 keras_hub/src/models/masked_lm.py,sha256=uXO_dE_hILlOC9jNr6oK6IHi9IGUqLyNGvr6nMt8Rk0,3576
 keras_hub/src/models/masked_lm_preprocessor.py,sha256=g8vrnyYwqdnSw5xppROM1Gzo_jmMWKYZoQCsKdfrFKk,5656
-keras_hub/src/models/preprocessor.py,sha256=pJodz7KRVncvsC3o4qoKDYWP2J0a8E9CD6oVGYgJzIM,7970
+keras_hub/src/models/preprocessor.py,sha256=3CWLsMpQC77w7GzM3fU3Jf-G62ldJjufKyzPVvnGdeI,7970
 keras_hub/src/models/seq_2_seq_lm.py,sha256=w0gX-5YZjatfvAJmFAgSHyqS_BLqc8FF8DPLGK8mrgI,1864
 keras_hub/src/models/seq_2_seq_lm_preprocessor.py,sha256=HUHRbWRG5SF1pPpotGzBhXlrMh4pLFxgAoFk05FIrB4,9687
-keras_hub/src/models/task.py,sha256=2iapEFHvzyl0ASlH6yzQA2OHSr1jV1V-pLtagHdBncQ,14416
+keras_hub/src/models/task.py,sha256=VN-CClNw3EB5Byb7HyyI3CqaS140od7-dmQInmYFSKg,14414
 keras_hub/src/models/text_classifier.py,sha256=VBDvQUHTpJPqKp7A4VAtm35FOmJ3yMo0DW6GdX67xG0,4159
 keras_hub/src/models/text_classifier_preprocessor.py,sha256=EoWp-GHnaLnAKTdAzDmC-soAV92ATF3QozdubdV2WXI,4722
-keras_hub/src/models/text_to_image.py,sha256=N42l1W8YEUBHOdGiT4BQNqzTpgjB2O5dtLU5FbKpMy0,10792
+keras_hub/src/models/text_to_image.py,sha256=7s6rB1To46A7l9ItqRw3Pe4DGRm7YnqbHJ-RyNAlLPE,12973
 keras_hub/src/models/albert/__init__.py,sha256=rR6q_-8FujB1FXp6r4KOI7xi4gFjtAhQwXjp-MIhiyg,257
 keras_hub/src/models/albert/albert_backbone.py,sha256=4NQFo8lhv8rFiNIwQeZxxKxFwT3nKcCt36FUa6oPGok,10073
 keras_hub/src/models/albert/albert_masked_lm.py,sha256=jG6FttE_MAyBe8GzOEXMjEem3wo6UFGvxM3lRmXuS70,4126
@@ -207,10 +209,13 @@ keras_hub/src/models/mistral/mistral_layer_norm.py,sha256=nimMZ5CTPK8v9eflfrGuzq
 keras_hub/src/models/mistral/mistral_presets.py,sha256=gucgdaFAiU-vRDS1g9zWGHjbDF_jaCiljPibCF4yVqY,1329
 keras_hub/src/models/mistral/mistral_tokenizer.py,sha256=wyzR_Y2XwrDiBV3jIeBChSPiaOkVVaxFuLxMH2F6EYA,2005
 keras_hub/src/models/mistral/mistral_transformer_decoder.py,sha256=RDIIB3FhneHZP11tNUFQT9DcWawCMnrtVxtSvtnP3ts,9542
-keras_hub/src/models/mix_transformer/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+keras_hub/src/models/mix_transformer/__init__.py,sha256=neU-h7C0sXS6OmtS5NFJeJ1lF13OW3DaUlT6LXhl6vA,409
 keras_hub/src/models/mix_transformer/mix_transformer_backbone.py,sha256=B4hdhWHZ93lS937BGSSxovDKVXQZVuWrMbFwECFoWrg,6048
-keras_hub/src/models/mix_transformer/mix_transformer_classifier.py,sha256=uXO2-GzI_25TdlXe8O8qvnM7tryadfetVDW3yJLGfiI,348
-keras_hub/src/models/mix_transformer/mix_transformer_layers.py,sha256=SzyJJhuyESlsCgndmZNYuuF0Ogb1FKoYkSfDJnThgT0,9538
+keras_hub/src/models/mix_transformer/mix_transformer_classifier.py,sha256=pVfbbTNuiZRFElCGLyNO3gknNGnut-6-L-zAVB4Nb5w,531
+keras_hub/src/models/mix_transformer/mix_transformer_classifier_preprocessor.py,sha256=lSUuMAJiyWDVH0AVjG2y684bU3msxI3_UTa_xWyLLKQ,570
+keras_hub/src/models/mix_transformer/mix_transformer_image_converter.py,sha256=WG2LjuagCxSYXkFgqd4bHyUoMLFCzTj9QjJBoptW6WM,323
+keras_hub/src/models/mix_transformer/mix_transformer_layers.py,sha256=9AbA4kCJkjeV7fAwbRns8VGn0l1pgQ3CqFPjY-99VGA,9695
+keras_hub/src/models/mix_transformer/mix_transformer_presets.py,sha256=rWrjAAwc9Kmo0c66CNh5cuIpySzqqLKj_VI6hlI9d44,5116
 keras_hub/src/models/mobilenet/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 keras_hub/src/models/mobilenet/mobilenet_backbone.py,sha256=nlXdMqcj18iahy60aew4ON79EHUEuNIgvKY9dToH284,18191
 keras_hub/src/models/mobilenet/mobilenet_image_classifier.py,sha256=l5jo99I0fLlbwLub5jHw07CjC-NnmuV-ySJwXGI20Ek,351
@@ -271,10 +276,12 @@ keras_hub/src/models/sam/sam_prompt_encoder.py,sha256=2foB7900QbzQfZjBo335XYsdjm
 keras_hub/src/models/sam/sam_transformer.py,sha256=L2bdxdc2RUF1juRZ0F0Z6r0gTva1sUwEdjItJmKKf6w,5730
 keras_hub/src/models/stable_diffusion_3/__init__.py,sha256=ZKYQuaRObyhKq8GVAHmoRvlXp6FpU8ChvutVCHyXKuc,343
 keras_hub/src/models/stable_diffusion_3/flow_match_euler_discrete_scheduler.py,sha256=vtVhieAv277mAiZj7Kvvqg_Ba7klfQxZVk4PPxNNQ0s,3062
-keras_hub/src/models/stable_diffusion_3/mmdit.py,sha256=ntmxjDJtZbHDGVPPAnasVZyoOTp5bbMPhxM30SYmpoQ,25711
-keras_hub/src/models/stable_diffusion_3/stable_diffusion_3_backbone.py,sha256=D-U5T6UYKzraHLAgMa-LLcd40ZmX_5rmlybawT4ooHY,21398
-keras_hub/src/models/stable_diffusion_3/stable_diffusion_3_presets.py,sha256=sBGVRFd-bYxcqydydOB70XpOtpTt6AVrTR3LV-LBFXY,662
-keras_hub/src/models/stable_diffusion_3/stable_diffusion_3_text_to_image.py,sha256=8nP3ejDOd1hqjYXJzbri62PgtclxGydw-8bw-qHIPdc,4414
+keras_hub/src/models/stable_diffusion_3/mmdit.py,sha256=ByFot4_I1Z6woOBYvPcbkUtYXWn-dPwgg-4het5vrH4,30615
+keras_hub/src/models/stable_diffusion_3/stable_diffusion_3_backbone.py,sha256=QuggvAy1yvtIXFcwyXOmE_aUdhLcCEUw4FnTuqekys0,22497
+keras_hub/src/models/stable_diffusion_3/stable_diffusion_3_image_to_image.py,sha256=6_IXkxAv588lAKEasJrXgCjQePSXs-54XrvVIlYOT60,5483
+keras_hub/src/models/stable_diffusion_3/stable_diffusion_3_inpaint.py,sha256=tKVAQVbKOt3lWkWsQLKN9KK3WYem0-u5fonq2uBAPrc,6367
+keras_hub/src/models/stable_diffusion_3/stable_diffusion_3_presets.py,sha256=QSDCse8IhEwAWz_lHjAr2N8ygLaJ4ls3uNwvwjJTp5w,662
+keras_hub/src/models/stable_diffusion_3/stable_diffusion_3_text_to_image.py,sha256=pQOC7xMJfJHZxZRiYFtjrbjx0GXb94cNyOr9NELoXo8,4488
 keras_hub/src/models/stable_diffusion_3/stable_diffusion_3_text_to_image_preprocessor.py,sha256=TB0KESt5dnFYiS292PbzB0LdiH23AD6aTSTGmQEuzGM,2742
 keras_hub/src/models/stable_diffusion_3/t5_encoder.py,sha256=oV7P1uwCKdGiD93zXq7kmqX0elMZQU4UvBa8wg6P1hs,5113
 keras_hub/src/models/t5/__init__.py,sha256=OWyoUeDY3v4DnO8Ry02DWV1bNSVGcC89PF9oCftyi1s,233
@@ -325,20 +332,20 @@ keras_hub/src/samplers/serialization.py,sha256=K6FC4AY1sfOLLIk2k4G783XWnQ_Rk3z1Q
 keras_hub/src/samplers/top_k_sampler.py,sha256=WSyrhmOCan55X2JYAnNWE88rkx66sXqdoerl87nOrDQ,2250
 keras_hub/src/samplers/top_p_sampler.py,sha256=9r29WdqBlrW_2TBma6QqkRps2Uit4a6iZPmq1Gsiuko,3400
 keras_hub/src/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-keras_hub/src/tests/test_case.py,sha256=pgjT5CkkkX4BTNfaDD6i-YChO6Ig3But66Ls4RxEymw,25937
+keras_hub/src/tests/test_case.py,sha256=9-yV87k508ciVOJUBiypd8HmnDTHVtWU2m_RbOHMv5Q,26005
 keras_hub/src/tokenizers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 keras_hub/src/tokenizers/byte_pair_tokenizer.py,sha256=fGFp3WgPNYGTztpSGMl0kKFjn1bCeZB71lSJfT1eqEE,24052
 keras_hub/src/tokenizers/byte_tokenizer.py,sha256=vjgrTT8FdtZVAlr0mU13alzADcUhtMrzgOs4lYeHvAQ,10648
 keras_hub/src/tokenizers/sentence_piece_tokenizer.py,sha256=_PaVn4re3AwBkHylJWsvdvOCCYjOnFXLZmj-V34KehU,9562
 keras_hub/src/tokenizers/sentence_piece_tokenizer_trainer.py,sha256=8X_IN-hPDiUETGrSX3wPzFnip73xTYcN6FhLNIwfy-Y,4834
-keras_hub/src/tokenizers/tokenizer.py,sha256=xiT8efGyNmTgsbi6JoJzKUoGg3rWbHjykhfW5mnDbbw,9722
+keras_hub/src/tokenizers/tokenizer.py,sha256=v0Ka5ayrBwpsGBlkIadXK-b4RsMTbhV6BZrvKullbxY,9722
 keras_hub/src/tokenizers/unicode_codepoint_tokenizer.py,sha256=efyjXjUyUCsnh97_kPFq1z2QZENiZSdV0voZytLBffg,13531
 keras_hub/src/tokenizers/word_piece_tokenizer.py,sha256=vP6AZgbzsRiuPCt3W_n94nsF7XiERnagWcH_rqJHtVU,19943
 keras_hub/src/tokenizers/word_piece_tokenizer_trainer.py,sha256=xUhc9EMswarzghNfrDLUFYQBExZOQxbMlfKp9G6A63k,6549
 keras_hub/src/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 keras_hub/src/utils/keras_utils.py,sha256=lrZuC8HL2lmQfbHaS_t1JUyJann_ji2iTYE0Fzos8PU,1969
 keras_hub/src/utils/pipeline_model.py,sha256=33-0vIB9KGYh2mRtyjHxBPvgGZHDusRcRy-xjki3_gg,9024
-keras_hub/src/utils/preset_utils.py,sha256=O7SbhcJJAoPeMhAF77ppG6XkIAIqBqAQVhKoE-Yt61c,30119
+keras_hub/src/utils/preset_utils.py,sha256=w45mluy4bhPPWB68waWpXFQ4MAKvSXS6llVw4rAE70s,30119
 keras_hub/src/utils/python_utils.py,sha256=N8nWeO3san4YnGkffRXG3Ix7VEIMTKSN21FX5TuL7G8,202
 keras_hub/src/utils/tensor_utils.py,sha256=JipeJUDnnvLuT-ToVQC0t9dmSzebwPG6XiZgEwGEGI4,14646
 keras_hub/src/utils/imagenet/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -346,7 +353,7 @@ keras_hub/src/utils/imagenet/imagenet_utils.py,sha256=MvIvv1WJo51ZXBxy4S7t_DsN3Z
 keras_hub/src/utils/timm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 keras_hub/src/utils/timm/convert_densenet.py,sha256=V-GRjWuDnlh3b1EMxqahwZ3GMwSgOa3v0HOfb2ZZ-d0,3342
 keras_hub/src/utils/timm/convert_resnet.py,sha256=ee8eTml0ffJKE8avzGoLFcpjPF63DsvoIUArAGa8Ngg,5832
-keras_hub/src/utils/timm/preset_loader.py,sha256=SbDqy2nr54_Y7bwe4sICQ8n-kHnw0PtvNI52tgrH170,3095
+keras_hub/src/utils/timm/preset_loader.py,sha256=CW-yNXvp3IExK3xuHKjYqbLdBVUST2kSsLmWxSs0i5c,2968
 keras_hub/src/utils/transformers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 keras_hub/src/utils/transformers/convert_albert.py,sha256=VdKclZpCxtDWq3UbUUQZf4fR9DJK_JYZ73B4O_G9skg,7695
 keras_hub/src/utils/transformers/convert_bart.py,sha256=Tk4h9Md9rwN5wjQbGIVrC7qzDpF8kI8qm-FKL8HlUok,14411
@@ -359,7 +366,7 @@ keras_hub/src/utils/transformers/convert_mistral.py,sha256=kVhN9h1ZFVhwkNW8p3wnS
 keras_hub/src/utils/transformers/convert_pali_gemma.py,sha256=B1leeDw96Yvu81hYumf66hIid07k5NLqoeWAJgPnaLs,10649
 keras_hub/src/utils/transformers/preset_loader.py,sha256=GS44hZUuGQCtzsyn8z44ZpHdftd3DFemwV2hx2bQa-U,2738
 keras_hub/src/utils/transformers/safetensor_utils.py,sha256=rPK-Uw1CG0DX0d_UAD-r2cG9fw8GI8bvAlrcXfQ9g4c,3323
-keras_hub_nightly-0.16.1.dev202410080341.dist-info/METADATA,sha256=SrlKiCjbDmXdTPsxSP6_NNTb-RKCwlNldhrxmphg_5Y,7458
-keras_hub_nightly-0.16.1.dev202410080341.dist-info/WHEEL,sha256=GV9aMThwP_4oNCtvEC2ec3qUYutgWeAzklro_0m4WJQ,91
-keras_hub_nightly-0.16.1.dev202410080341.dist-info/top_level.txt,sha256=N4J6piIWBKa38A4uV-CnIopnOEf8mHAbkNXafXm_CuA,10
-keras_hub_nightly-0.16.1.dev202410080341.dist-info/RECORD,,
+keras_hub_nightly-0.16.1.dev202410100339.dist-info/METADATA,sha256=Q1QCPvLIZpKSgKKhzvHH4Qng8vWrQihbFRVWk85Pjqs,7458
+keras_hub_nightly-0.16.1.dev202410100339.dist-info/WHEEL,sha256=GV9aMThwP_4oNCtvEC2ec3qUYutgWeAzklro_0m4WJQ,91
+keras_hub_nightly-0.16.1.dev202410100339.dist-info/top_level.txt,sha256=N4J6piIWBKa38A4uV-CnIopnOEf8mHAbkNXafXm_CuA,10
+keras_hub_nightly-0.16.1.dev202410100339.dist-info/RECORD,,