keras-hub-nightly 0.16.1.dev202409240339-py3-none-any.whl → 0.16.1.dev202409260340-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (44)
  1. keras_hub/api/layers/__init__.py +5 -0
  2. keras_hub/api/models/__init__.py +19 -0
  3. keras_hub/api/tokenizers/__init__.py +1 -0
  4. keras_hub/src/models/{stable_diffusion_v3 → clip}/clip_encoder_block.py +8 -2
  5. keras_hub/src/models/clip/clip_preprocessor.py +147 -0
  6. keras_hub/src/models/{stable_diffusion_v3 → clip}/clip_text_encoder.py +60 -57
  7. keras_hub/src/models/{stable_diffusion_v3 → clip}/clip_tokenizer.py +69 -30
  8. keras_hub/src/models/densenet/__init__.py +6 -0
  9. keras_hub/src/models/densenet/densenet_backbone.py +11 -8
  10. keras_hub/src/models/densenet/densenet_image_classifier.py +27 -4
  11. keras_hub/src/models/densenet/densenet_image_classifier_preprocessor.py +27 -0
  12. keras_hub/src/models/densenet/densenet_image_converter.py +23 -0
  13. keras_hub/src/models/densenet/densenet_presets.py +56 -0
  14. keras_hub/src/models/image_segmenter.py +86 -0
  15. keras_hub/src/models/sam/__init__.py +13 -0
  16. keras_hub/src/models/sam/sam_backbone.py +153 -0
  17. keras_hub/src/models/sam/sam_image_segmenter.py +237 -0
  18. keras_hub/src/models/sam/sam_layers.py +402 -0
  19. keras_hub/src/models/sam/sam_mask_decoder.py +270 -0
  20. keras_hub/src/models/sam/sam_prompt_encoder.py +336 -0
  21. keras_hub/src/models/sam/sam_transformer.py +159 -0
  22. keras_hub/src/models/stable_diffusion_3/__init__.py +13 -0
  23. keras_hub/src/models/stable_diffusion_3/flow_match_euler_discrete_scheduler.py +93 -0
  24. keras_hub/src/models/{stable_diffusion_v3 → stable_diffusion_3}/mmdit.py +351 -26
  25. keras_hub/src/models/stable_diffusion_3/stable_diffusion_3_backbone.py +630 -0
  26. keras_hub/src/models/stable_diffusion_3/stable_diffusion_3_text_to_image.py +151 -0
  27. keras_hub/src/models/stable_diffusion_3/stable_diffusion_3_text_to_image_preprocessor.py +77 -0
  28. keras_hub/src/models/{stable_diffusion_v3/t5_xxl_text_encoder.py → stable_diffusion_3/t5_encoder.py} +7 -7
  29. keras_hub/src/models/stable_diffusion_3/vae_image_decoder.py +333 -0
  30. keras_hub/src/models/{stable_diffusion_v3/t5_xxl_preprocessor.py → t5/t5_preprocessor.py} +12 -3
  31. keras_hub/src/models/text_to_image.py +295 -0
  32. keras_hub/src/models/vit_det/vit_det_backbone.py +17 -12
  33. keras_hub/src/utils/timm/convert_densenet.py +107 -0
  34. keras_hub/src/utils/timm/preset_loader.py +3 -0
  35. keras_hub/src/version_utils.py +1 -1
  36. {keras_hub_nightly-0.16.1.dev202409240339.dist-info → keras_hub_nightly-0.16.1.dev202409260340.dist-info}/METADATA +1 -1
  37. {keras_hub_nightly-0.16.1.dev202409240339.dist-info → keras_hub_nightly-0.16.1.dev202409260340.dist-info}/RECORD +40 -24
  38. keras_hub/src/models/stable_diffusion_v3/clip_preprocessor.py +0 -93
  39. keras_hub/src/models/stable_diffusion_v3/mmdit_block.py +0 -317
  40. keras_hub/src/models/stable_diffusion_v3/vae_attention.py +0 -126
  41. keras_hub/src/models/stable_diffusion_v3/vae_image_decoder.py +0 -186
  42. /keras_hub/src/models/{stable_diffusion_v3 → clip}/__init__.py +0 -0
  43. {keras_hub_nightly-0.16.1.dev202409240339.dist-info → keras_hub_nightly-0.16.1.dev202409260340.dist-info}/WHEEL +0 -0
  44. {keras_hub_nightly-0.16.1.dev202409240339.dist-info → keras_hub_nightly-0.16.1.dev202409260340.dist-info}/top_level.txt +0 -0
keras_hub/src/models/text_to_image.py (new file)
@@ -0,0 +1,295 @@
+ # Copyright 2024 The KerasHub Authors
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     https://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ import itertools
+ from functools import partial
+
+ import keras
+ from keras import ops
+ from keras import random
+
+ from keras_hub.src.api_export import keras_hub_export
+ from keras_hub.src.models.task import Task
+
+ try:
+     import tensorflow as tf
+ except ImportError:
+     tf = None
+
+
+ @keras_hub_export("keras_hub.models.TextToImage")
+ class TextToImage(Task):
+     """Base class for text-to-image tasks.
+
+     `TextToImage` tasks wrap a `keras_hub.models.Backbone` and
+     a `keras_hub.models.Preprocessor` to create a model that can be used for
+     generation and generative fine-tuning.
+
+     `TextToImage` tasks provide an additional, high-level `generate()` function
+     which can be used to generate images with a string in, image out
+     signature.
+
+     All `TextToImage` tasks include a `from_preset()` constructor which can be
+     used to load a pre-trained config and weights.
+
+     Example:
+
+     ```python
+     # Load a Stable Diffusion 3 backbone with pre-trained weights.
+     text_to_image = keras_hub.models.TextToImage.from_preset(
+         "stable_diffusion_3_medium",
+     )
+     text_to_image.generate(
+         "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k"
+     )
+
+     # Load a Stable Diffusion 3 backbone at bfloat16 precision.
+     text_to_image = keras_hub.models.TextToImage.from_preset(
+         "stable_diffusion_3_medium",
+         dtype="bfloat16",
+     )
+     text_to_image.generate(
+         "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k"
+     )
+     ```
+     """
+
+     def __init__(self, *args, **kwargs):
+         super().__init__(*args, **kwargs)
+         # Default compilation.
+         self.compile()
+
+     @property
+     def latent_shape(self):
+         return tuple(self.backbone.latent_shape)
+
+     def compile(
+         self,
+         optimizer="auto",
+         loss="auto",
+         *,
+         metrics="auto",
+         **kwargs,
+     ):
+         """Configures the `TextToImage` task for training.
+
+         The `TextToImage` task extends the default compilation signature of
+         `keras.Model.compile` with defaults for `optimizer`, `loss`, and
+         `metrics`. To override these defaults, pass any value
+         to these arguments during compilation.
+
+         Args:
+             optimizer: `"auto"`, an optimizer name, or a `keras.Optimizer`
+                 instance. Defaults to `"auto"`, which uses the default optimizer
+                 for the given model and task. See `keras.Model.compile` and
+                 `keras.optimizers` for more info on possible `optimizer` values.
+             loss: `"auto"`, a loss name, or a `keras.losses.Loss` instance.
+                 Defaults to `"auto"`, where a
+                 `keras.losses.MeanSquaredError` loss will be applied. See
+                 `keras.Model.compile` and `keras.losses` for more info on
+                 possible `loss` values.
+             metrics: `"auto"`, or a list of metrics to be evaluated by
+                 the model during training and testing. Defaults to `"auto"`,
+                 where a `keras.metrics.MeanSquaredError` will be applied to
+                 track the loss of the model during training. See
+                 `keras.Model.compile` and `keras.metrics` for more info on
+                 possible `metrics` values.
+             **kwargs: See `keras.Model.compile` for a full list of arguments
+                 supported by the compile method.
+         """
+         # Ref: https://github.com/huggingface/diffusers/blob/main/examples/text_to_image/train_text_to_image.py#L410-L414
+         if optimizer == "auto":
+             optimizer = keras.optimizers.AdamW(
+                 1e-4, weight_decay=1e-2, epsilon=1e-8, clipnorm=1.0
+             )
+         if loss == "auto":
+             loss = keras.losses.MeanSquaredError()
+         if metrics == "auto":
+             metrics = [keras.metrics.MeanSquaredError()]
+         super().compile(
+             optimizer=optimizer,
+             loss=loss,
+             metrics=metrics,
+             **kwargs,
+         )
+         self.generate_function = None
+
+     def generate_step(self, *args, **kwargs):
+         """Run generation on batches of input."""
+         raise NotImplementedError
+
+     def make_generate_function(self):
+         """Create or return the compiled generation function."""
+         if self.generate_function is not None:
+             return self.generate_function
+
+         self.generate_function = self.generate_step
+         if keras.config.backend() == "torch":
+             import torch
+
+             def wrapped_function(*args, **kwargs):
+                 with torch.no_grad():
+                     return self.generate_step(*args, **kwargs)
+
+             self.generate_function = wrapped_function
+         elif keras.config.backend() == "tensorflow" and not self.run_eagerly:
+             self.generate_function = tf.function(
+                 self.generate_step, jit_compile=self.jit_compile
+             )
+         elif keras.config.backend() == "jax" and not self.run_eagerly:
+             import jax
+
+             @partial(jax.jit)
+             def compiled_function(state, *args, **kwargs):
+                 (
+                     trainable_variables,
+                     non_trainable_variables,
+                 ) = state
+                 mapping = itertools.chain(
+                     zip(self.trainable_variables, trainable_variables),
+                     zip(self.non_trainable_variables, non_trainable_variables),
+                 )
+
+                 with keras.StatelessScope(state_mapping=mapping):
+                     outputs = self.generate_step(*args, **kwargs)
+                 return outputs
+
+             def wrapped_function(*args, **kwargs):
+                 # Create an explicit tuple of all variable state.
+                 state = (
+                     # Use the explicit variable.value to preserve the
+                     # sharding spec of distribution.
+                     [v.value for v in self.trainable_variables],
+                     [v.value for v in self.non_trainable_variables],
+                 )
+                 outputs = compiled_function(state, *args, **kwargs)
+                 return outputs
+
+             self.generate_function = wrapped_function
+         return self.generate_function
+
+     def _normalize_generate_inputs(self, inputs):
+         """Normalize user input to the generate function.
+
+         This function converts all inputs to tensors, adds a batch dimension if
+         necessary, and returns an iterable "dataset like" object (either an
+         actual `tf.data.Dataset` or a list with a single batch element).
+         """
+         if tf and isinstance(inputs, tf.data.Dataset):
+             return inputs.as_numpy_iterator(), False
+
+         def normalize(x):
+             if isinstance(x, str):
+                 return [x], True
+             if tf and isinstance(x, tf.Tensor) and x.shape.rank == 0:
+                 return x[tf.newaxis], True
+             return x, False
+
+         if isinstance(inputs, dict):
+             for key in inputs:
+                 inputs[key], input_is_scalar = normalize(inputs[key])
+         else:
+             inputs, input_is_scalar = normalize(inputs)
+
+         return inputs, input_is_scalar
+
+     def _normalize_generate_outputs(self, outputs, input_is_scalar):
+         """Normalize user output from the generate function.
+
+         This function converts all output to numpy with a value range of
+         `[0, 255]`. If a batch dimension was added to the input, it is removed
+         from the output.
+         """
+
+         def normalize(x):
+             outputs = ops.clip(ops.divide(ops.add(x, 1.0), 2.0), 0.0, 1.0)
+             outputs = ops.cast(ops.round(ops.multiply(outputs, 255.0)), "uint8")
+             outputs = ops.convert_to_numpy(outputs)
+             if input_is_scalar:
+                 outputs = outputs[0]
+             return outputs
+
+         if isinstance(outputs[0], dict):
+             normalized = {}
+             for key in outputs[0]:
+                 normalized[key] = normalize([x[key] for x in outputs])
+             return normalized
+         return normalize([x for x in outputs])
+
+     def generate(
+         self,
+         inputs,
+         negative_inputs,
+         num_steps,
+         guidance_scale,
+         seed=None,
+     ):
+         """Generate images based on the provided `inputs` and `negative_inputs`.
+
+         If `inputs` is a `tf.data.Dataset`, outputs will be generated
+         "batch-by-batch" and concatenated. Otherwise, all inputs will be
+         processed as batches.
+
+         Args:
+             inputs: python data, tensor data, or a `tf.data.Dataset`.
+             negative_inputs: python data, tensor data, or a `tf.data.Dataset`.
+                 Unlike `inputs`, these are used as negative inputs to guide the
+                 generation. If not provided, it defaults to `""` for each input
+                 in `inputs`.
+             num_steps: int. The number of diffusion steps to take.
+             guidance_scale: float. The classifier-free guidance scale defined in
+                 [Classifier-Free Diffusion Guidance](
+                 https://arxiv.org/abs/2207.12598). A higher scale encourages
+                 generating images more closely related to the prompts, typically
+                 at the cost of lower image quality.
+             seed: optional int. Used as a random seed.
+         """
+         num_steps = ops.convert_to_tensor(num_steps, "int32")
+         guidance_scale = ops.convert_to_tensor(guidance_scale)
+
+         # Set up our three main passes.
+         # 1. Preprocessing strings to dense integer tensors.
+         # 2. Generate outputs via a compiled function on dense tensors.
+         # 3. Postprocess dense tensors to a value range of `[0, 255]`.
+         generate_function = self.make_generate_function()
+
+         def preprocess(x):
+             return self.preprocessor.generate_preprocess(x)
+
+         # Normalize and preprocess inputs.
+         inputs, input_is_scalar = self._normalize_generate_inputs(inputs)
+         if negative_inputs is None:
+             negative_inputs = [""] * len(inputs)
+         negative_inputs, _ = self._normalize_generate_inputs(negative_inputs)
+
+         if self.preprocessor is not None:
+             inputs = preprocess(inputs)
+             negative_inputs = preprocess(negative_inputs)
+         if isinstance(inputs, dict):
+             batch_size = len(inputs[list(inputs.keys())[0]])
+         else:
+             batch_size = len(inputs)
+
+         # Initialize random latents.
+         latent_shape = (batch_size,) + self.latent_shape[1:]
+         latents = random.normal(latent_shape, dtype="float32", seed=seed)
+
+         # Text-to-image.
+         outputs = generate_function(
+             latents,
+             inputs,
+             negative_inputs,
+             num_steps,
+             guidance_scale,
+         )
+         return self._normalize_generate_outputs(outputs, input_is_scalar)
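
For orientation, the new `TextToImage.generate()` API above chains preprocessing, a compiled `generate_step`, and output postprocessing. A minimal usage sketch, assuming the `stable_diffusion_3_medium` preset named in the class docstring; the `num_steps` and `guidance_scale` values are illustrative, not library defaults:

```python
import keras_hub

# Load the new task class; dtype="bfloat16" lowers memory use, per the docstring.
text_to_image = keras_hub.models.TextToImage.from_preset(
    "stable_diffusion_3_medium",
    dtype="bfloat16",
)

# generate() normalizes the prompt to a batch, samples latents of shape
# (batch,) + backbone.latent_shape[1:], runs the compiled generate_step,
# and returns a uint8 image array with values in [0, 255].
image = text_to_image.generate(
    "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k",
    negative_inputs=None,  # defaults to "" per prompt
    num_steps=28,          # illustrative value
    guidance_scale=7.0,    # illustrative value
    seed=42,
)
```

The returned array is `uint8` in `[0, 255]` because `_normalize_generate_outputs` rescales the backbone's `[-1, 1]` output.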
keras_hub/src/models/vit_det/vit_det_backbone.py
@@ -104,7 +104,7 @@ class ViTDetBackbone(Backbone):
          **kwargs
      ):
          # === Functional model ===
-         img_input = keras.layers.Input(shape=image_shape)
+         img_input = keras.layers.Input(shape=image_shape, name="images")
          # Check that the input image is well specified.
          if img_input.shape[-3] is None or img_input.shape[-2] is None:
              raise ValueError(
@@ -144,17 +144,22 @@ class ViTDetBackbone(Backbone):
                  ),
                  input_size=(img_size // patch_size, img_size // patch_size),
              )(x)
-         x = keras.layers.Conv2D(
-             filters=num_output_channels, kernel_size=1, use_bias=False
-         )(x)
-         x = keras.layers.LayerNormalization(epsilon=1e-6)(x)
-         x = keras.layers.Conv2D(
-             filters=num_output_channels,
-             kernel_size=3,
-             padding="same",
-             use_bias=False,
-         )(x)
-         x = keras.layers.LayerNormalization(epsilon=1e-6)(x)
+         self.neck = keras.models.Sequential(
+             [
+                 keras.layers.Conv2D(
+                     filters=num_output_channels, kernel_size=1, use_bias=False
+                 ),
+                 keras.layers.LayerNormalization(epsilon=1e-6),
+                 keras.layers.Conv2D(
+                     filters=num_output_channels,
+                     kernel_size=3,
+                     padding="same",
+                     use_bias=False,
+                 ),
+                 keras.layers.LayerNormalization(epsilon=1e-6),
+             ]
+         )
+         x = self.neck(x)

          super().__init__(inputs=img_input, outputs=x, **kwargs)

keras_hub/src/utils/timm/convert_densenet.py (new file)
@@ -0,0 +1,107 @@
+ # Copyright 2024 The KerasHub Authors
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     https://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ import numpy as np
+
+ from keras_hub.src.models.densenet.densenet_backbone import DenseNetBackbone
+
+ backbone_cls = DenseNetBackbone
+
+
+ def convert_backbone_config(timm_config):
+     timm_architecture = timm_config["architecture"]
+
+     if timm_architecture == "densenet121":
+         stackwise_num_repeats = [6, 12, 24, 16]
+     elif timm_architecture == "densenet169":
+         stackwise_num_repeats = [6, 12, 32, 32]
+     elif timm_architecture == "densenet201":
+         stackwise_num_repeats = [6, 12, 48, 32]
+     else:
+         raise ValueError(
+             f"Currently, the architecture {timm_architecture} is not supported."
+         )
+     return dict(
+         stackwise_num_repeats=stackwise_num_repeats,
+         compression_ratio=0.5,
+         growth_rate=32,
+     )
+
+
+ def convert_weights(backbone, loader, timm_config):
+     def port_conv2d(keras_layer_name, hf_weight_prefix):
+         loader.port_weight(
+             backbone.get_layer(keras_layer_name).kernel,
+             hf_weight_key=f"{hf_weight_prefix}.weight",
+             hook_fn=lambda x, _: np.transpose(x, (2, 3, 1, 0)),
+         )
+
+     def port_batch_normalization(keras_layer_name, hf_weight_prefix):
+         loader.port_weight(
+             backbone.get_layer(keras_layer_name).gamma,
+             hf_weight_key=f"{hf_weight_prefix}.weight",
+         )
+         loader.port_weight(
+             backbone.get_layer(keras_layer_name).beta,
+             hf_weight_key=f"{hf_weight_prefix}.bias",
+         )
+         loader.port_weight(
+             backbone.get_layer(keras_layer_name).moving_mean,
+             hf_weight_key=f"{hf_weight_prefix}.running_mean",
+         )
+         loader.port_weight(
+             backbone.get_layer(keras_layer_name).moving_variance,
+             hf_weight_key=f"{hf_weight_prefix}.running_var",
+         )
+
+     # Stem
+     port_conv2d("conv1_conv", "features.conv0")
+     port_batch_normalization("conv1_bn", "features.norm0")
+
+     # Stages
+     num_stacks = len(backbone.stackwise_num_repeats)
+     for stack_index in range(num_stacks):
+         for block_idx in range(backbone.stackwise_num_repeats[stack_index]):
+             keras_name = f"stack{stack_index+1}_block{block_idx+1}"
+             hf_name = (
+                 f"features.denseblock{stack_index+1}.denselayer{block_idx+1}"
+             )
+             port_batch_normalization(f"{keras_name}_1_bn", f"{hf_name}.norm1")
+             port_conv2d(f"{keras_name}_1_conv", f"{hf_name}.conv1")
+             port_batch_normalization(f"{keras_name}_2_bn", f"{hf_name}.norm2")
+             port_conv2d(f"{keras_name}_2_conv", f"{hf_name}.conv2")
+
+     for stack_index in range(num_stacks - 1):
+         keras_transition_name = f"transition{stack_index+1}"
+         hf_transition_name = f"features.transition{stack_index+1}"
+         port_batch_normalization(
+             f"{keras_transition_name}_bn", f"{hf_transition_name}.norm"
+         )
+         port_conv2d(
+             f"{keras_transition_name}_conv", f"{hf_transition_name}.conv"
+         )
+
+     # Post
+     port_batch_normalization("bn", "features.norm5")
+
+
+ def convert_head(task, loader, timm_config):
+     loader.port_weight(
+         task.output_dense.kernel,
+         hf_weight_key="classifier.weight",
+         hook_fn=lambda x, _: np.transpose(np.squeeze(x)),
+     )
+     loader.port_weight(
+         task.output_dense.bias,
+         hf_weight_key="classifier.bias",
+     )
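
The `port_conv2d` helper above exists because timm stores conv kernels in PyTorch's `(out_channels, in_channels, height, width)` layout while Keras expects `(height, width, in_channels, out_channels)`; the `(2, 3, 1, 0)` transpose in its `hook_fn` maps one to the other. A standalone sketch of that layout change, using the DenseNet stem conv shape as an example:

```python
import numpy as np

# timm/PyTorch layout for the DenseNet stem conv ported above:
# (out_channels=64, in_channels=3, kh=7, kw=7).
torch_kernel = np.random.rand(64, 3, 7, 7)

# Same transpose as the hook_fn in port_conv2d.
keras_kernel = np.transpose(torch_kernel, (2, 3, 1, 0))
assert keras_kernel.shape == (7, 7, 3, 64)  # (kh, kw, in, out)
```

`convert_head` plays the same role for the classifier `Dense` layer, squeezing and transposing `(num_classes, channels)` to the `(channels, num_classes)` kernel Keras expects.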
keras_hub/src/utils/timm/preset_loader.py
@@ -16,6 +16,7 @@
  from keras_hub.src.models.image_classifier import ImageClassifier
  from keras_hub.src.utils.preset_utils import PresetLoader
  from keras_hub.src.utils.preset_utils import jax_memory_cleanup
+ from keras_hub.src.utils.timm import convert_densenet
  from keras_hub.src.utils.timm import convert_resnet
  from keras_hub.src.utils.transformers.safetensor_utils import SafetensorLoader

@@ -26,6 +27,8 @@ class TimmPresetLoader(PresetLoader):
          architecture = self.config["architecture"]
          if "resnet" in architecture:
              self.converter = convert_resnet
+         elif "densenet" in architecture:
+             self.converter = convert_densenet
          else:
              raise ValueError(
                  "KerasHub has no converter for timm models "
keras_hub/src/version_utils.py
@@ -15,7 +15,7 @@
  from keras_hub.src.api_export import keras_hub_export

  # Unique source of truth for the version number.
- __version__ = "0.16.1.dev202409240339"
+ __version__ = "0.16.1.dev202409260340"


  @keras_hub_export("keras_hub.version")
keras_hub_nightly-0.16.1.dev202409260340.dist-info/METADATA
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: keras-hub-nightly
- Version: 0.16.1.dev202409240339
+ Version: 0.16.1.dev202409260340
  Summary: Industry-strength Natural Language Processing extensions for Keras.
  Home-page: https://github.com/keras-team/keras-hub
  Author: Keras team
keras_hub_nightly-0.16.1.dev202409260340.dist-info/RECORD
@@ -1,15 +1,15 @@
  keras_hub/__init__.py,sha256=La-s5SQDd0312puWDSbPJ2XYxFXtg0jsCdUa2LMY-Z8,1440
  keras_hub/api/__init__.py,sha256=8EwhEBO-o-92lvGv6M5zOdkNL9Bd3xfutlfGNJ8QwBE,1109
  keras_hub/api/bounding_box/__init__.py,sha256=LNSVZLB1WJ9hMg0wxt7HTfFFd9uAFviH9x9CnfJYzBA,1682
- keras_hub/api/layers/__init__.py,sha256=4OlmzaQ0I8RuHp7Ot9580loeElsV4QeB2Lon8ZB_a1Q,2600
+ keras_hub/api/layers/__init__.py,sha256=40siZktbh1Xw7FjvdrPlW_srr0gNzB-a4N8_VdMh-WQ,2841
  keras_hub/api/metrics/__init__.py,sha256=tgQfooPHzlq6w34RHfro6vO8IUITLTf-jU2IWEBxxUM,966
- keras_hub/api/models/__init__.py,sha256=0BRVIXtv8DrIbE5n1JeAR_gVeF1_sG_zeMI0cR0rjBI,13396
+ keras_hub/api/models/__init__.py,sha256=BekXaBeF9KTcEEqLkYNCy-45nqpc6OkULiELb4FwUSo,14372
  keras_hub/api/samplers/__init__.py,sha256=l56H4y3h_HlRn_PpeMyZ6vC7228EH_BVFo4Caay-zQ8,1315
- keras_hub/api/tokenizers/__init__.py,sha256=nzMwKmxkMCOiYB35BIgxHNveCM9WoYRp7ChhmVK8MIM,3042
+ keras_hub/api/tokenizers/__init__.py,sha256=skXkmjQrm-0BxT4V0bLO97FUXS1xdmC71RCO7yAZ3C0,3109
  keras_hub/api/utils/__init__.py,sha256=4IXDgmXqFzqrCK2MPgkih0Ye1s-8hrlBaUk-n5Kqwl4,800
  keras_hub/src/__init__.py,sha256=lY7spwqXeGX_75qOHiSCff7FPvFCvRamJMF5ua9OWCg,585
  keras_hub/src/api_export.py,sha256=82JzmDgnWTJR-PRJI9L_vjhW2Svz8gilbE1NMGZ2JgA,2085
- keras_hub/src/version_utils.py,sha256=i_AT_prEzLhyI7l52vHMFFHNaayHc6VB54FNwdhAm6o,808
+ keras_hub/src/version_utils.py,sha256=VLABG9MV5PbD192g7QxqZpMVhvkPMYTB9JRjALW0Yeo,808
  keras_hub/src/bounding_box/__init__.py,sha256=lY7spwqXeGX_75qOHiSCff7FPvFCvRamJMF5ua9OWCg,585
  keras_hub/src/bounding_box/converters.py,sha256=V2ti6xPpaBgeLKbTpCsHsABdYOYASerIKX9oWqeOjHo,18450
  keras_hub/src/bounding_box/formats.py,sha256=5bbHO-n2ADsKIOBJDHMvIPCeNBaV1_mj-NVCgBKNiu8,4453
@@ -56,6 +56,7 @@ keras_hub/src/models/causal_lm_preprocessor.py,sha256=VvHwIwnQyKzMDKTtW0CuWQ0faR
  keras_hub/src/models/feature_pyramid_backbone.py,sha256=p4z7urzAAz0V6Q9WS57heaxWVLKW-11LoFKnXYxetUA,2832
  keras_hub/src/models/image_classifier.py,sha256=72qxEL01DSKE-Ugg4tpZqkLQpYf15bPfpknBnbx_G8Q,3754
  keras_hub/src/models/image_classifier_preprocessor.py,sha256=Az9596ow470lqCzYF0I-GUkHbVfWx4GiynvpwGws6f0,3199
+ keras_hub/src/models/image_segmenter.py,sha256=J8kcZzrVgQxQi21juGsOmCJaNWLYx_S8aGq6qss5TLA,3599
  keras_hub/src/models/masked_lm.py,sha256=x8jeqgYsKsgeVPAirVRPHDdT21FAhqJ45pb8mIPc410,4161
  keras_hub/src/models/masked_lm_preprocessor.py,sha256=Z6mo0szZp5Kfn6LmtY7EjZWGxLdR4c75hfw97V310Kc,6241
  keras_hub/src/models/preprocessor.py,sha256=PZruA4xHS_w0-9hWLD1iJ79aOQMP81aJPYXl5SpjXak,7174
@@ -64,6 +65,7 @@ keras_hub/src/models/seq_2_seq_lm_preprocessor.py,sha256=fQv-zg7vvIpy3ucCbIkiey8
  keras_hub/src/models/task.py,sha256=elkNVXUAbUskRprIBmTDiJkFheLo1mLTX9lppelHucc,14432
  keras_hub/src/models/text_classifier.py,sha256=BhsLovKyIVslm4ibrzFqtxrqljyNehk1lTpQ-r3bq5k,4744
  keras_hub/src/models/text_classifier_preprocessor.py,sha256=6Mkypx3UUj4gUmLlocaLZBc2Addk_pshKPWwy7wb788,5307
+ keras_hub/src/models/text_to_image.py,sha256=6cNGcbRAooBmMoDJzPUzbHGipAIW03GOxN0b93PGU_g,11377
  keras_hub/src/models/albert/__init__.py,sha256=RuIE1aGly5hA0OHBu_QA09XairoViM1kvS6K3kzVB3Q,843
  keras_hub/src/models/albert/albert_backbone.py,sha256=MNurFI3ansonMPJi8gmRf0dXwMwE38C-DJzqdkuLs9o,10659
  keras_hub/src/models/albert/albert_masked_lm.py,sha256=Y8N5HqQ3fUl4lUG4T_vbn_zI-Pink8oDFRKlxfGm6S8,4712
@@ -94,6 +96,11 @@ keras_hub/src/models/bloom/bloom_causal_lm_preprocessor.py,sha256=WVTWRUbQXUBlfC
  keras_hub/src/models/bloom/bloom_decoder.py,sha256=hSoeVnwRQvGbpVhYmf7-k8FB3Wg4auwZWdr2ubiNtxc,7157
  keras_hub/src/models/bloom/bloom_presets.py,sha256=7GiGFPmcXd_UraNsWGQffpzjKDRF-7nqIoUsic78xf0,4696
  keras_hub/src/models/bloom/bloom_tokenizer.py,sha256=ZMx8mHhw0D50zmmvYdmpg-Lk2GcvHz7pPlRpPlhS_2s,3161
+ keras_hub/src/models/clip/__init__.py,sha256=lY7spwqXeGX_75qOHiSCff7FPvFCvRamJMF5ua9OWCg,585
+ keras_hub/src/models/clip/clip_encoder_block.py,sha256=c6gyxHA6f23I_kZpaZkyercJCK7gkDWebiIbRUJyE88,3793
+ keras_hub/src/models/clip/clip_preprocessor.py,sha256=e1c44Qy4ZIqhGBehpMokVzI7AtZaaFqrfrbNFSN2ad8,5337
+ keras_hub/src/models/clip/clip_text_encoder.py,sha256=bvjUoJUunmEX-PMJWb2rZEA34_okoGCpChqZRBx5HE0,5912
+ keras_hub/src/models/clip/clip_tokenizer.py,sha256=XYXUS5eyPMQr8OBDWHSvqyrUqluLaIXF62rrHRAqCNM,7671
  keras_hub/src/models/csp_darknet/__init__.py,sha256=lY7spwqXeGX_75qOHiSCff7FPvFCvRamJMF5ua9OWCg,585
  keras_hub/src/models/csp_darknet/csp_darknet_backbone.py,sha256=h0eua1EZP0vBV416uOVMmMP1JXy7cVoEj0JEO0OO_lc,14312
  keras_hub/src/models/csp_darknet/csp_darknet_image_classifier.py,sha256=qLav7bxuzB0oaNJLs8gIiQbQVFjAlteDT7WKRfKoSmk,4355
@@ -108,9 +115,12 @@ keras_hub/src/models/deberta_v3/deberta_v3_tokenizer.py,sha256=NLLkMvotpPZUdRELa
  keras_hub/src/models/deberta_v3/disentangled_attention_encoder.py,sha256=Zt10UPxYsr_x8isO_OrXeaquWVJbcE49raM6_BkDdEs,9142
  keras_hub/src/models/deberta_v3/disentangled_self_attention.py,sha256=MxpWy30h9JB8nlEk7V9_wETzP-tpv1Sd1Wiz_pHGpkI,13708
  keras_hub/src/models/deberta_v3/relative_embedding.py,sha256=QT5MAnheJ1wSKFeN49pdnZzWkztz5K2oYYuNEtB_5xM,3472
- keras_hub/src/models/densenet/__init__.py,sha256=lY7spwqXeGX_75qOHiSCff7FPvFCvRamJMF5ua9OWCg,585
- keras_hub/src/models/densenet/densenet_backbone.py,sha256=BbTecC7gfigSC3t4L-kGsZHS7pjj8DtDIztyMxo_AoI,7238
- keras_hub/src/models/densenet/densenet_image_classifier.py,sha256=eECPZKHycVHNbgFuBHyiZGPWBn0M_pBdLasjmroc95g,4303
+ keras_hub/src/models/densenet/__init__.py,sha256=GV_UHqkQ4TOVBkKKUGioA63O7QWBbHBEQqdHuXyK0TA,855
+ keras_hub/src/models/densenet/densenet_backbone.py,sha256=2M3g7xxLMXbMpIGTvUQs6Bg4W6HdKKUkmJC8ZFg5DJc,7308
+ keras_hub/src/models/densenet/densenet_image_classifier.py,sha256=e92FdUitdYfxVlRK06Tpk18CGlQz1Sqzsz47yjnClwM,5327
+ keras_hub/src/models/densenet/densenet_image_classifier_preprocessor.py,sha256=83OcsZp2RZ626YGQ99roR8RKArlink-6VsUqwlv7L_s,1148
+ keras_hub/src/models/densenet/densenet_image_converter.py,sha256=oynHnALbasOWF0f4iYbJPyfYQ8_u0oSZAK8-zSdG7kM,973
+ keras_hub/src/models/densenet/densenet_presets.py,sha256=RypneRdDz0OlsRidmjh03gWalre7MlrHNOce97PLEjw,2119
  keras_hub/src/models/distil_bert/__init__.py,sha256=EiJUA3y_b22rMacMbBD7jD0eBSzR-wbVtF73k2RsQow,889
  keras_hub/src/models/distil_bert/distil_bert_backbone.py,sha256=ZW2OgNlWXeRlfI5BrcJLYr4Oc2qNJZoDxjoL7-cGuIQ,7027
  keras_hub/src/models/distil_bert/distil_bert_masked_lm.py,sha256=1BFS1At_HYlLK21VWyhQPrPtActpmR52A8LJG2c6N8Y,4862
@@ -240,21 +250,26 @@ keras_hub/src/models/roberta/roberta_presets.py,sha256=Ys5WnfBCzrRDLVLrAm412ojHY
  keras_hub/src/models/roberta/roberta_text_classifier.py,sha256=A4psd1Ef0ZSPMCsBpSLe5xmZqsFSn5XZ8gr_ekL9EoU,7268
  keras_hub/src/models/roberta/roberta_text_classifier_preprocessor.py,sha256=xK0dGPi3nZ5mUoRtTSE8OhibQSaOvzkGELhPAJAB5sc,6579
  keras_hub/src/models/roberta/roberta_tokenizer.py,sha256=RlKxa0eo7KYgRH5HSHrflna2LkB9pS6qjm2cr4DbuBg,3299
- keras_hub/src/models/stable_diffusion_v3/__init__.py,sha256=lY7spwqXeGX_75qOHiSCff7FPvFCvRamJMF5ua9OWCg,585
- keras_hub/src/models/stable_diffusion_v3/clip_encoder_block.py,sha256=6-bOVTGHCSniDYf616UhKmDHM239y8J5wdjZATXgxig,3556
- keras_hub/src/models/stable_diffusion_v3/clip_preprocessor.py,sha256=90QYFvAlSk_F1HC80VG6IceVN0Q8paIHZQpbaG2pMec,3172
- keras_hub/src/models/stable_diffusion_v3/clip_text_encoder.py,sha256=hVL3DaoYOTYd3bi0PUoKcMJTFxvsMwQ905uS7Ol__DU,5233
- keras_hub/src/models/stable_diffusion_v3/clip_tokenizer.py,sha256=5CdplYY3L50tgEflJep2VjjVjlLz-JMmobGd6QnyI6I,6296
- keras_hub/src/models/stable_diffusion_v3/mmdit.py,sha256=wlH6x9bS6gL3SsuiTpF541_2bwtrCOnUQIdPPavXCV0,14596
- keras_hub/src/models/stable_diffusion_v3/mmdit_block.py,sha256=xY-iqzIHb_h_nzzuQKDbbjPi3738A8XlL3nIGc5Taas,10877
- keras_hub/src/models/stable_diffusion_v3/t5_xxl_preprocessor.py,sha256=Liu6yg4ipCoisx0MGhJZvJTpOedl12NxxeBZtgrc0vs,2645
- keras_hub/src/models/stable_diffusion_v3/t5_xxl_text_encoder.py,sha256=xiK82Z4ioTEpgJre8YQ_8GkrwFNsbyxYurmTUDCrweU,5735
- keras_hub/src/models/stable_diffusion_v3/vae_attention.py,sha256=YQpVu4NaySi2pgczD-lru_jUYHg6YBxjDZvyclJ4T1s,4189
- keras_hub/src/models/stable_diffusion_v3/vae_image_decoder.py,sha256=ww6s-h4YjNKdRhv9bv9Gx2UA1JYOjn54IDAWFtjLO1Y,5879
+ keras_hub/src/models/sam/__init__.py,sha256=lY7spwqXeGX_75qOHiSCff7FPvFCvRamJMF5ua9OWCg,585
+ keras_hub/src/models/sam/sam_backbone.py,sha256=Gv535P33JczvBxP7Z8n4vQCXJnzJAKd5YGdUn6ni3uQ,4937
+ keras_hub/src/models/sam/sam_image_segmenter.py,sha256=QW00mR0fVhqiSFVnYORCIGAa9HhDJNLYWi1fA6Kp1uM,8091
+ keras_hub/src/models/sam/sam_layers.py,sha256=6A4H2qiJSmpSUjp0xwaGzhQeAAUWi-lsETJjyfHiMV8,14448
+ keras_hub/src/models/sam/sam_mask_decoder.py,sha256=Iwq9-YQYVXtFXg7fUnu0BRyUMS6D_56AV2IOCVdlGb8,10135
+ keras_hub/src/models/sam/sam_prompt_encoder.py,sha256=rUinjN0yI8h6ewA_n0GWMJAg5FszS0x25ADdKiGAaBA,12387
+ keras_hub/src/models/sam/sam_transformer.py,sha256=V3UfDYldxXVW2jngVUS9Klu7HXwKf5ROfpTm3fPEtOo,6316
+ keras_hub/src/models/stable_diffusion_3/__init__.py,sha256=lY7spwqXeGX_75qOHiSCff7FPvFCvRamJMF5ua9OWCg,585
+ keras_hub/src/models/stable_diffusion_3/flow_match_euler_discrete_scheduler.py,sha256=6zxybuut0DNIAUdGO8CPoRn2iZyWrGjYtR6AkqNmWns,3412
+ keras_hub/src/models/stable_diffusion_3/mmdit.py,sha256=aVXnQcRpiJ5XnGQxF9gHfIdKGkErmVPfmsNFnjVLm04,26296
+ keras_hub/src/models/stable_diffusion_3/stable_diffusion_3_backbone.py,sha256=4mZoMDIX7Soy5EPgA-oY2HBy6_z1CjD4jclgsdGJkQY,22577
+ keras_hub/src/models/stable_diffusion_3/stable_diffusion_3_text_to_image.py,sha256=pxS-JcF1eut6ufSrsXsJcE_74ELsEOeFKdbe_xDAhqk,5004
+ keras_hub/src/models/stable_diffusion_3/stable_diffusion_3_text_to_image_preprocessor.py,sha256=3U-su-hUi5BP6biQdZs0MllxTPa1FLE6azCC0d3sA5E,2736
+ keras_hub/src/models/stable_diffusion_3/t5_encoder.py,sha256=XlGkQrBThiPk6fCgONNYafIz68INeBlgOTuKKQWiyzk,5698
+ keras_hub/src/models/stable_diffusion_3/vae_image_decoder.py,sha256=rmza741siiDXPE80pzdY4hPiUPoRu7sMj5OeI0t1lFo,10641
  keras_hub/src/models/t5/__init__.py,sha256=1XZ5_R-qymPE1M1IyTqyNAW6_sWn8viJGXjqzB61sFw,819
  keras_hub/src/models/t5/t5_backbone.py,sha256=y_gEISm9CxL_1goJLwR-moAxS-bzxNNcdL__w7e8Isw,10844
  keras_hub/src/models/t5/t5_layer_norm.py,sha256=lVP_6IajHf8kX0APzGNdSZa-8IkkzsiLy5VcKOGhtkg,1216
  keras_hub/src/models/t5/t5_multi_head_attention.py,sha256=ToRrHmJKiTJ2F8jF1HIgHCagme7MSxn9FIQGEXlH3Vo,12445
+ keras_hub/src/models/t5/t5_preprocessor.py,sha256=r7dJslXSimw05Azyw6_3xXpMpIsvPn0yhXXowHlJTWM,2838
  keras_hub/src/models/t5/t5_presets.py,sha256=2RT_NuJcqDdSeAsoSJXh5O_ax2H-s4YKTAoYErVPwPQ,3590
  keras_hub/src/models/t5/t5_tokenizer.py,sha256=UnmZjiKhyb4AU7zALW3YAM_6_OGzYOVEGStBiw4ICvg,3103
  keras_hub/src/models/t5/t5_transformer_layer.py,sha256=wnu108InkHH9YMmFNTbmgIqcrKQQUxeJ7S1dcjUfBSY,5933
@@ -262,7 +277,7 @@ keras_hub/src/models/vgg/__init__.py,sha256=lY7spwqXeGX_75qOHiSCff7FPvFCvRamJMF5
  keras_hub/src/models/vgg/vgg_backbone.py,sha256=O6onZEduEPt1J4v2HFgtHsxu-SheqpUwY2pYoeLa6uE,5080
  keras_hub/src/models/vgg/vgg_image_classifier.py,sha256=cDcmHoHU1BZ211JakGPw3Z9lV22oMmK8J4-Ng8S07G0,4071
  keras_hub/src/models/vit_det/__init__.py,sha256=lY7spwqXeGX_75qOHiSCff7FPvFCvRamJMF5ua9OWCg,585
- keras_hub/src/models/vit_det/vit_det_backbone.py,sha256=4b3CUk4zg8gjFJvDU-QJZP72CV8jqw3TnaoCzUC-vyo,8054
+ keras_hub/src/models/vit_det/vit_det_backbone.py,sha256=wRtFHqKSQ-d7Bzv4LJ-Uhjn013AsIP86wvHOrXfGzGU,8241
  keras_hub/src/models/vit_det/vit_layers.py,sha256=JeUzOT2jmSOoJ_OiHOfLSkkCUZ5mlK5Mfd21DwudRCQ,20436
  keras_hub/src/models/whisper/__init__.py,sha256=FI-xj6FwZDAAdCfKhOrE1_roQ8cXhD1gK4G6CLTvPQo,849
  keras_hub/src/models/whisper/whisper_audio_converter.py,sha256=JqtA2kLUMFKZ4FrI8g2piEjahE-0-F3Yp4qQXS1cYf4,8973
@@ -314,8 +329,9 @@ keras_hub/src/utils/tensor_utils.py,sha256=XpWORE8iUzHXv1E1akiYDep07ndZJRKvjsKVl
  keras_hub/src/utils/imagenet/__init__.py,sha256=AK2s8L-VARI5OmlT6G3vtlKIVyjwLfgVwXfxzhhSCq4,585
  keras_hub/src/utils/imagenet/imagenet_utils.py,sha256=0iHrAQbh5DCa9Dh7tJiQeJc7AGzNO7j0cFEWS2Of16w,39889
  keras_hub/src/utils/timm/__init__.py,sha256=lY7spwqXeGX_75qOHiSCff7FPvFCvRamJMF5ua9OWCg,585
+ keras_hub/src/utils/timm/convert_densenet.py,sha256=RqPlE0J7iXgcjyAPEfLLMGIKvmGs3QJQlYNTFrhOLKM,3927
  keras_hub/src/utils/timm/convert_resnet.py,sha256=X2N9lk8sqRMzOMXkcIThAu6ZEtw8u8_Y4Kol82iTuW4,6417
- keras_hub/src/utils/timm/preset_loader.py,sha256=ac2PwGkfe-bikhQEFeIM25gDs3xk0E9SS5A1YEzZYQU,3602
+ keras_hub/src/utils/timm/preset_loader.py,sha256=e3r25uoMqtMzSopLE9-NQvg97PYWW-8GvxmnC0Dinao,3741
  keras_hub/src/utils/transformers/__init__.py,sha256=lY7spwqXeGX_75qOHiSCff7FPvFCvRamJMF5ua9OWCg,585
  keras_hub/src/utils/transformers/convert_albert.py,sha256=7b9X1TLrWfHieoeX_K-EXTagkl4Rp9AfPjsPrwArBGY,8280
  keras_hub/src/utils/transformers/convert_bart.py,sha256=RXmPf_XUZrUyqDaOV9T7qVNEP4rAVR44oK1aRZI0v78,14996
@@ -328,7 +344,7 @@ keras_hub/src/utils/transformers/convert_mistral.py,sha256=4QStizMS6ESEPjSI-ls6j
  keras_hub/src/utils/transformers/convert_pali_gemma.py,sha256=BT5eX1QzbjCQCopbMstiejQQWQiB_N77bpD5FMUygEo,11234
  keras_hub/src/utils/transformers/preset_loader.py,sha256=9x9hLhDh_6PAHG5gay5rVoEVyt-gXTQGrnprjMLKvCM,3294
  keras_hub/src/utils/transformers/safetensor_utils.py,sha256=2O8lcCf9yIFt5xiRVOtF1ZkPb5pfhOfDJotBaanD9Zo,3547
- keras_hub_nightly-0.16.1.dev202409240339.dist-info/METADATA,sha256=m5_uFvpe9Xgs6a0BxLOQsc3DumB76ZPDZHoP6ZQaQxE,7061
- keras_hub_nightly-0.16.1.dev202409240339.dist-info/WHEEL,sha256=GV9aMThwP_4oNCtvEC2ec3qUYutgWeAzklro_0m4WJQ,91
- keras_hub_nightly-0.16.1.dev202409240339.dist-info/top_level.txt,sha256=N4J6piIWBKa38A4uV-CnIopnOEf8mHAbkNXafXm_CuA,10
- keras_hub_nightly-0.16.1.dev202409240339.dist-info/RECORD,,
+ keras_hub_nightly-0.16.1.dev202409260340.dist-info/METADATA,sha256=tbYq7M5h0GvH0R4O0viRypG59absfWlxXVZqZhTHwiE,7061
+ keras_hub_nightly-0.16.1.dev202409260340.dist-info/WHEEL,sha256=GV9aMThwP_4oNCtvEC2ec3qUYutgWeAzklro_0m4WJQ,91
+ keras_hub_nightly-0.16.1.dev202409260340.dist-info/top_level.txt,sha256=N4J6piIWBKa38A4uV-CnIopnOEf8mHAbkNXafXm_CuA,10
+ keras_hub_nightly-0.16.1.dev202409260340.dist-info/RECORD,,