keras_hub_nightly-0.16.1.dev202410090340-py3-none-any.whl → keras_hub_nightly-0.16.1.dev202410110340-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
--- a/keras_hub/api/layers/__init__.py
+++ b/keras_hub/api/layers/__init__.py
@@ -40,6 +40,9 @@ from keras_hub.src.models.deeplab_v3.deeplab_v3_image_converter import (
 from keras_hub.src.models.densenet.densenet_image_converter import (
     DenseNetImageConverter,
 )
+from keras_hub.src.models.mix_transformer.mix_transformer_image_converter import (
+    MiTImageConverter,
+)
 from keras_hub.src.models.pali_gemma.pali_gemma_image_converter import (
     PaliGemmaImageConverter,
 )
@@ -49,6 +52,7 @@ from keras_hub.src.models.resnet.resnet_image_converter import (
 from keras_hub.src.models.sam.sam_image_converter import SAMImageConverter
 from keras_hub.src.models.sam.sam_mask_decoder import SAMMaskDecoder
 from keras_hub.src.models.sam.sam_prompt_encoder import SAMPromptEncoder
+from keras_hub.src.models.vgg.vgg_image_classifier import VGGImageConverter
 from keras_hub.src.models.whisper.whisper_audio_converter import (
     WhisperAudioConverter,
 )
--- a/keras_hub/api/models/__init__.py
+++ b/keras_hub/api/models/__init__.py
@@ -208,6 +208,9 @@ from keras_hub.src.models.mix_transformer.mix_transformer_backbone import (
 from keras_hub.src.models.mix_transformer.mix_transformer_classifier import (
     MiTImageClassifier,
 )
+from keras_hub.src.models.mix_transformer.mix_transformer_classifier_preprocessor import (
+    MiTImageClassifierPreprocessor,
+)
 from keras_hub.src.models.mobilenet.mobilenet_backbone import MobileNetBackbone
 from keras_hub.src.models.mobilenet.mobilenet_image_classifier import (
     MobileNetImageClassifier,
@@ -296,6 +299,9 @@ from keras_hub.src.models.text_classifier_preprocessor import (
 from keras_hub.src.models.text_to_image import TextToImage
 from keras_hub.src.models.vgg.vgg_backbone import VGGBackbone
 from keras_hub.src.models.vgg.vgg_image_classifier import VGGImageClassifier
+from keras_hub.src.models.vgg.vgg_image_classifier import (
+    VGGImageClassifierPreprocessor,
+)
 from keras_hub.src.models.vit_det.vit_det_backbone import ViTDetBackbone
 from keras_hub.src.models.whisper.whisper_backbone import WhisperBackbone
 from keras_hub.src.models.whisper.whisper_tokenizer import WhisperTokenizer
--- a/keras_hub/src/layers/preprocessing/image_converter.py
+++ b/keras_hub/src/layers/preprocessing/image_converter.py
@@ -145,8 +145,9 @@ class ImageConverter(PreprocessingLayer):
 
     @preprocessing_function
     def call(self, inputs):
+        x = inputs
         if self.image_size is not None:
-            x = self.resizing(inputs)
+            x = self.resizing(x)
         if self.scale is not None:
             x = x * self._expand_non_channel_dims(self.scale, x)
         if self.offset is not None:
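This hunk fixes a latent `UnboundLocalError`: previously `x` was bound only inside the resize branch, so a converter configured with a `scale` or `offset` but no `image_size` would crash. A minimal standalone sketch of the before/after control flow (the stand-in bodies are hypothetical, not the real layer methods):

```python
def call_before(inputs, image_size=None, scale=None):
    # Pre-fix: `x` is bound only when resizing runs.
    if image_size is not None:
        x = inputs  # stand-in for self.resizing(inputs)
    if scale is not None:
        x = x * scale  # UnboundLocalError when image_size is None
    return x


def call_after(inputs, image_size=None, scale=None):
    # Post-fix: `x` starts as `inputs`, so every branch is optional.
    x = inputs
    if image_size is not None:
        x = x  # stand-in for self.resizing(x)
    if scale is not None:
        x = x * scale
    return x


print(call_after(2.0, scale=0.5))  # 1.0
# call_before(2.0, scale=0.5) raises UnboundLocalError.
```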
--- a/keras_hub/src/models/mix_transformer/__init__.py
+++ b/keras_hub/src/models/mix_transformer/__init__.py
@@ -0,0 +1,12 @@
+from keras_hub.src.models.mix_transformer.mix_transformer_backbone import (
+    MiTBackbone,
+)
+from keras_hub.src.models.mix_transformer.mix_transformer_classifier import (
+    MiTImageClassifier,
+)
+from keras_hub.src.models.mix_transformer.mix_transformer_presets import (
+    backbone_presets,
+)
+from keras_hub.src.utils.preset_utils import register_presets
+
+register_presets(backbone_presets, MiTBackbone)
--- a/keras_hub/src/models/mix_transformer/mix_transformer_classifier.py
+++ b/keras_hub/src/models/mix_transformer/mix_transformer_classifier.py
@@ -3,8 +3,12 @@ from keras_hub.src.models.image_classifier import ImageClassifier
 from keras_hub.src.models.mix_transformer.mix_transformer_backbone import (
     MiTBackbone,
 )
+from keras_hub.src.models.mix_transformer.mix_transformer_classifier_preprocessor import (
+    MiTImageClassifierPreprocessor,
+)
 
 
 @keras_hub_export("keras_hub.models.MiTImageClassifier")
 class MiTImageClassifier(ImageClassifier):
     backbone_cls = MiTBackbone
+    preprocessor_cls = MiTImageClassifierPreprocessor
--- /dev/null
+++ b/keras_hub/src/models/mix_transformer/mix_transformer_classifier_preprocessor.py
@@ -0,0 +1,16 @@
+from keras_hub.src.api_export import keras_hub_export
+from keras_hub.src.models.image_classifier_preprocessor import (
+    ImageClassifierPreprocessor,
+)
+from keras_hub.src.models.mix_transformer.mix_transformer_backbone import (
+    MiTBackbone,
+)
+from keras_hub.src.models.mix_transformer.mix_transformer_image_converter import (
+    MiTImageConverter,
+)
+
+
+@keras_hub_export("keras_hub.models.MiTImageClassifierPreprocessor")
+class MiTImageClassifierPreprocessor(ImageClassifierPreprocessor):
+    backbone_cls = MiTBackbone
+    image_converter_cls = MiTImageConverter
--- /dev/null
+++ b/keras_hub/src/models/mix_transformer/mix_transformer_image_converter.py
@@ -0,0 +1,8 @@
+from keras_hub.src.api_export import keras_hub_export
+from keras_hub.src.layers.preprocessing.image_converter import ImageConverter
+from keras_hub.src.models.mix_transformer import MiTBackbone
+
+
+@keras_hub_export("keras_hub.layers.MiTImageConverter")
+class MiTImageConverter(ImageConverter):
+    backbone_cls = MiTBackbone
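Together, these files wire MiT preprocessing through class attributes alone: the converter handles resizing and rescaling, the preprocessor pairs it with the backbone, and the classifier points at the preprocessor. A hedged usage sketch, assuming the base `ImageConverter`'s `image_size`/`scale` constructor arguments:

```python
import numpy as np

from keras_hub.src.models.mix_transformer.mix_transformer_image_converter import (
    MiTImageConverter,
)

# Raw uint8 images in; resized, rescaled float images out.
converter = MiTImageConverter(image_size=(512, 512), scale=1.0 / 255.0)
images = np.random.randint(0, 256, size=(2, 600, 800, 3), dtype="uint8")
print(converter(images).shape)  # (2, 512, 512, 3)
```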
--- a/keras_hub/src/models/mix_transformer/mix_transformer_layers.py
+++ b/keras_hub/src/models/mix_transformer/mix_transformer_layers.py
@@ -28,19 +28,23 @@ class OverlappingPatchingAndEmbedding(keras.layers.Layer):
         self.patch_size = patch_size
         self.stride = stride
 
+        padding_size = self.patch_size // 2
+
+        self.padding = keras.layers.ZeroPadding2D(
+            padding=(padding_size, padding_size)
+        )
         self.proj = keras.layers.Conv2D(
             filters=project_dim,
             kernel_size=patch_size,
             strides=stride,
-            padding="same",
+            padding="valid",
         )
-        self.norm = keras.layers.LayerNormalization()
+        self.norm = keras.layers.LayerNormalization(epsilon=1e-5)
 
     def call(self, x):
+        x = self.padding(x)
         x = self.proj(x)
-        # B, H, W, C
-        shape = x.shape
-        x = ops.reshape(x, (-1, shape[1] * shape[2], shape[3]))
+        x = ops.reshape(x, (-1, x.shape[1] * x.shape[2], x.shape[3]))
         x = self.norm(x)
         return x
 
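The switch from `padding="same"` to an explicit `ZeroPadding2D` plus a `"valid"` conv matters because Keras `"same"` padding can pad asymmetrically when `stride > 1`, whereas the reference (PyTorch) SegFormer implementation pads symmetrically by `patch_size // 2`. A sketch of the resulting output size, assuming the standard SegFormer stage-1 settings of `patch_size=7`, `stride=4`:

```python
def patched_out_size(size, patch_size, stride):
    # H_out = floor((H + 2p - k) / s) + 1 with symmetric padding p = k // 2,
    # which is exactly what ZeroPadding2D + a "valid" conv computes.
    pad = patch_size // 2
    return (size + 2 * pad - patch_size) // stride + 1


print(patched_out_size(512, patch_size=7, stride=4))  # 128
```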
--- /dev/null
+++ b/keras_hub/src/models/mix_transformer/mix_transformer_presets.py
@@ -0,0 +1,151 @@
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""MiT model preset configurations."""
+
+backbone_presets_with_weights = {
+    "mit_b0_ade20k_512": {
+        "metadata": {
+            "description": (
+                "MiT (MixTransformer) model with 8 transformer blocks."
+            ),
+            "params": 3321962,
+            "official_name": "MiT",
+            "path": "mit",
+        },
+        "kaggle_handle": "kaggle://kerashub/mix-transformer/keras/mit_b0_ade20k_512",
+    },
+    "mit_b1_ade20k_512": {
+        "metadata": {
+            "description": (
+                "MiT (MixTransformer) model with 8 transformer blocks."
+            ),
+            "params": 13156554,
+            "official_name": "MiT",
+            "path": "mit",
+        },
+        "kaggle_handle": "kaggle://kerashub/mix-transformer/keras/mit_b1_ade20k_512",
+    },
+    "mit_b2_ade20k_512": {
+        "metadata": {
+            "description": (
+                "MiT (MixTransformer) model with 16 transformer blocks."
+            ),
+            "params": 24201418,
+            "official_name": "MiT",
+            "path": "mit",
+        },
+        "kaggle_handle": "kaggle://kerashub/mix-transformer/keras/mit_b2_ade20k_512",
+    },
+    "mit_b3_ade20k_512": {
+        "metadata": {
+            "description": (
+                "MiT (MixTransformer) model with 28 transformer blocks."
+            ),
+            "params": 44077258,
+            "official_name": "MiT",
+            "path": "mit",
+        },
+        "kaggle_handle": "kaggle://kerashub/mix-transformer/keras/mit_b3_ade20k_512",
+    },
+    "mit_b4_ade20k_512": {
+        "metadata": {
+            "description": (
+                "MiT (MixTransformer) model with 41 transformer blocks."
+            ),
+            "params": 60847818,
+            "official_name": "MiT",
+            "path": "mit",
+        },
+        "kaggle_handle": "kaggle://kerashub/mix-transformer/keras/mit_b4_ade20k_512",
+    },
+    "mit_b5_ade20k_640": {
+        "metadata": {
+            "description": (
+                "MiT (MixTransformer) model with 52 transformer blocks."
+            ),
+            "params": 81448138,
+            "official_name": "MiT",
+            "path": "mit",
+        },
+        "kaggle_handle": "kaggle://kerashub/mix-transformer/keras/mit_b5_ade20k_512",
+    },
+    "mit_b0_cityscapes_1024": {
+        "metadata": {
+            "description": (
+                "MiT (MixTransformer) model with 8 transformer blocks."
+            ),
+            "params": 3321962,
+            "official_name": "MiT",
+            "path": "mit",
+        },
+        "kaggle_handle": "kaggle://kerashub/mix-transformer/keras/mit_b0_cityscapes_1024",
+    },
+    "mit_b1_cityscapes_1024": {
+        "metadata": {
+            "description": (
+                "MiT (MixTransformer) model with 8 transformer blocks."
+            ),
+            "params": 13156554,
+            "official_name": "MiT",
+            "path": "mit",
+        },
+        "kaggle_handle": "kaggle://kerashub/mix-transformer/keras/mit_b1_cityscapes_1024",
+    },
+    "mit_b2_cityscapes_1024": {
+        "metadata": {
+            "description": (
+                "MiT (MixTransformer) model with 16 transformer blocks."
+            ),
+            "params": 24201418,
+            "official_name": "MiT",
+            "path": "mit",
+        },
+        "kaggle_handle": "kaggle://kerashub/mix-transformer/keras/mit_b2_cityscapes_1024",
+    },
+    "mit_b3_cityscapes_1024": {
+        "metadata": {
+            "description": (
+                "MiT (MixTransformer) model with 28 transformer blocks."
+            ),
+            "params": 44077258,
+            "official_name": "MiT",
+            "path": "mit",
+        },
+        "kaggle_handle": "kaggle://kerashub/mix-transformer/keras/mit_b3_cityscapes_1024",
+    },
+    "mit_b4_cityscapes_1024": {
+        "metadata": {
+            "description": (
+                "MiT (MixTransformer) model with 41 transformer blocks."
+            ),
+            "params": 60847818,
+            "official_name": "MiT",
+            "path": "mit",
+        },
+        "kaggle_handle": "kaggle://kerashub/mix-transformer/keras/mit_b4_cityscapes_1024",
+    },
+    "mit_b5_cityscapes_1024": {
+        "metadata": {
+            "description": (
+                "MiT (MixTransformer) model with 52 transformer blocks."
+            ),
+            "params": 81448138,
+            "official_name": "MiT",
+            "path": "mit",
+        },
+        "kaggle_handle": "kaggle://kerashub/mix-transformer/keras/mit_b5_cityscapes_1024",
+    },
+}
+
+backbone_presets = {
+    **backbone_presets_with_weights,
+}
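Because the new `mix_transformer/__init__.py` above calls `register_presets(backbone_presets, MiTBackbone)`, these names become resolvable through the standard constructors. A hedged usage sketch (preset names are taken from this file; the `from_preset` signature is the usual KerasHub one, and building a classifier on these segmentation-trained weights is purely illustrative):

```python
import keras_hub

# Load the MiT-B0 backbone published for ADE20K at 512x512.
backbone = keras_hub.models.MiTBackbone.from_preset("mit_b0_ade20k_512")

# Or build the classification task directly from the same preset.
classifier = keras_hub.models.MiTImageClassifier.from_preset(
    "mit_b0_ade20k_512",
    num_classes=10,
)
```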
--- a/keras_hub/src/models/task.py
+++ b/keras_hub/src/models/task.py
@@ -339,7 +339,7 @@ class Task(PipelineModel):
                 add_layer(layer, info)
             elif isinstance(layer, ImageConverter):
                 info = "Image size: "
-                info += highlight_shape(layer.image_size())
+                info += highlight_shape(layer.image_size)
                 add_layer(layer, info)
             elif isinstance(layer, AudioConverter):
                 info = "Audio shape: "
--- a/keras_hub/src/models/vgg/__init__.py
+++ b/keras_hub/src/models/vgg/__init__.py
@@ -0,0 +1 @@
+from keras_hub.src.models.vgg.vgg_backbone import VGGBackbone
--- a/keras_hub/src/models/vgg/vgg_backbone.py
+++ b/keras_hub/src/models/vgg/vgg_backbone.py
@@ -47,12 +47,11 @@ class VGGBackbone(Backbone):
         image_shape=(None, None, 3),
         **kwargs,
     ):
-
         # === Functional Model ===
         img_input = keras.layers.Input(shape=image_shape)
         x = img_input
 
-        for stack_index in range(len(stackwise_num_repeats) - 1):
+        for stack_index in range(len(stackwise_num_repeats)):
             x = apply_vgg_block(
                 x=x,
                 num_layers=stackwise_num_repeats[stack_index],
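The loop bound here was an off-by-one: `range(len(stackwise_num_repeats) - 1)` silently dropped the final VGG stack. A quick illustration with VGG16's repeat counts (taken from `REPEATS_BY_SIZE` in the new `convert_vgg.py` further below):

```python
stackwise_num_repeats = [2, 2, 3, 3, 3]  # vgg16

old_stacks = [stackwise_num_repeats[i] for i in range(len(stackwise_num_repeats) - 1)]
new_stacks = [stackwise_num_repeats[i] for i in range(len(stackwise_num_repeats))]

print(old_stacks)  # [2, 2, 3, 3] -- the last three-conv stack was never built
print(new_stacks)  # [2, 2, 3, 3, 3]
```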
--- a/keras_hub/src/models/vgg/vgg_image_classifier.py
+++ b/keras_hub/src/models/vgg/vgg_image_classifier.py
@@ -1,11 +1,26 @@
 import keras
 
 from keras_hub.src.api_export import keras_hub_export
+from keras_hub.src.layers.preprocessing.image_converter import ImageConverter
 from keras_hub.src.models.image_classifier import ImageClassifier
+from keras_hub.src.models.image_classifier_preprocessor import (
+    ImageClassifierPreprocessor,
+)
 from keras_hub.src.models.task import Task
 from keras_hub.src.models.vgg.vgg_backbone import VGGBackbone
 
 
+@keras_hub_export("keras_hub.layers.VGGImageConverter")
+class VGGImageConverter(ImageConverter):
+    backbone_cls = VGGBackbone
+
+
+@keras_hub_export("keras_hub.models.VGGImageClassifierPreprocessor")
+class VGGImageClassifierPreprocessor(ImageClassifierPreprocessor):
+    backbone_cls = VGGBackbone
+    image_converter_cls = VGGImageConverter
+
+
 @keras_hub_export("keras_hub.models.VGGImageClassifier")
 class VGGImageClassifier(ImageClassifier):
     """VGG image classification task.
@@ -96,13 +111,14 @@ class VGGImageClassifier(ImageClassifier):
     """
 
     backbone_cls = VGGBackbone
+    preprocessor_cls = VGGImageClassifierPreprocessor
 
     def __init__(
         self,
         backbone,
         num_classes,
         preprocessor=None,
-        pooling="flatten",
+        pooling="avg",
         pooling_hidden_dim=4096,
         activation=None,
         dropout=0.0,
@@ -141,24 +157,46 @@ class VGGImageClassifier(ImageClassifier):
                 "Unknown `pooling` type. Polling should be either `'avg'` or "
                 f"`'max'`. Received: pooling={pooling}."
             )
-        self.output_dropout = keras.layers.Dropout(
-            dropout,
-            dtype=head_dtype,
-            name="output_dropout",
-        )
-        self.output_dense = keras.layers.Dense(
-            num_classes,
-            activation=activation,
-            dtype=head_dtype,
-            name="predictions",
+
+        self.head = keras.Sequential(
+            [
+                keras.layers.Conv2D(
+                    filters=4096,
+                    kernel_size=7,
+                    name="fc1",
+                    activation=activation,
+                    use_bias=True,
+                    padding="same",
+                ),
+                keras.layers.Dropout(
+                    rate=dropout,
+                    dtype=head_dtype,
+                    name="output_dropout",
+                ),
+                keras.layers.Conv2D(
+                    filters=4096,
+                    kernel_size=1,
+                    name="fc2",
+                    activation=activation,
+                    use_bias=True,
+                    padding="same",
+                ),
+                self.pooler,
+                keras.layers.Dense(
+                    num_classes,
+                    activation=activation,
+                    dtype=head_dtype,
+                    name="predictions",
+                ),
+            ],
+            name="head",
         )
 
         # === Functional Model ===
         inputs = self.backbone.input
         x = self.backbone(inputs)
-        x = self.pooler(x)
-        x = self.output_dropout(x)
-        outputs = self.output_dense(x)
+        outputs = self.head(x)
+
         # Skip the parent class functional model.
         Task.__init__(
             self,
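The head is rebuilt as convolutions rather than `Dense` layers so its layout lines up one-to-one with timm's `pre_logits` ConvMlp (see `convert_head` in the new `convert_vgg.py` below, which ports `pre_logits.fc1`/`pre_logits.fc2` into `fc1`/`fc2`). A standalone shape walk-through, assuming a 224x224 input that the backbone has reduced to a 7x7x512 feature map; this is a hypothetical mirror of the new head with `pooling="avg"` and 1000 classes, not the class itself:

```python
import keras

head = keras.Sequential(
    [
        keras.layers.Conv2D(4096, 7, padding="same", activation="relu", name="fc1"),
        keras.layers.Dropout(0.0),
        keras.layers.Conv2D(4096, 1, padding="same", activation="relu", name="fc2"),
        keras.layers.GlobalAveragePooling2D(),  # stands in for `self.pooler`
        keras.layers.Dense(1000, name="predictions"),
    ],
    name="head",
)
print(head(keras.ops.ones((1, 7, 7, 512))).shape)  # (1, 1000)
```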
--- /dev/null
+++ b/keras_hub/src/utils/timm/convert_vgg.py
@@ -0,0 +1,85 @@
+from typing import Any
+
+import numpy as np
+
+from keras_hub.src.models.vgg.vgg_backbone import VGGBackbone
+from keras_hub.src.models.vgg.vgg_image_classifier import VGGImageClassifier
+
+backbone_cls = VGGBackbone
+
+
+REPEATS_BY_SIZE = {
+    "vgg11": [1, 1, 2, 2, 2],
+    "vgg13": [2, 2, 2, 2, 2],
+    "vgg16": [2, 2, 3, 3, 3],
+    "vgg19": [2, 2, 4, 4, 4],
+}
+
+
+def convert_backbone_config(timm_config):
+    architecture = timm_config["architecture"]
+    stackwise_num_repeats = REPEATS_BY_SIZE[architecture]
+    return dict(
+        stackwise_num_repeats=stackwise_num_repeats,
+        stackwise_num_filters=[64, 128, 256, 512, 512],
+    )
+
+
+def convert_conv2d(
+    model,
+    loader,
+    keras_layer_name: str,
+    hf_layer_name: str,
+):
+    loader.port_weight(
+        model.get_layer(keras_layer_name).kernel,
+        hf_weight_key=f"{hf_layer_name}.weight",
+        hook_fn=lambda x, _: np.transpose(x, (2, 3, 1, 0)),
+    )
+    loader.port_weight(
+        model.get_layer(keras_layer_name).bias,
+        hf_weight_key=f"{hf_layer_name}.bias",
+    )
+
+
+def convert_weights(
+    backbone: VGGBackbone,
+    loader,
+    timm_config: dict[Any],
+):
+    architecture = timm_config["architecture"]
+    stackwise_num_repeats = REPEATS_BY_SIZE[architecture]
+
+    hf_index_to_keras_layer_name = {}
+    layer_index = 0
+    for block_index, repeats_in_block in enumerate(stackwise_num_repeats):
+        for repeat_index in range(repeats_in_block):
+            hf_index = layer_index
+            layer_index += 2  # Conv + activation layers.
+            layer_name = f"block{block_index + 1}_conv{repeat_index + 1}"
+            hf_index_to_keras_layer_name[hf_index] = layer_name
+        layer_index += 1  # Pooling layer after blocks.
+
+    for hf_index, keras_layer_name in hf_index_to_keras_layer_name.items():
+        convert_conv2d(
+            backbone, loader, keras_layer_name, f"features.{hf_index}"
+        )
+
+
+def convert_head(
+    task: VGGImageClassifier,
+    loader,
+    timm_config: dict[Any],
+):
+    convert_conv2d(task.head, loader, "fc1", "pre_logits.fc1")
+    convert_conv2d(task.head, loader, "fc2", "pre_logits.fc2")
+
+    loader.port_weight(
+        task.head.get_layer("predictions").kernel,
+        hf_weight_key="head.fc.weight",
+        hook_fn=lambda x, _: np.transpose(np.squeeze(x)),
+    )
+    loader.port_weight(
+        task.head.get_layer("predictions").bias,
+        hf_weight_key="head.fc.bias",
+    )
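The index arithmetic in `convert_weights` mirrors how timm builds VGG: `features` is a flat `nn.Sequential` in which every conv is followed by an activation (two slots) and every block ends with a pooling layer (one slot). A standalone replay of the mapping for `vgg11`:

```python
repeats = [1, 1, 2, 2, 2]  # REPEATS_BY_SIZE["vgg11"]

mapping, index = {}, 0
for block, n_convs in enumerate(repeats):
    for conv in range(n_convs):
        mapping[index] = f"block{block + 1}_conv{conv + 1}"
        index += 2  # conv + activation
    index += 1  # pooling layer closing the block

print(mapping)
# {0: 'block1_conv1', 3: 'block2_conv1', 6: 'block3_conv1', 8: 'block3_conv2',
#  11: 'block4_conv1', 13: 'block4_conv2', 16: 'block5_conv1', 18: 'block5_conv2'}
```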
--- a/keras_hub/src/utils/timm/preset_loader.py
+++ b/keras_hub/src/utils/timm/preset_loader.py
@@ -5,6 +5,7 @@ from keras_hub.src.utils.preset_utils import PresetLoader
 from keras_hub.src.utils.preset_utils import jax_memory_cleanup
 from keras_hub.src.utils.timm import convert_densenet
 from keras_hub.src.utils.timm import convert_resnet
+from keras_hub.src.utils.timm import convert_vgg
 from keras_hub.src.utils.transformers.safetensor_utils import SafetensorLoader
 
 
@@ -16,6 +17,8 @@ class TimmPresetLoader(PresetLoader):
             self.converter = convert_resnet
         elif "densenet" in architecture:
             self.converter = convert_densenet
+        elif "vgg" in architecture:
+            self.converter = convert_vgg
         else:
             raise ValueError(
                 "KerasHub has no converter for timm models "
@@ -50,11 +53,10 @@ class TimmPresetLoader(PresetLoader):
 
     def load_image_converter(self, cls, **kwargs):
         pretrained_cfg = self.config.get("pretrained_cfg", None)
-        if not pretrained_cfg or "input_size" not in pretrained_cfg:
+        if not pretrained_cfg:
            return None
         # This assumes the same basic setup for all timm preprocessing, We may
         # need to extend this as we cover more model types.
-        input_size = pretrained_cfg["input_size"]
         mean = pretrained_cfg["mean"]
         std = pretrained_cfg["std"]
         scale = [1.0 / 255.0 / s for s in std]
@@ -63,7 +65,6 @@ class TimmPresetLoader(PresetLoader):
         if interpolation not in ("bilinear", "nearest", "bicubic"):
             interpolation = "bilinear"  # Unsupported interpolation type.
         return cls(
-            image_size=input_size[1:],
             scale=scale,
             offset=offset,
             interpolation=interpolation,
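Dropping `image_size` from the constructed converter lets models such as VGG (whose backbone defaults to `image_shape=(None, None, 3)`) run at native resolution instead of being pinned to timm's `input_size`. The `scale` list built here folds the 1/255 rescale and the per-channel std into a single multiplier; a quick arithmetic check, assuming the usual ImageNet statistics carried in timm's `pretrained_cfg` (the `offset = -mean / std` step sits outside this hunk):

```python
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]

scale = [1.0 / 255.0 / s for s in std]        # as in load_image_converter
offset = [-m / s for m, s in zip(mean, std)]  # assumed companion step

x = 128.0  # one raw red-channel pixel value
print(x * scale[0] + offset[0])        # ~0.0741
print((x / 255.0 - mean[0]) / std[0])  # same normalization, spelled out
```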
--- a/keras_hub/src/version_utils.py
+++ b/keras_hub/src/version_utils.py
@@ -1,7 +1,7 @@
 from keras_hub.src.api_export import keras_hub_export
 
 # Unique source of truth for the version number.
-__version__ = "0.16.1.dev202410090340"
+__version__ = "0.16.1.dev202410110340"
 
 
 @keras_hub_export("keras_hub.version")
--- a/keras_hub_nightly-0.16.1.dev202410090340.dist-info/METADATA
+++ b/keras_hub_nightly-0.16.1.dev202410110340.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: keras-hub-nightly
-Version: 0.16.1.dev202410090340
+Version: 0.16.1.dev202410110340
 Summary: Industry-strength Natural Language Processing extensions for Keras.
 Home-page: https://github.com/keras-team/keras-hub
 Author: Keras team
--- a/keras_hub_nightly-0.16.1.dev202410090340.dist-info/RECORD
+++ b/keras_hub_nightly-0.16.1.dev202410110340.dist-info/RECORD
@@ -1,15 +1,15 @@
 keras_hub/__init__.py,sha256=QGdXyHgYt6cMUAP1ebxwc6oR86dE0dkMxNy2eOCQtFo,855
 keras_hub/api/__init__.py,sha256=spMxsgqzjpeuC8rY4WP-2kAZ2qwwKRSbFwddXgUjqQE,524
 keras_hub/api/bounding_box/__init__.py,sha256=T8R_X7BPm0et1xaZq8565uJmid7dylsSFSj4V-rGuFQ,1097
-keras_hub/api/layers/__init__.py,sha256=XImD0tHdnDR1a7q3u-Pw-VRMASi9sDtrV6hr2beVYTw,2331
+keras_hub/api/layers/__init__.py,sha256=NpuVqxRCKbnvuW1QjZiSy7vHsI9Ej95ZCnNp4Lm5Hj8,2515
 keras_hub/api/metrics/__init__.py,sha256=So8Ec-lOcTzn_UUMmAdzDm8RKkPu2dbRUm2px8gpUEI,381
-keras_hub/api/models/__init__.py,sha256=m3v73xL31KBAJaHZEfscrFIAFRoc4NIMM2bmZ_0D9Ys,14657
+keras_hub/api/models/__init__.py,sha256=9-1qQyoGODam73Vlo49EI2fzTwzwgdlCshlTMRrFEqg,14884
 keras_hub/api/samplers/__init__.py,sha256=n-_SEXxr2LNUzK2FqVFN7alsrkx1P_HOVTeLZKeGCdE,730
 keras_hub/api/tokenizers/__init__.py,sha256=_f-r_cyUM2fjBB7iO84ThOdqqsAxHNIewJ2EBDlM0cA,2524
 keras_hub/api/utils/__init__.py,sha256=Gp1E6gG-RtKQS3PBEQEOz9PQvXkXaJ0ySGMqZ7myN7A,215
 keras_hub/src/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 keras_hub/src/api_export.py,sha256=9pQZK27JObxWZ96QPLBp1OBsjWigh1iuV6RglPGMRk0,1499
-keras_hub/src/version_utils.py,sha256=OAeKND2_C34aewj9b9MT6TsgAA1AKvb-uZhnN4UWS5s,222
+keras_hub/src/version_utils.py,sha256=9zvnHnaU7MGFEG0NsmxQpWEG3qxlcuPtyLNw-vhmajA,222
 keras_hub/src/bounding_box/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 keras_hub/src/bounding_box/converters.py,sha256=a5po8DBm87oz2EXfi-0uEZHCMlCJPIb4-MaZIdYx3Dg,17865
 keras_hub/src/bounding_box/formats.py,sha256=YmskOz2BOSat7NaE__J9VfpSNGPJJR0znSzA4lp8MMI,3868
@@ -34,7 +34,7 @@ keras_hub/src/layers/modeling/transformer_encoder.py,sha256=howjIXH_vgBOKaXaIa7m
 keras_hub/src/layers/modeling/transformer_layer_utils.py,sha256=FuznrW33iG50B-VDN8R1RjuA5JG72yNMJ1TBgWLxR0E,3487
 keras_hub/src/layers/preprocessing/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 keras_hub/src/layers/preprocessing/audio_converter.py,sha256=YGh_kQw65a1Z6S5zzSNVP-ChyLYHq3-eOYpOS53xIN8,4156
-keras_hub/src/layers/preprocessing/image_converter.py,sha256=zlg6VKQWjKDCojJnI9VfK4Rt88QE29XjpDewZQNT8IE,10166
+keras_hub/src/layers/preprocessing/image_converter.py,sha256=j8SdL-pFOrWIGIV_HwlABUPhdcSOZXYhPRlvFCukAU8,10180
 keras_hub/src/layers/preprocessing/masked_lm_mask_generator.py,sha256=itxWq3FHYlR0I7jKarQlSKbSmRLl9ut_UTSP3ZDwP0A,8162
 keras_hub/src/layers/preprocessing/multi_segment_packer.py,sha256=ZNqnUFnc9Af122Q7T6YyUoXgIdU9AgIJfsvR1UrCjFU,12068
 keras_hub/src/layers/preprocessing/preprocessing_layer.py,sha256=WyX41b9Ev_YJ5uVQVOAqD0PQasMOPDoyDjl_PkzkAkE,687
@@ -64,7 +64,7 @@ keras_hub/src/models/masked_lm_preprocessor.py,sha256=g8vrnyYwqdnSw5xppROM1Gzo_j
 keras_hub/src/models/preprocessor.py,sha256=3CWLsMpQC77w7GzM3fU3Jf-G62ldJjufKyzPVvnGdeI,7970
 keras_hub/src/models/seq_2_seq_lm.py,sha256=w0gX-5YZjatfvAJmFAgSHyqS_BLqc8FF8DPLGK8mrgI,1864
 keras_hub/src/models/seq_2_seq_lm_preprocessor.py,sha256=HUHRbWRG5SF1pPpotGzBhXlrMh4pLFxgAoFk05FIrB4,9687
-keras_hub/src/models/task.py,sha256=2iapEFHvzyl0ASlH6yzQA2OHSr1jV1V-pLtagHdBncQ,14416
+keras_hub/src/models/task.py,sha256=VN-CClNw3EB5Byb7HyyI3CqaS140od7-dmQInmYFSKg,14414
 keras_hub/src/models/text_classifier.py,sha256=VBDvQUHTpJPqKp7A4VAtm35FOmJ3yMo0DW6GdX67xG0,4159
 keras_hub/src/models/text_classifier_preprocessor.py,sha256=EoWp-GHnaLnAKTdAzDmC-soAV92ATF3QozdubdV2WXI,4722
 keras_hub/src/models/text_to_image.py,sha256=7s6rB1To46A7l9ItqRw3Pe4DGRm7YnqbHJ-RyNAlLPE,12973
@@ -209,10 +209,13 @@ keras_hub/src/models/mistral/mistral_layer_norm.py,sha256=nimMZ5CTPK8v9eflfrGuzq
 keras_hub/src/models/mistral/mistral_presets.py,sha256=gucgdaFAiU-vRDS1g9zWGHjbDF_jaCiljPibCF4yVqY,1329
 keras_hub/src/models/mistral/mistral_tokenizer.py,sha256=wyzR_Y2XwrDiBV3jIeBChSPiaOkVVaxFuLxMH2F6EYA,2005
 keras_hub/src/models/mistral/mistral_transformer_decoder.py,sha256=RDIIB3FhneHZP11tNUFQT9DcWawCMnrtVxtSvtnP3ts,9542
-keras_hub/src/models/mix_transformer/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+keras_hub/src/models/mix_transformer/__init__.py,sha256=neU-h7C0sXS6OmtS5NFJeJ1lF13OW3DaUlT6LXhl6vA,409
 keras_hub/src/models/mix_transformer/mix_transformer_backbone.py,sha256=B4hdhWHZ93lS937BGSSxovDKVXQZVuWrMbFwECFoWrg,6048
-keras_hub/src/models/mix_transformer/mix_transformer_classifier.py,sha256=uXO2-GzI_25TdlXe8O8qvnM7tryadfetVDW3yJLGfiI,348
-keras_hub/src/models/mix_transformer/mix_transformer_layers.py,sha256=SzyJJhuyESlsCgndmZNYuuF0Ogb1FKoYkSfDJnThgT0,9538
+keras_hub/src/models/mix_transformer/mix_transformer_classifier.py,sha256=pVfbbTNuiZRFElCGLyNO3gknNGnut-6-L-zAVB4Nb5w,531
+keras_hub/src/models/mix_transformer/mix_transformer_classifier_preprocessor.py,sha256=lSUuMAJiyWDVH0AVjG2y684bU3msxI3_UTa_xWyLLKQ,570
+keras_hub/src/models/mix_transformer/mix_transformer_image_converter.py,sha256=WG2LjuagCxSYXkFgqd4bHyUoMLFCzTj9QjJBoptW6WM,323
+keras_hub/src/models/mix_transformer/mix_transformer_layers.py,sha256=9AbA4kCJkjeV7fAwbRns8VGn0l1pgQ3CqFPjY-99VGA,9695
+keras_hub/src/models/mix_transformer/mix_transformer_presets.py,sha256=rWrjAAwc9Kmo0c66CNh5cuIpySzqqLKj_VI6hlI9d44,5116
 keras_hub/src/models/mobilenet/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 keras_hub/src/models/mobilenet/mobilenet_backbone.py,sha256=nlXdMqcj18iahy60aew4ON79EHUEuNIgvKY9dToH284,18191
 keras_hub/src/models/mobilenet/mobilenet_image_classifier.py,sha256=l5jo99I0fLlbwLub5jHw07CjC-NnmuV-ySJwXGI20Ek,351
@@ -292,9 +295,9 @@ keras_hub/src/models/t5/t5_transformer_layer.py,sha256=uDeP84F1x7xJxki5iKe12Zn6e
 keras_hub/src/models/vae/__init__.py,sha256=i3UaSW4IJf76O7lSPE1dyxOVjuHx8iAYKivqvUbDHOw,62
 keras_hub/src/models/vae/vae_backbone.py,sha256=aYf1sGteFJ7FyR3X8Ek6QBjAT5GjRtQTK2jXhYVJeM4,6671
 keras_hub/src/models/vae/vae_layers.py,sha256=N83CYM1zgbl1EIjAOs3cFCkJEdxvbXkgM9ghKyljFAg,27752
-keras_hub/src/models/vgg/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-keras_hub/src/models/vgg/vgg_backbone.py,sha256=QnEDKn5n9bA9p3nvt5fBHnAssvnLxR0qv-oB372Ts0U,3702
-keras_hub/src/models/vgg/vgg_image_classifier.py,sha256=Dtq_HIJP6fHe8m7ZVLVn8IbHEsVMFWLvWMmn8TU1ntw,6600
+keras_hub/src/models/vgg/__init__.py,sha256=1ydFmkTOix2kOnDHie3srD4XD0dQ_7iR8OYbJzBM_YM,62
+keras_hub/src/models/vgg/vgg_backbone.py,sha256=qes1AsKwBDI7eQ3aC1uRievMkVNGXM9TNhtKLb9eZiU,3697
+keras_hub/src/models/vgg/vgg_image_classifier.py,sha256=bl6XM7l9fnOTGFreqOO3Z1jreusjhA4l7G0xjimfUKA,7829
 keras_hub/src/models/vit_det/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 keras_hub/src/models/vit_det/vit_det_backbone.py,sha256=GzwHXAfttExqDaGU4R2LAvng1gzjuvO3HMqUPwNUy9g,7656
 keras_hub/src/models/vit_det/vit_layers.py,sha256=oCKeUw5ckyUAGvmFPuxIiIAqgmC3uqh85LfZcgyh964,19852
@@ -350,7 +353,8 @@ keras_hub/src/utils/imagenet/imagenet_utils.py,sha256=MvIvv1WJo51ZXBxy4S7t_DsN3Z
 keras_hub/src/utils/timm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 keras_hub/src/utils/timm/convert_densenet.py,sha256=V-GRjWuDnlh3b1EMxqahwZ3GMwSgOa3v0HOfb2ZZ-d0,3342
 keras_hub/src/utils/timm/convert_resnet.py,sha256=ee8eTml0ffJKE8avzGoLFcpjPF63DsvoIUArAGa8Ngg,5832
-keras_hub/src/utils/timm/preset_loader.py,sha256=SbDqy2nr54_Y7bwe4sICQ8n-kHnw0PtvNI52tgrH170,3095
+keras_hub/src/utils/timm/convert_vgg.py,sha256=MT5jGnLrzenPpe66Af_Lp1IdR9KGtsSrcmn6_UPqHvQ,2419
+keras_hub/src/utils/timm/preset_loader.py,sha256=2GJI2YeKGVovtDqc930uGta12yiyuCL9YrsTyGhqt9Y,3094
 keras_hub/src/utils/transformers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 keras_hub/src/utils/transformers/convert_albert.py,sha256=VdKclZpCxtDWq3UbUUQZf4fR9DJK_JYZ73B4O_G9skg,7695
 keras_hub/src/utils/transformers/convert_bart.py,sha256=Tk4h9Md9rwN5wjQbGIVrC7qzDpF8kI8qm-FKL8HlUok,14411
@@ -363,7 +367,7 @@ keras_hub/src/utils/transformers/convert_mistral.py,sha256=kVhN9h1ZFVhwkNW8p3wnS
 keras_hub/src/utils/transformers/convert_pali_gemma.py,sha256=B1leeDw96Yvu81hYumf66hIid07k5NLqoeWAJgPnaLs,10649
 keras_hub/src/utils/transformers/preset_loader.py,sha256=GS44hZUuGQCtzsyn8z44ZpHdftd3DFemwV2hx2bQa-U,2738
 keras_hub/src/utils/transformers/safetensor_utils.py,sha256=rPK-Uw1CG0DX0d_UAD-r2cG9fw8GI8bvAlrcXfQ9g4c,3323
-keras_hub_nightly-0.16.1.dev202410090340.dist-info/METADATA,sha256=Wj_ah1mdW2q0gdurqH9ENThTtO9Qjg_ctqIzbiRXn-A,7458
-keras_hub_nightly-0.16.1.dev202410090340.dist-info/WHEEL,sha256=GV9aMThwP_4oNCtvEC2ec3qUYutgWeAzklro_0m4WJQ,91
-keras_hub_nightly-0.16.1.dev202410090340.dist-info/top_level.txt,sha256=N4J6piIWBKa38A4uV-CnIopnOEf8mHAbkNXafXm_CuA,10
-keras_hub_nightly-0.16.1.dev202410090340.dist-info/RECORD,,
+keras_hub_nightly-0.16.1.dev202410110340.dist-info/METADATA,sha256=8UB_SWjRF9_PRraQNKuRN13SSTXmUCEwQPFGo0nU2Jk,7458
+keras_hub_nightly-0.16.1.dev202410110340.dist-info/WHEEL,sha256=GV9aMThwP_4oNCtvEC2ec3qUYutgWeAzklro_0m4WJQ,91
+keras_hub_nightly-0.16.1.dev202410110340.dist-info/top_level.txt,sha256=N4J6piIWBKa38A4uV-CnIopnOEf8mHAbkNXafXm_CuA,10
+keras_hub_nightly-0.16.1.dev202410110340.dist-info/RECORD,,