keras-hub-nightly 0.16.1.dev202410180341__py3-none-any.whl → 0.16.1.dev202410200345__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (29)
  1. keras_hub/api/layers/__init__.py +2 -4
  2. keras_hub/api/models/__init__.py +4 -8
  3. keras_hub/src/models/mit/__init__.py +6 -0
  4. keras_hub/src/models/{mix_transformer/mix_transformer_backbone.py → mit/mit_backbone.py} +3 -7
  5. keras_hub/src/models/{mix_transformer/mix_transformer_classifier.py → mit/mit_image_classifier.py} +2 -4
  6. keras_hub/src/models/{mix_transformer/mix_transformer_classifier_preprocessor.py → mit/mit_image_classifier_preprocessor.py} +2 -6
  7. keras_hub/src/models/{mix_transformer/mix_transformer_image_converter.py → mit/mit_image_converter.py} +1 -1
  8. keras_hub/src/models/mobilenet/mobilenet_backbone.py +1 -1
  9. keras_hub/src/models/resnet/resnet_backbone.py +1 -2
  10. keras_hub/src/models/resnet/resnet_presets.py +141 -0
  11. keras_hub/src/models/sam/sam_image_segmenter.py +1 -1
  12. keras_hub/src/models/sam/sam_presets.py +3 -3
  13. keras_hub/src/models/stable_diffusion_3/stable_diffusion_3_presets.py +1 -1
  14. keras_hub/src/models/vae/vae_backbone.py +13 -1
  15. keras_hub/src/models/vgg/__init__.py +4 -0
  16. keras_hub/src/models/vgg/vgg_backbone.py +1 -1
  17. keras_hub/src/models/vgg/vgg_image_classifier.py +4 -15
  18. keras_hub/src/models/vgg/vgg_image_classifier_preprocessor.py +12 -0
  19. keras_hub/src/models/vgg/vgg_image_converter.py +8 -0
  20. keras_hub/src/models/vit_det/vit_det_backbone.py +2 -2
  21. keras_hub/src/utils/pipeline_model.py +3 -3
  22. keras_hub/src/version_utils.py +1 -1
  23. {keras_hub_nightly-0.16.1.dev202410180341.dist-info → keras_hub_nightly-0.16.1.dev202410200345.dist-info}/METADATA +1 -1
  24. {keras_hub_nightly-0.16.1.dev202410180341.dist-info → keras_hub_nightly-0.16.1.dev202410200345.dist-info}/RECORD +28 -26
  25. keras_hub/src/models/mix_transformer/__init__.py +0 -12
  26. /keras_hub/src/models/{mix_transformer/mix_transformer_layers.py → mit/mit_layers.py} +0 -0
  27. /keras_hub/src/models/{mix_transformer/mix_transformer_presets.py → mit/mit_presets.py} +0 -0
  28. {keras_hub_nightly-0.16.1.dev202410180341.dist-info → keras_hub_nightly-0.16.1.dev202410200345.dist-info}/WHEEL +0 -0
  29. {keras_hub_nightly-0.16.1.dev202410180341.dist-info → keras_hub_nightly-0.16.1.dev202410200345.dist-info}/top_level.txt +0 -0
keras_hub/api/layers/__init__.py
@@ -40,9 +40,7 @@ from keras_hub.src.models.deeplab_v3.deeplab_v3_image_converter import (
 from keras_hub.src.models.densenet.densenet_image_converter import (
     DenseNetImageConverter,
 )
-from keras_hub.src.models.mix_transformer.mix_transformer_image_converter import (
-    MiTImageConverter,
-)
+from keras_hub.src.models.mit.mit_image_converter import MiTImageConverter
 from keras_hub.src.models.pali_gemma.pali_gemma_image_converter import (
     PaliGemmaImageConverter,
 )
@@ -52,7 +50,7 @@ from keras_hub.src.models.resnet.resnet_image_converter import (
 from keras_hub.src.models.sam.sam_image_converter import SAMImageConverter
 from keras_hub.src.models.sam.sam_mask_decoder import SAMMaskDecoder
 from keras_hub.src.models.sam.sam_prompt_encoder import SAMPromptEncoder
-from keras_hub.src.models.vgg.vgg_image_classifier import VGGImageConverter
+from keras_hub.src.models.vgg.vgg_image_converter import VGGImageConverter
 from keras_hub.src.models.whisper.whisper_audio_converter import (
     WhisperAudioConverter,
 )
keras_hub/api/models/__init__.py
@@ -202,13 +202,9 @@ from keras_hub.src.models.mistral.mistral_causal_lm_preprocessor import (
     MistralCausalLMPreprocessor,
 )
 from keras_hub.src.models.mistral.mistral_tokenizer import MistralTokenizer
-from keras_hub.src.models.mix_transformer.mix_transformer_backbone import (
-    MiTBackbone,
-)
-from keras_hub.src.models.mix_transformer.mix_transformer_classifier import (
-    MiTImageClassifier,
-)
-from keras_hub.src.models.mix_transformer.mix_transformer_classifier_preprocessor import (
+from keras_hub.src.models.mit.mit_backbone import MiTBackbone
+from keras_hub.src.models.mit.mit_image_classifier import MiTImageClassifier
+from keras_hub.src.models.mit.mit_image_classifier_preprocessor import (
     MiTImageClassifierPreprocessor,
 )
 from keras_hub.src.models.mobilenet.mobilenet_backbone import MobileNetBackbone
@@ -299,7 +295,7 @@ from keras_hub.src.models.text_classifier_preprocessor import (
 from keras_hub.src.models.text_to_image import TextToImage
 from keras_hub.src.models.vgg.vgg_backbone import VGGBackbone
 from keras_hub.src.models.vgg.vgg_image_classifier import VGGImageClassifier
-from keras_hub.src.models.vgg.vgg_image_classifier import (
+from keras_hub.src.models.vgg.vgg_image_classifier_preprocessor import (
     VGGImageClassifierPreprocessor,
 )
 from keras_hub.src.models.vit_det.vit_det_backbone import ViTDetBackbone
keras_hub/src/models/mit/__init__.py (new file)
@@ -0,0 +1,6 @@
+from keras_hub.src.models.mit.mit_backbone import MiTBackbone
+from keras_hub.src.models.mit.mit_image_classifier import MiTImageClassifier
+from keras_hub.src.models.mit.mit_presets import backbone_presets
+from keras_hub.src.utils.preset_utils import register_presets
+
+register_presets(backbone_presets, MiTBackbone)
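Note: because the new `keras_hub/src/models/mit/__init__.py` calls `register_presets` at import time, the MiT preset names stay resolvable through the standard constructors after the rename. A minimal sketch of the user-facing effect, assuming the `mit_b0_ade20k_512` preset referenced in the backbone docstring below is downloadable:

```python
import numpy as np
import keras_hub

# Importing keras_hub runs the register_presets(...) call above,
# which is what makes this preset name resolvable.
backbone = keras_hub.models.MiTBackbone.from_preset("mit_b0_ade20k_512")
features = backbone(np.ones(shape=(1, 96, 96, 3)))
```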
keras_hub/src/models/{mix_transformer/mix_transformer_backbone.py → mit/mit_backbone.py}
@@ -4,12 +4,8 @@ from keras import ops
 
 from keras_hub.src.api_export import keras_hub_export
 from keras_hub.src.models.feature_pyramid_backbone import FeaturePyramidBackbone
-from keras_hub.src.models.mix_transformer.mix_transformer_layers import (
-    HierarchicalTransformerEncoder,
-)
-from keras_hub.src.models.mix_transformer.mix_transformer_layers import (
-    OverlappingPatchingAndEmbedding,
-)
+from keras_hub.src.models.mit.mit_layers import HierarchicalTransformerEncoder
+from keras_hub.src.models.mit.mit_layers import OverlappingPatchingAndEmbedding
 
 
 @keras_hub_export("keras_hub.models.MiTBackbone")
@@ -61,7 +57,7 @@ class MiTBackbone(FeaturePyramidBackbone):
     ```python
     images = np.ones(shape=(1, 96, 96, 3))
     labels = np.zeros(shape=(1, 96, 96, 1))
-    backbone = keras_hub.models.MiTBackbone.from_preset("mit_b0_imagenet")
+    backbone = keras_hub.models.MiTBackbone.from_preset("mit_b0_ade20k_512")
 
     # Evaluate model
     model(images)
keras_hub/src/models/{mix_transformer/mix_transformer_classifier.py → mit/mit_image_classifier.py}
@@ -1,9 +1,7 @@
 from keras_hub.src.api_export import keras_hub_export
 from keras_hub.src.models.image_classifier import ImageClassifier
-from keras_hub.src.models.mix_transformer.mix_transformer_backbone import (
-    MiTBackbone,
-)
-from keras_hub.src.models.mix_transformer.mix_transformer_classifier_preprocessor import (
+from keras_hub.src.models.mit.mit_backbone import MiTBackbone
+from keras_hub.src.models.mit.mit_image_classifier_preprocessor import (
     MiTImageClassifierPreprocessor,
 )
 
keras_hub/src/models/{mix_transformer/mix_transformer_classifier_preprocessor.py → mit/mit_image_classifier_preprocessor.py}
@@ -2,12 +2,8 @@ from keras_hub.src.api_export import keras_hub_export
 from keras_hub.src.models.image_classifier_preprocessor import (
     ImageClassifierPreprocessor,
 )
-from keras_hub.src.models.mix_transformer.mix_transformer_backbone import (
-    MiTBackbone,
-)
-from keras_hub.src.models.mix_transformer.mix_transformer_image_converter import (
-    MiTImageConverter,
-)
+from keras_hub.src.models.mit.mit_backbone import MiTBackbone
+from keras_hub.src.models.mit.mit_image_converter import MiTImageConverter
 
 
 @keras_hub_export("keras_hub.models.MiTImageClassifierPreprocessor")
keras_hub/src/models/{mix_transformer/mix_transformer_image_converter.py → mit/mit_image_converter.py}
@@ -1,6 +1,6 @@
 from keras_hub.src.api_export import keras_hub_export
 from keras_hub.src.layers.preprocessing.image_converter import ImageConverter
-from keras_hub.src.models.mix_transformer import MiTBackbone
+from keras_hub.src.models.mit import MiTBackbone
 
 
 @keras_hub_export("keras_hub.layers.MiTImageConverter")
keras_hub/src/models/mobilenet/mobilenet_backbone.py
@@ -96,7 +96,7 @@ class MobileNetBackbone(Backbone):
         stackwise_activation,
         output_num_filters,
         inverted_res_block,
-        image_shape=(224, 224, 3),
+        image_shape=(None, None, 3),
         input_activation="hard_swish",
         output_activation="hard_swish",
         depth_multiplier=1.0,
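Note: moving the default `image_shape` from `(224, 224, 3)` to `(None, None, 3)` builds the functional model with unknown spatial dimensions, so a single instance can accept inputs of varying height and width. A minimal sketch of the shape mechanics only (a toy model, not MobileNet itself):

```python
import numpy as np
from keras import layers, models

# A model built with shape=(None, None, 3) leaves H and W symbolic,
# so the same weights run on any input resolution.
inputs = layers.Input(shape=(None, None, 3))
outputs = layers.Conv2D(8, 3, padding="same")(inputs)
model = models.Model(inputs, outputs)

model(np.ones((1, 96, 96, 3)))    # works
model(np.ones((1, 224, 224, 3)))  # also works, same model
```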
keras_hub/src/models/resnet/resnet_backbone.py
@@ -68,7 +68,7 @@ class ResNetBackbone(FeaturePyramidBackbone):
     input_data = np.random.uniform(0, 1, size=(2, 224, 224, 3))
 
     # Pretrained ResNet backbone.
-    model = keras_hub.models.ResNetBackbone.from_preset("resnet50")
+    model = keras_hub.models.ResNetBackbone.from_preset("resnet_50_imagenet")
     model(input_data)
 
     # Randomly initialized ResNetV2 backbone with a custom config.
@@ -80,7 +80,6 @@ class ResNetBackbone(FeaturePyramidBackbone):
         stackwise_num_strides=[1, 2, 2],
         block_type="basic_block",
         use_pre_activation=True,
-        pooling="avg",
     )
     model(input_data)
     ```
keras_hub/src/models/resnet/resnet_presets.py
@@ -79,4 +79,145 @@ backbone_presets = {
         },
         "kaggle_handle": "kaggle://keras/resnetv2/keras/resnet_v2_101_imagenet/2",
     },
+    "resnet_vd_18_imagenet": {
+        "metadata": {
+            "description": (
+                "18-layer ResNetVD (ResNet with bag of tricks) model "
+                "pre-trained on the ImageNet 1k dataset at a 224x224 "
+                "resolution."
+            ),
+            "params": 11722824,
+            "official_name": "ResNet",
+            "path": "resnet",
+            "model_card": "https://arxiv.org/abs/1812.01187",
+        },
+        "kaggle_handle": "kaggle://kerashub/resnetvd/keras/resnet_vd_18_imagenet",
+    },
+    "resnet_vd_34_imagenet": {
+        "metadata": {
+            "description": (
+                "34-layer ResNetVD (ResNet with bag of tricks) model "
+                "pre-trained on the ImageNet 1k dataset at a 224x224 "
+                "resolution."
+            ),
+            "params": 21838408,
+            "official_name": "ResNet",
+            "path": "resnet",
+            "model_card": "https://arxiv.org/abs/1812.01187",
+        },
+        "kaggle_handle": "kaggle://kerashub/resnetvd/keras/resnet_vd_34_imagenet",
+    },
+    "resnet_vd_50_imagenet": {
+        "metadata": {
+            "description": (
+                "50-layer ResNetVD (ResNet with bag of tricks) model "
+                "pre-trained on the ImageNet 1k dataset at a 224x224 "
+                "resolution."
+            ),
+            "params": 25629512,
+            "official_name": "ResNet",
+            "path": "resnet",
+            "model_card": "https://arxiv.org/abs/1812.01187",
+        },
+        "kaggle_handle": "kaggle://kerashub/resnetvd/keras/resnet_vd_50_imagenet",
+    },
+    "resnet_vd_50_ssld_imagenet": {
+        "metadata": {
+            "description": (
+                "50-layer ResNetVD (ResNet with bag of tricks) model "
+                "pre-trained on the ImageNet 1k dataset at a 224x224 "
+                "resolution with knowledge distillation."
+            ),
+            "params": 25629512,
+            "official_name": "ResNet",
+            "path": "resnet",
+            "model_card": "https://arxiv.org/abs/1812.01187",
+        },
+        "kaggle_handle": "kaggle://kerashub/resnetvd/keras/resnet_vd_50_ssld_imagenet",
+    },
+    "resnet_vd_50_ssld_v2_imagenet": {
+        "metadata": {
+            "description": (
+                "50-layer ResNetVD (ResNet with bag of tricks) model "
+                "pre-trained on the ImageNet 1k dataset at a 224x224 "
+                "resolution with knowledge distillation and AutoAugment."
+            ),
+            "params": 25629512,
+            "official_name": "ResNet",
+            "path": "resnet",
+            "model_card": "https://arxiv.org/abs/1812.01187",
+        },
+        "kaggle_handle": "kaggle://kerashub/resnetvd/keras/resnet_vd_50_ssld_v2_imagenet",
+    },
+    "resnet_vd_50_ssld_v2_fix_imagenet": {
+        "metadata": {
+            "description": (
+                "50-layer ResNetVD (ResNet with bag of tricks) model "
+                "pre-trained on the ImageNet 1k dataset at a 224x224 "
+                "resolution with knowledge distillation, AutoAugment and "
+                "additional fine-tuning of the classification head."
+            ),
+            "params": 25629512,
+            "official_name": "ResNet",
+            "path": "resnet",
+            "model_card": "https://arxiv.org/abs/1812.01187",
+        },
+        "kaggle_handle": "kaggle://kerashub/resnetvd/keras/resnet_vd_50_ssld_v2_fix_imagenet",
+    },
+    "resnet_vd_101_imagenet": {
+        "metadata": {
+            "description": (
+                "101-layer ResNetVD (ResNet with bag of tricks) model "
+                "pre-trained on the ImageNet 1k dataset at a 224x224 "
+                "resolution."
+            ),
+            "params": 44673864,
+            "official_name": "ResNet",
+            "path": "resnet",
+            "model_card": "https://arxiv.org/abs/1812.01187",
+        },
+        "kaggle_handle": "kaggle://kerashub/resnetvd/keras/resnet_vd_101_imagenet",
+    },
+    "resnet_vd_101_ssld_imagenet": {
+        "metadata": {
+            "description": (
+                "101-layer ResNetVD (ResNet with bag of tricks) model "
+                "pre-trained on the ImageNet 1k dataset at a 224x224 "
+                "resolution with knowledge distillation."
+            ),
+            "params": 44673864,
+            "official_name": "ResNet",
+            "path": "resnet",
+            "model_card": "https://arxiv.org/abs/1812.01187",
+        },
+        "kaggle_handle": "kaggle://kerashub/resnetvd/keras/resnet_vd_101_ssld_imagenet",
+    },
+    "resnet_vd_152_imagenet": {
+        "metadata": {
+            "description": (
+                "152-layer ResNetVD (ResNet with bag of tricks) model "
+                "pre-trained on the ImageNet 1k dataset at a 224x224 "
+                "resolution."
+            ),
+            "params": 60363592,
+            "official_name": "ResNet",
+            "path": "resnet",
+            "model_card": "https://arxiv.org/abs/1812.01187",
+        },
+        "kaggle_handle": "kaggle://kerashub/resnetvd/keras/resnet_vd_152_imagenet",
+    },
+    "resnet_vd_200_imagenet": {
+        "metadata": {
+            "description": (
+                "200-layer ResNetVD (ResNet with bag of tricks) model "
+                "pre-trained on the ImageNet 1k dataset at a 224x224 "
+                "resolution."
+            ),
+            "params": 74933064,
+            "official_name": "ResNet",
+            "path": "resnet",
+            "model_card": "https://arxiv.org/abs/1812.01187",
+        },
+        "kaggle_handle": "kaggle://kerashub/resnetvd/keras/resnet_vd_200_imagenet",
+    },
 }
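Note: once these entries are in `backbone_presets`, the ResNet-VD checkpoints load by name through `from_preset`. A minimal usage sketch, assuming the Kaggle handles above are reachable:

```python
import numpy as np
import keras_hub

# "resnet_vd_50_imagenet" is one of the preset names added above.
model = keras_hub.models.ResNetBackbone.from_preset("resnet_vd_50_imagenet")
model(np.random.uniform(0, 1, size=(2, 224, 224, 3)))
```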
keras_hub/src/models/sam/sam_image_segmenter.py
@@ -31,7 +31,7 @@ class SAMImageSegmenter(ImageSegmenter):
 
 
     Args:
-        backbone: A `keras_hub.models.VGGBackbone` instance.
+        backbone: A `keras_hub.models.SAMBackbone` instance.
 
     Example:
     Load pretrained model using `from_preset`.
keras_hub/src/models/sam/sam_presets.py
@@ -9,7 +9,7 @@ backbone_presets = {
             "path": "sam",
             "model_card": "https://arxiv.org/abs/2304.02643",
         },
-        "kaggle_handle": "kaggle://keras/sam/keras/sam_base_sa1b/2",
+        "kaggle_handle": "kaggle://keras/sam/keras/sam_base_sa1b/4",
     },
     "sam_large_sa1b": {
         "metadata": {
@@ -19,7 +19,7 @@ backbone_presets = {
             "path": "sam",
             "model_card": "https://arxiv.org/abs/2304.02643",
         },
-        "kaggle_handle": "kaggle://keras/sam/keras/sam_large_sa1b/3",
+        "kaggle_handle": "kaggle://keras/sam/keras/sam_large_sa1b/4",
     },
     "sam_huge_sa1b": {
         "metadata": {
@@ -29,6 +29,6 @@ backbone_presets = {
             "path": "sam",
             "model_card": "https://arxiv.org/abs/2304.02643",
         },
-        "kaggle_handle": "kaggle://keras/sam/keras/sam_huge_sa1b/3",
+        "kaggle_handle": "kaggle://keras/sam/keras/sam_huge_sa1b/4",
     },
 }
keras_hub/src/models/stable_diffusion_3/stable_diffusion_3_presets.py
@@ -10,7 +10,7 @@ backbone_presets = {
             ),
             "params": 2987080931,
             "official_name": "StableDiffusion3",
-            "path": "stablediffusion3",
+            "path": "stable_diffusion_3",
             "model_card": "https://arxiv.org/abs/2110.00476",
         },
         "kaggle_handle": "kaggle://keras/stablediffusion3/keras/stable_diffusion_3_medium/1",
keras_hub/src/models/vae/vae_backbone.py
@@ -10,7 +10,7 @@ from keras_hub.src.utils.keras_utils import standardize_data_format
 
 
 class VAEBackbone(Backbone):
-    """VAE backbone used in latent diffusion models.
+    """Variational Autoencoder(VAE) backbone used in latent diffusion models.
 
     When encoding, this model generates mean and log variance of the input
     images. When decoding, it reconstructs images from the latent space.
@@ -51,6 +51,18 @@ class VAEBackbone(Backbone):
            `"channels_last"`.
        dtype: `None` or str or `keras.mixed_precision.DTypePolicy`. The dtype
            to use for the model's computations and weights.
+
+    Example:
+    ```Python
+    backbone = VAEBackbone(
+        encoder_num_filters=[32, 32, 32, 32],
+        encoder_num_blocks=[1, 1, 1, 1],
+        decoder_num_filters=[32, 32, 32, 32],
+        decoder_num_blocks=[1, 1, 1, 1],
+    )
+    input_data = ops.ones((2, self.height, self.width, 3))
+    output = backbone(input_data)
+    ```
     """
 
     def __init__(
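Note: the new docstring example references `self.height` and `self.width`, which only resolve inside the test harness the snippet appears to be adapted from. A self-contained variant, assuming 64x64 inputs (any size the encoder stages can downsample evenly should work):

```python
from keras import ops

from keras_hub.src.models.vae.vae_backbone import VAEBackbone

backbone = VAEBackbone(
    encoder_num_filters=[32, 32, 32, 32],
    encoder_num_blocks=[1, 1, 1, 1],
    decoder_num_filters=[32, 32, 32, 32],
    decoder_num_blocks=[1, 1, 1, 1],
)
# Concrete dimensions in place of the snippet's self.height / self.width.
input_data = ops.ones((2, 64, 64, 3))
output = backbone(input_data)
```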
keras_hub/src/models/vgg/__init__.py
@@ -1 +1,5 @@
 from keras_hub.src.models.vgg.vgg_backbone import VGGBackbone
+from keras_hub.src.models.vgg.vgg_presets import backbone_presets
+from keras_hub.src.utils.preset_utils import register_presets
+
+register_presets(backbone_presets, VGGBackbone)
keras_hub/src/models/vgg/vgg_backbone.py
@@ -20,7 +20,7 @@ class VGGBackbone(Backbone):
        stackwise_num_filters: list of ints, filter size for convolutional
            blocks per VGG block. For both VGG16 and VGG19 this is [
            64, 128, 256, 512, 512].
-       image_shape: tuple, optional shape tuple, defaults to (224, 224, 3).
+       image_shape: tuple, optional shape tuple, defaults to (None, None, 3).
 
     Examples:
     ```python
keras_hub/src/models/vgg/vgg_image_classifier.py
@@ -1,24 +1,12 @@
 import keras
 
 from keras_hub.src.api_export import keras_hub_export
-from keras_hub.src.layers.preprocessing.image_converter import ImageConverter
 from keras_hub.src.models.image_classifier import ImageClassifier
-from keras_hub.src.models.image_classifier_preprocessor import (
-    ImageClassifierPreprocessor,
-)
 from keras_hub.src.models.task import Task
 from keras_hub.src.models.vgg.vgg_backbone import VGGBackbone
-
-
-@keras_hub_export("keras_hub.layers.VGGImageConverter")
-class VGGImageConverter(ImageConverter):
-    backbone_cls = VGGBackbone
-
-
-@keras_hub_export("keras_hub.models.VGGImageClassifierPreprocessor")
-class VGGImageClassifierPreprocessor(ImageClassifierPreprocessor):
-    backbone_cls = VGGBackbone
-    image_converter_cls = VGGImageConverter
+from keras_hub.src.models.vgg.vgg_image_classifier_preprocessor import (
+    VGGImageClassifierPreprocessor,
+)
 
 
 @keras_hub_export("keras_hub.models.VGGImageClassifier")
@@ -211,6 +199,7 @@ class VGGImageClassifier(ImageClassifier):
         self.pooling = pooling
         self.pooling_hidden_dim = pooling_hidden_dim
         self.dropout = dropout
+        self.preprocessor = preprocessor
 
     def get_config(self):
         # Backbone serialized in `super`
keras_hub/src/models/vgg/vgg_image_classifier_preprocessor.py (new file)
@@ -0,0 +1,12 @@
+from keras_hub.src.api_export import keras_hub_export
+from keras_hub.src.models.image_classifier_preprocessor import (
+    ImageClassifierPreprocessor,
+)
+from keras_hub.src.models.vgg.vgg_backbone import VGGBackbone
+from keras_hub.src.models.vgg.vgg_image_converter import VGGImageConverter
+
+
+@keras_hub_export("keras_hub.models.VGGImageClassifierPreprocessor")
+class VGGImageClassifierPreprocessor(ImageClassifierPreprocessor):
+    backbone_cls = VGGBackbone
+    image_converter_cls = VGGImageConverter
keras_hub/src/models/vgg/vgg_image_converter.py (new file)
@@ -0,0 +1,8 @@
+from keras_hub.src.api_export import keras_hub_export
+from keras_hub.src.layers.preprocessing.image_converter import ImageConverter
+from keras_hub.src.models.vgg.vgg_backbone import VGGBackbone
+
+
+@keras_hub_export("keras_hub.layers.VGGImageConverter")
+class VGGImageConverter(ImageConverter):
+    backbone_cls = VGGBackbone
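Note: `VGGImageConverter` and `VGGImageClassifierPreprocessor` now live in their own modules instead of being defined inside `vgg_image_classifier.py`, matching the per-model layout used elsewhere in the package. A sketch of how the split pieces compose, assuming the `image_size` argument from the shared `ImageConverter` API:

```python
from keras_hub.src.models.vgg.vgg_image_classifier_preprocessor import (
    VGGImageClassifierPreprocessor,
)
from keras_hub.src.models.vgg.vgg_image_converter import VGGImageConverter

# The converter handles resizing/rescaling; the preprocessor wires it
# into the classifier's input pipeline.
converter = VGGImageConverter(image_size=(224, 224))
preprocessor = VGGImageClassifierPreprocessor(image_converter=converter)
```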
keras_hub/src/models/vit_det/vit_det_backbone.py
@@ -31,7 +31,7 @@ class ViTDetBackbone(Backbone):
        global_attention_layer_indices (list): Indexes for blocks using
            global attention.
        image_shape (tuple[int], optional): The size of the input image in
-           `(H, W, C)` format. Defaults to `(1024, 1024, 3)`.
+           `(H, W, C)` format. Defaults to `(None, None, 3)`.
        patch_size (int, optional): the patch size to be supplied to the
            Patching layer to turn input images into a flattened sequence of
            patches. Defaults to `16`.
@@ -79,7 +79,7 @@ class ViTDetBackbone(Backbone):
         intermediate_dim,
         num_heads,
         global_attention_layer_indices,
-        image_shape=(1024, 1024, 3),
+        image_shape=(None, None, 3),
         patch_size=16,
         num_output_channels=256,
         use_bias=True,
keras_hub/src/utils/pipeline_model.py
@@ -232,7 +232,7 @@
     ):
         data = self.preprocess_samples(x, y, sample_weight)
         x, y, sample_weight = keras.utils.unpack_x_y_sample_weight(data)
-        x = ops.convert_to_tensor(x)
+        x = tree.map_structure(ops.convert_to_tensor, x)
         if y is not None:
             y = ops.convert_to_tensor(y)
         if sample_weight is not None:
@@ -253,7 +253,7 @@
     ):
         data = self.preprocess_samples(x, y, sample_weight)
         x, y, sample_weight = keras.utils.unpack_x_y_sample_weight(data)
-        x = ops.convert_to_tensor(x)
+        x = tree.map_structure(ops.convert_to_tensor, x)
         if y is not None:
             y = ops.convert_to_tensor(y)
         if sample_weight is not None:
@@ -272,7 +272,7 @@
     ):
         data = self.preprocess_samples(x)
         x, _, _ = keras.utils.unpack_x_y_sample_weight(data)
-        x = ops.convert_to_tensor(x)
+        x = tree.map_structure(ops.convert_to_tensor, x)
         return super().predict_on_batch(
             x=x,
             **kwargs,
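Note: replacing the bare `ops.convert_to_tensor(x)` with `tree.map_structure(ops.convert_to_tensor, x)` matters when `preprocess_samples` returns structured inputs (e.g. a dict of token ids and padding masks) rather than a single array, since conversion must then happen leaf by leaf. A minimal sketch of the difference, assuming the `keras.tree` utility from Keras 3:

```python
from keras import ops, tree

x = {"token_ids": [[1, 2, 3]], "padding_mask": [[1, 1, 1]]}

# ops.convert_to_tensor(x) would fail on the dict; mapping over the
# structure converts each leaf and preserves the dict layout.
converted = tree.map_structure(ops.convert_to_tensor, x)
print({k: v.shape for k, v in converted.items()})
```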
keras_hub/src/version_utils.py
@@ -1,7 +1,7 @@
 from keras_hub.src.api_export import keras_hub_export
 
 # Unique source of truth for the version number.
-__version__ = "0.16.1.dev202410180341"
+__version__ = "0.16.1.dev202410200345"
 
 
 @keras_hub_export("keras_hub.version")
{keras_hub_nightly-0.16.1.dev202410180341.dist-info → keras_hub_nightly-0.16.1.dev202410200345.dist-info}/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: keras-hub-nightly
-Version: 0.16.1.dev202410180341
+Version: 0.16.1.dev202410200345
 Summary: Industry-strength Natural Language Processing extensions for Keras.
 Home-page: https://github.com/keras-team/keras-hub
 Author: Keras team
{keras_hub_nightly-0.16.1.dev202410180341.dist-info → keras_hub_nightly-0.16.1.dev202410200345.dist-info}/RECORD
@@ -1,15 +1,15 @@
 keras_hub/__init__.py,sha256=QGdXyHgYt6cMUAP1ebxwc6oR86dE0dkMxNy2eOCQtFo,855
 keras_hub/api/__init__.py,sha256=spMxsgqzjpeuC8rY4WP-2kAZ2qwwKRSbFwddXgUjqQE,524
 keras_hub/api/bounding_box/__init__.py,sha256=T8R_X7BPm0et1xaZq8565uJmid7dylsSFSj4V-rGuFQ,1097
-keras_hub/api/layers/__init__.py,sha256=NpuVqxRCKbnvuW1QjZiSy7vHsI9Ej95ZCnNp4Lm5Hj8,2515
+keras_hub/api/layers/__init__.py,sha256=OpXnXktkkpTjlufy1u2hLPqV0cidG2B40x30jQGiy9U,2481
 keras_hub/api/metrics/__init__.py,sha256=So8Ec-lOcTzn_UUMmAdzDm8RKkPu2dbRUm2px8gpUEI,381
-keras_hub/api/models/__init__.py,sha256=9-1qQyoGODam73Vlo49EI2fzTwzwgdlCshlTMRrFEqg,14884
+keras_hub/api/models/__init__.py,sha256=5EfZDUOnHStK8UE6f6ih7cQZo2ZyFeUO15T45TC1uNA,14819
 keras_hub/api/samplers/__init__.py,sha256=n-_SEXxr2LNUzK2FqVFN7alsrkx1P_HOVTeLZKeGCdE,730
 keras_hub/api/tokenizers/__init__.py,sha256=_f-r_cyUM2fjBB7iO84ThOdqqsAxHNIewJ2EBDlM0cA,2524
 keras_hub/api/utils/__init__.py,sha256=Gp1E6gG-RtKQS3PBEQEOz9PQvXkXaJ0ySGMqZ7myN7A,215
 keras_hub/src/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 keras_hub/src/api_export.py,sha256=9pQZK27JObxWZ96QPLBp1OBsjWigh1iuV6RglPGMRk0,1499
-keras_hub/src/version_utils.py,sha256=u4lRruStAMtT-Vd1NqG2WDfk_hI_aY5rRtlyOLK2wBo,222
+keras_hub/src/version_utils.py,sha256=vhdVfxhBYNF9KstNciGcRjXgFBojRjbm2hO-HcHlQ0E,222
 keras_hub/src/bounding_box/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 keras_hub/src/bounding_box/converters.py,sha256=a5po8DBm87oz2EXfi-0uEZHCMlCJPIb4-MaZIdYx3Dg,17865
 keras_hub/src/bounding_box/formats.py,sha256=YmskOz2BOSat7NaE__J9VfpSNGPJJR0znSzA4lp8MMI,3868
@@ -209,15 +209,15 @@ keras_hub/src/models/mistral/mistral_layer_norm.py,sha256=nimMZ5CTPK8v9eflfrGuzq
 keras_hub/src/models/mistral/mistral_presets.py,sha256=gucgdaFAiU-vRDS1g9zWGHjbDF_jaCiljPibCF4yVqY,1329
 keras_hub/src/models/mistral/mistral_tokenizer.py,sha256=wyzR_Y2XwrDiBV3jIeBChSPiaOkVVaxFuLxMH2F6EYA,2005
 keras_hub/src/models/mistral/mistral_transformer_decoder.py,sha256=RDIIB3FhneHZP11tNUFQT9DcWawCMnrtVxtSvtnP3ts,9542
-keras_hub/src/models/mix_transformer/__init__.py,sha256=neU-h7C0sXS6OmtS5NFJeJ1lF13OW3DaUlT6LXhl6vA,409
-keras_hub/src/models/mix_transformer/mix_transformer_backbone.py,sha256=B4hdhWHZ93lS937BGSSxovDKVXQZVuWrMbFwECFoWrg,6048
-keras_hub/src/models/mix_transformer/mix_transformer_classifier.py,sha256=pVfbbTNuiZRFElCGLyNO3gknNGnut-6-L-zAVB4Nb5w,531
-keras_hub/src/models/mix_transformer/mix_transformer_classifier_preprocessor.py,sha256=lSUuMAJiyWDVH0AVjG2y684bU3msxI3_UTa_xWyLLKQ,570
-keras_hub/src/models/mix_transformer/mix_transformer_image_converter.py,sha256=WG2LjuagCxSYXkFgqd4bHyUoMLFCzTj9QjJBoptW6WM,323
-keras_hub/src/models/mix_transformer/mix_transformer_layers.py,sha256=9AbA4kCJkjeV7fAwbRns8VGn0l1pgQ3CqFPjY-99VGA,9695
-keras_hub/src/models/mix_transformer/mix_transformer_presets.py,sha256=9bxWVOLhmBdoq2I4uZfZb7wZAB-3YjuMq0T_-JJzr2w,4960
+keras_hub/src/models/mit/__init__.py,sha256=F70_0PR_nPzPdMI8XOpXDRR_nxclGjcHv3iWSWUX3w8,316
+keras_hub/src/models/mit/mit_backbone.py,sha256=0lsWM7TkwmFE3euYbI5Xe808_ua9UDPOV4hOPlCBrOo,5984
+keras_hub/src/models/mit/mit_image_classifier.py,sha256=HKj6u6AqPbxinGYPRsz_ZdW2pEHAcFsKenrGHpRMobM,480
+keras_hub/src/models/mit/mit_image_classifier_preprocessor.py,sha256=oNYs-pUK8VnzNEPcq5beYX0qfnnlbJcxY8o5s7bVQes,504
+keras_hub/src/models/mit/mit_image_converter.py,sha256=Mw7nV-OzyBveGuZUNFsPPKyq9jXJVW2_cVH024CNkXM,311
+keras_hub/src/models/mit/mit_layers.py,sha256=9AbA4kCJkjeV7fAwbRns8VGn0l1pgQ3CqFPjY-99VGA,9695
+keras_hub/src/models/mit/mit_presets.py,sha256=9bxWVOLhmBdoq2I4uZfZb7wZAB-3YjuMq0T_-JJzr2w,4960
 keras_hub/src/models/mobilenet/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-keras_hub/src/models/mobilenet/mobilenet_backbone.py,sha256=nlXdMqcj18iahy60aew4ON79EHUEuNIgvKY9dToH284,18191
+keras_hub/src/models/mobilenet/mobilenet_backbone.py,sha256=fFqEH3WTspEBYnF9LEdsX7RGHEEL3CVoVDPWG1ZnqBk,18193
 keras_hub/src/models/mobilenet/mobilenet_image_classifier.py,sha256=l5jo99I0fLlbwLub5jHw07CjC-NnmuV-ySJwXGI20Ek,351
 keras_hub/src/models/opt/__init__.py,sha256=6Ybj8etxNaPsVcuZvaeHnKB3As92Px--dbiFAqOCIT0,239
 keras_hub/src/models/opt/opt_backbone.py,sha256=mK5z_E5mSiIX5s0w4hr4IVQpT7K46W2ajZBmuMjxwaY,5873
@@ -245,11 +245,11 @@ keras_hub/src/models/phi3/phi3_presets.py,sha256=DNyPTDA7PzFC8Ys2QmR2-mxUDa8Y8Id
 keras_hub/src/models/phi3/phi3_rotary_embedding.py,sha256=WTPCN8IKq3R7kMzsES1b8JEKV-8iNi_49WkhNTXoNUk,5012
 keras_hub/src/models/phi3/phi3_tokenizer.py,sha256=bOPH14wTVVHJHq8mgzXLjsgvKMNhfO8eayevAPpjYVA,1992
 keras_hub/src/models/resnet/__init__.py,sha256=C5UqlQ6apm8WSp1bnrxB6Bi3BGaknxRQs-r3b2wpaGA,257
-keras_hub/src/models/resnet/resnet_backbone.py,sha256=mqVdGUj8YtjZ3zIhAQXgNqu8SqiQiFlYChn0rRKF_IE,31287
+keras_hub/src/models/resnet/resnet_backbone.py,sha256=3acTjdWbnos8l_TPxYLgoV3Y4V_vJ_o1AqGhiQu459k,31274
 keras_hub/src/models/resnet/resnet_image_classifier.py,sha256=nf35EKDzvBkfhHsK-s6Ks0nbhvKO7HEOYZm94YckyWE,510
 keras_hub/src/models/resnet/resnet_image_classifier_preprocessor.py,sha256=fM7gyQ0qB-RRuI4USJkRD6q9-HVfuC71e-BLTo-UhHQ,543
 keras_hub/src/models/resnet/resnet_image_converter.py,sha256=fgTxihJznGFss-y3Z-jp0JE3X1gaaB2y-f2KMwrT8Pk,342
-keras_hub/src/models/resnet/resnet_presets.py,sha256=FwQuCH9IZM0c7eRnbqxviQcfypbA_lg0-yVvnsGY1Dc,2947
+keras_hub/src/models/resnet/resnet_presets.py,sha256=kgEZQtJKnK37bSKy4Ny0GdE70P71z6OOdR_H--1pYGI,8563
 keras_hub/src/models/retinanet/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 keras_hub/src/models/retinanet/anchor_generator.py,sha256=43NoI7djbRudH98hUm-9fw5OEGQNRXOUYzypIZhLYhE,6750
 keras_hub/src/models/retinanet/box_matcher.py,sha256=l820r1R-ByqiyVgmZ0YFjjz0njchDda-wItzLn1X84o,10834
@@ -267,11 +267,11 @@ keras_hub/src/models/roberta/roberta_tokenizer.py,sha256=VKPrgXVT9aMKP7et2DIWKlT
 keras_hub/src/models/sam/__init__.py,sha256=fp71Q288xeE81tIOZkkudec4Acs8v4aO5WdyzCD9x-c,239
 keras_hub/src/models/sam/sam_backbone.py,sha256=fbvtGG6du7tnkcGtEsRyT9TRwPBUJ99GBolGkWR5pkc,4351
 keras_hub/src/models/sam/sam_image_converter.py,sha256=5POp3aYFu6CK3R0NNfeUBbjhguBkincSMNvlcIJXarE,324
-keras_hub/src/models/sam/sam_image_segmenter.py,sha256=gJ-O7XaSvn9KTI-QPguhAiGfvxLUBar-KVQ-EEH5kko,7680
+keras_hub/src/models/sam/sam_image_segmenter.py,sha256=2OIowfl7cF298gIlqQlOJUsx_BcI2mCDntbXR8uohZc,7680
 keras_hub/src/models/sam/sam_image_segmenter_preprocessor.py,sha256=7slvyhGoMHmSigagqIcjDJ3gX8fUJbuMBwmozC4FlCg,849
 keras_hub/src/models/sam/sam_layers.py,sha256=SE5e6tYc-lsIVfMp9khewvuA1jY-dEHQmLT00YUok4M,13862
 keras_hub/src/models/sam/sam_mask_decoder.py,sha256=9RfjoNL7GSY6I9LZ3ulUa5cIoYSPJNP4KnHvq16lnM4,9549
-keras_hub/src/models/sam/sam_presets.py,sha256=oAv_VmRiSE4dtJRp0ue_5hP7zoXeL9ykjHwECV-dzyY,1211
+keras_hub/src/models/sam/sam_presets.py,sha256=IzpNyhxa3s5zqQFPTZOuqU75I7DU6y-TvMM27gKXLAs,1211
 keras_hub/src/models/sam/sam_prompt_encoder.py,sha256=2foB7900QbzQfZjBo335XYsdjmhOnVT8fKD1CubJNVE,11801
 keras_hub/src/models/sam/sam_transformer.py,sha256=L2bdxdc2RUF1juRZ0F0Z6r0gTva1sUwEdjItJmKKf6w,5730
 keras_hub/src/models/stable_diffusion_3/__init__.py,sha256=ZKYQuaRObyhKq8GVAHmoRvlXp6FpU8ChvutVCHyXKuc,343
@@ -280,7 +280,7 @@ keras_hub/src/models/stable_diffusion_3/mmdit.py,sha256=ByFot4_I1Z6woOBYvPcbkUtY
 keras_hub/src/models/stable_diffusion_3/stable_diffusion_3_backbone.py,sha256=QuggvAy1yvtIXFcwyXOmE_aUdhLcCEUw4FnTuqekys0,22497
 keras_hub/src/models/stable_diffusion_3/stable_diffusion_3_image_to_image.py,sha256=6_IXkxAv588lAKEasJrXgCjQePSXs-54XrvVIlYOT60,5483
 keras_hub/src/models/stable_diffusion_3/stable_diffusion_3_inpaint.py,sha256=tKVAQVbKOt3lWkWsQLKN9KK3WYem0-u5fonq2uBAPrc,6367
-keras_hub/src/models/stable_diffusion_3/stable_diffusion_3_presets.py,sha256=EP8hQHj8yqP3q26kwpNnDbrbTH7UeWL8GS5Xwo4nSCE,659
+keras_hub/src/models/stable_diffusion_3/stable_diffusion_3_presets.py,sha256=9LcbF9Okin9ba6XJd9EQJKW5AVWdrfvW8r6F6TCx7X8,661
 keras_hub/src/models/stable_diffusion_3/stable_diffusion_3_text_to_image.py,sha256=pQOC7xMJfJHZxZRiYFtjrbjx0GXb94cNyOr9NELoXo8,4488
 keras_hub/src/models/stable_diffusion_3/stable_diffusion_3_text_to_image_preprocessor.py,sha256=TB0KESt5dnFYiS292PbzB0LdiH23AD6aTSTGmQEuzGM,2742
 keras_hub/src/models/stable_diffusion_3/t5_encoder.py,sha256=oV7P1uwCKdGiD93zXq7kmqX0elMZQU4UvBa8wg6P1hs,5113
@@ -293,14 +293,16 @@ keras_hub/src/models/t5/t5_presets.py,sha256=95zU4cTNEZMH2yiCLptA9zhu2D4mE1Cay18
 keras_hub/src/models/t5/t5_tokenizer.py,sha256=pLTu15JeYSpVmy-2600vBc-Mxn_uHyTKts4PI2MxxBM,2517
 keras_hub/src/models/t5/t5_transformer_layer.py,sha256=uDeP84F1x7xJxki5iKe12Zn6eWD_4yVjoFXMuod-a3A,5347
 keras_hub/src/models/vae/__init__.py,sha256=i3UaSW4IJf76O7lSPE1dyxOVjuHx8iAYKivqvUbDHOw,62
-keras_hub/src/models/vae/vae_backbone.py,sha256=aYf1sGteFJ7FyR3X8Ek6QBjAT5GjRtQTK2jXhYVJeM4,6671
+keras_hub/src/models/vae/vae_backbone.py,sha256=Yk0srJhB-zfxQeAoyZdNzvxfxPxPMVie0nqKU7cp-2M,7033
 keras_hub/src/models/vae/vae_layers.py,sha256=N83CYM1zgbl1EIjAOs3cFCkJEdxvbXkgM9ghKyljFAg,27752
-keras_hub/src/models/vgg/__init__.py,sha256=1ydFmkTOix2kOnDHie3srD4XD0dQ_7iR8OYbJzBM_YM,62
-keras_hub/src/models/vgg/vgg_backbone.py,sha256=qes1AsKwBDI7eQ3aC1uRievMkVNGXM9TNhtKLb9eZiU,3697
-keras_hub/src/models/vgg/vgg_image_classifier.py,sha256=bl6XM7l9fnOTGFreqOO3Z1jreusjhA4l7G0xjimfUKA,7829
+keras_hub/src/models/vgg/__init__.py,sha256=5ktFtITvvYja4Jg3q1LqPvGH-fMicx5wxCCpXT8aVKQ,239
+keras_hub/src/models/vgg/vgg_backbone.py,sha256=yzzindEMO1rDgf3eAv9K0rhpg4NgfGZLgxaAv5CyGM8,3699
+keras_hub/src/models/vgg/vgg_image_classifier.py,sha256=d-hlgvwbNhzR6r3q2oqEmRmuAuCpKzUwNC2JUwdzruI,7460
+keras_hub/src/models/vgg/vgg_image_classifier_preprocessor.py,sha256=M7hBbDPws5Z7oDQPigBx-upHssul7Q_p0QIv3E4yOwo,504
+keras_hub/src/models/vgg/vgg_image_converter.py,sha256=FKVrSNNBxIkiKvApzf4TZxidBb1z917Xs9nooHCcRLM,324
 keras_hub/src/models/vgg/vgg_presets.py,sha256=ltKExQdrR3E30kZPZD53tXVOsc8Gj7Krj6pzHP1UYVU,1879
 keras_hub/src/models/vit_det/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-keras_hub/src/models/vit_det/vit_det_backbone.py,sha256=GzwHXAfttExqDaGU4R2LAvng1gzjuvO3HMqUPwNUy9g,7656
+keras_hub/src/models/vit_det/vit_det_backbone.py,sha256=-ZSvfL_XSjYK_qpo5fDGY-UOI38_rqU4fD9XszK2yxU,7656
 keras_hub/src/models/vit_det/vit_layers.py,sha256=oCKeUw5ckyUAGvmFPuxIiIAqgmC3uqh85LfZcgyh964,19852
 keras_hub/src/models/whisper/__init__.py,sha256=45vTF01_e-7VzD-zvXPw1NiA9SCgDE8w0cI-6peG9cA,263
 keras_hub/src/models/whisper/whisper_audio_converter.py,sha256=aXqQ6uPI9fBSjuYbo7bMr4C0avPh3iDwrVXHEJ7W_zo,8386
@@ -345,7 +347,7 @@ keras_hub/src/tokenizers/word_piece_tokenizer.py,sha256=vP6AZgbzsRiuPCt3W_n94nsF
 keras_hub/src/tokenizers/word_piece_tokenizer_trainer.py,sha256=xUhc9EMswarzghNfrDLUFYQBExZOQxbMlfKp9G6A63k,6549
 keras_hub/src/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 keras_hub/src/utils/keras_utils.py,sha256=lrZuC8HL2lmQfbHaS_t1JUyJann_ji2iTYE0Fzos8PU,1969
-keras_hub/src/utils/pipeline_model.py,sha256=33-0vIB9KGYh2mRtyjHxBPvgGZHDusRcRy-xjki3_gg,9024
+keras_hub/src/utils/pipeline_model.py,sha256=jgzB6NQPSl0KOu08N-TazfOnXnUJbZjH2EXXhx25Ftg,9084
 keras_hub/src/utils/preset_utils.py,sha256=w45mluy4bhPPWB68waWpXFQ4MAKvSXS6llVw4rAE70s,30119
 keras_hub/src/utils/python_utils.py,sha256=N8nWeO3san4YnGkffRXG3Ix7VEIMTKSN21FX5TuL7G8,202
 keras_hub/src/utils/tensor_utils.py,sha256=JipeJUDnnvLuT-ToVQC0t9dmSzebwPG6XiZgEwGEGI4,14646
@@ -368,7 +370,7 @@ keras_hub/src/utils/transformers/convert_mistral.py,sha256=kVhN9h1ZFVhwkNW8p3wnS
 keras_hub/src/utils/transformers/convert_pali_gemma.py,sha256=B1leeDw96Yvu81hYumf66hIid07k5NLqoeWAJgPnaLs,10649
 keras_hub/src/utils/transformers/preset_loader.py,sha256=GS44hZUuGQCtzsyn8z44ZpHdftd3DFemwV2hx2bQa-U,2738
 keras_hub/src/utils/transformers/safetensor_utils.py,sha256=rPK-Uw1CG0DX0d_UAD-r2cG9fw8GI8bvAlrcXfQ9g4c,3323
-keras_hub_nightly-0.16.1.dev202410180341.dist-info/METADATA,sha256=I3yyfeBrwqtrZvH5HpEzkgeAs0iwD05aUdSbwxlTKy0,7458
-keras_hub_nightly-0.16.1.dev202410180341.dist-info/WHEEL,sha256=OVMc5UfuAQiSplgO0_WdW7vXVGAt9Hdd6qtN4HotdyA,91
-keras_hub_nightly-0.16.1.dev202410180341.dist-info/top_level.txt,sha256=N4J6piIWBKa38A4uV-CnIopnOEf8mHAbkNXafXm_CuA,10
-keras_hub_nightly-0.16.1.dev202410180341.dist-info/RECORD,,
+keras_hub_nightly-0.16.1.dev202410200345.dist-info/METADATA,sha256=NriUPiepBw1bkQefKH4n_O5T-8UMx8pKwN6WQeKE7-s,7458
+keras_hub_nightly-0.16.1.dev202410200345.dist-info/WHEEL,sha256=OVMc5UfuAQiSplgO0_WdW7vXVGAt9Hdd6qtN4HotdyA,91
+keras_hub_nightly-0.16.1.dev202410200345.dist-info/top_level.txt,sha256=N4J6piIWBKa38A4uV-CnIopnOEf8mHAbkNXafXm_CuA,10
+keras_hub_nightly-0.16.1.dev202410200345.dist-info/RECORD,,
keras_hub/src/models/mix_transformer/__init__.py (deleted)
@@ -1,12 +0,0 @@
-from keras_hub.src.models.mix_transformer.mix_transformer_backbone import (
-    MiTBackbone,
-)
-from keras_hub.src.models.mix_transformer.mix_transformer_classifier import (
-    MiTImageClassifier,
-)
-from keras_hub.src.models.mix_transformer.mix_transformer_presets import (
-    backbone_presets,
-)
-from keras_hub.src.utils.preset_utils import register_presets
-
-register_presets(backbone_presets, MiTBackbone)