keras-hub-nightly 0.16.1.dev202410170342__py3-none-any.whl → 0.16.1.dev202410190340__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- keras_hub/api/layers/__init__.py +2 -4
- keras_hub/api/models/__init__.py +4 -8
- keras_hub/src/models/deeplab_v3/deeplab_v3_presets.py +3 -3
- keras_hub/src/models/densenet/densenet_presets.py +3 -3
- keras_hub/src/models/mit/__init__.py +6 -0
- keras_hub/src/models/{mix_transformer/mix_transformer_backbone.py → mit/mit_backbone.py} +3 -7
- keras_hub/src/models/{mix_transformer/mix_transformer_classifier.py → mit/mit_image_classifier.py} +2 -4
- keras_hub/src/models/{mix_transformer/mix_transformer_classifier_preprocessor.py → mit/mit_image_classifier_preprocessor.py} +2 -6
- keras_hub/src/models/{mix_transformer/mix_transformer_image_converter.py → mit/mit_image_converter.py} +1 -1
- keras_hub/src/models/mobilenet/mobilenet_backbone.py +1 -1
- keras_hub/src/models/pali_gemma/pali_gemma_backbone.py +0 -6
- keras_hub/src/models/pali_gemma/pali_gemma_presets.py +5 -5
- keras_hub/src/models/pali_gemma/pali_gemma_vit.py +0 -13
- keras_hub/src/models/preprocessor.py +16 -0
- keras_hub/src/models/resnet/resnet_backbone.py +1 -1
- keras_hub/src/models/resnet/resnet_presets.py +147 -6
- keras_hub/src/models/sam/sam_image_segmenter.py +1 -1
- keras_hub/src/models/sam/sam_presets.py +3 -3
- keras_hub/src/models/stable_diffusion_3/stable_diffusion_3_presets.py +1 -1
- keras_hub/src/models/task.py +5 -2
- keras_hub/src/models/vae/vae_backbone.py +13 -1
- keras_hub/src/models/vgg/__init__.py +4 -0
- keras_hub/src/models/vgg/vgg_backbone.py +1 -1
- keras_hub/src/models/vgg/vgg_image_classifier.py +4 -15
- keras_hub/src/models/vgg/vgg_image_classifier_preprocessor.py +12 -0
- keras_hub/src/models/vgg/vgg_image_converter.py +8 -0
- keras_hub/src/models/vit_det/vit_det_backbone.py +2 -2
- keras_hub/src/utils/pipeline_model.py +3 -3
- keras_hub/src/utils/timm/preset_loader.py +3 -1
- keras_hub/src/version_utils.py +1 -1
- {keras_hub_nightly-0.16.1.dev202410170342.dist-info → keras_hub_nightly-0.16.1.dev202410190340.dist-info}/METADATA +1 -1
- {keras_hub_nightly-0.16.1.dev202410170342.dist-info → keras_hub_nightly-0.16.1.dev202410190340.dist-info}/RECORD +36 -34
- keras_hub/src/models/mix_transformer/__init__.py +0 -12
- /keras_hub/src/models/{mix_transformer/mix_transformer_layers.py → mit/mit_layers.py} +0 -0
- /keras_hub/src/models/{mix_transformer/mix_transformer_presets.py → mit/mit_presets.py} +0 -0
- {keras_hub_nightly-0.16.1.dev202410170342.dist-info → keras_hub_nightly-0.16.1.dev202410190340.dist-info}/WHEEL +0 -0
- {keras_hub_nightly-0.16.1.dev202410170342.dist-info → keras_hub_nightly-0.16.1.dev202410190340.dist-info}/top_level.txt +0 -0
keras_hub/api/layers/__init__.py
CHANGED
@@ -40,9 +40,7 @@ from keras_hub.src.models.deeplab_v3.deeplab_v3_image_converter import (
 from keras_hub.src.models.densenet.densenet_image_converter import (
     DenseNetImageConverter,
 )
-from keras_hub.src.models.mix_transformer.mix_transformer_image_converter import (
-    MiTImageConverter,
-)
+from keras_hub.src.models.mit.mit_image_converter import MiTImageConverter
 from keras_hub.src.models.pali_gemma.pali_gemma_image_converter import (
     PaliGemmaImageConverter,
 )
@@ -52,7 +50,7 @@ from keras_hub.src.models.resnet.resnet_image_converter import (
 from keras_hub.src.models.sam.sam_image_converter import SAMImageConverter
 from keras_hub.src.models.sam.sam_mask_decoder import SAMMaskDecoder
 from keras_hub.src.models.sam.sam_prompt_encoder import SAMPromptEncoder
-from keras_hub.src.models.vgg.vgg_image_classifier import VGGImageConverter
+from keras_hub.src.models.vgg.vgg_image_converter import VGGImageConverter
 from keras_hub.src.models.whisper.whisper_audio_converter import (
     WhisperAudioConverter,
 )
keras_hub/api/models/__init__.py
CHANGED
@@ -202,13 +202,9 @@ from keras_hub.src.models.mistral.mistral_causal_lm_preprocessor import (
     MistralCausalLMPreprocessor,
 )
 from keras_hub.src.models.mistral.mistral_tokenizer import MistralTokenizer
-from keras_hub.src.models.mix_transformer.mix_transformer_backbone import (
-    MiTBackbone,
-)
-from keras_hub.src.models.mix_transformer.mix_transformer_classifier import (
-    MiTImageClassifier,
-)
-from keras_hub.src.models.mix_transformer.mix_transformer_classifier_preprocessor import (
+from keras_hub.src.models.mit.mit_backbone import MiTBackbone
+from keras_hub.src.models.mit.mit_image_classifier import MiTImageClassifier
+from keras_hub.src.models.mit.mit_image_classifier_preprocessor import (
     MiTImageClassifierPreprocessor,
 )
 from keras_hub.src.models.mobilenet.mobilenet_backbone import MobileNetBackbone
@@ -299,7 +295,7 @@ from keras_hub.src.models.text_classifier_preprocessor import (
 from keras_hub.src.models.text_to_image import TextToImage
 from keras_hub.src.models.vgg.vgg_backbone import VGGBackbone
 from keras_hub.src.models.vgg.vgg_image_classifier import VGGImageClassifier
-from keras_hub.src.models.vgg.vgg_image_classifier import (
+from keras_hub.src.models.vgg.vgg_image_classifier_preprocessor import (
     VGGImageClassifierPreprocessor,
 )
 from keras_hub.src.models.vit_det.vit_det_backbone import ViTDetBackbone
keras_hub/src/models/deeplab_v3/deeplab_v3_presets.py
CHANGED
@@ -1,7 +1,7 @@
 """DeepLabV3 preset configurations."""
 
 backbone_presets = {
-    "
+    "deeplab_v3_plus_resnet50_pascalvoc": {
         "metadata": {
             "description": (
                 "DeepLabV3+ model with ResNet50 as image encoder and trained on "
@@ -10,9 +10,9 @@ backbone_presets = {
             ),
             "params": 39190656,
             "official_name": "DeepLabV3",
-            "path": "
+            "path": "deeplab_v3",
             "model_card": "https://arxiv.org/abs/1802.02611",
         },
-        "kaggle_handle": "kaggle://keras/
+        "kaggle_handle": "kaggle://keras/deeplabv3plus/keras/deeplab_v3_plus_resnet50_pascalvoc/3",
     },
 }
keras_hub/src/models/densenet/densenet_presets.py
CHANGED
@@ -12,7 +12,7 @@ backbone_presets = {
             "path": "densenet",
             "model_card": "https://arxiv.org/abs/1608.06993",
         },
-        "kaggle_handle": "kaggle://keras/densenet/keras/densenet_121_imagenet/
+        "kaggle_handle": "kaggle://keras/densenet/keras/densenet_121_imagenet/2",
     },
     "densenet_169_imagenet": {
         "metadata": {
@@ -25,7 +25,7 @@ backbone_presets = {
             "path": "densenet",
             "model_card": "https://arxiv.org/abs/1608.06993",
         },
-        "kaggle_handle": "kaggle://keras/densenet/keras/densenet_169_imagenet/
+        "kaggle_handle": "kaggle://keras/densenet/keras/densenet_169_imagenet/2",
     },
     "densenet_201_imagenet": {
         "metadata": {
@@ -38,6 +38,6 @@ backbone_presets = {
             "path": "densenet",
             "model_card": "https://arxiv.org/abs/1608.06993",
         },
-        "kaggle_handle": "kaggle://keras/densenet/keras/densenet_201_imagenet/
+        "kaggle_handle": "kaggle://keras/densenet/keras/densenet_201_imagenet/2",
     },
 }
keras_hub/src/models/mit/__init__.py
ADDED
@@ -0,0 +1,6 @@
+from keras_hub.src.models.mit.mit_backbone import MiTBackbone
+from keras_hub.src.models.mit.mit_image_classifier import MiTImageClassifier
+from keras_hub.src.models.mit.mit_presets import backbone_presets
+from keras_hub.src.utils.preset_utils import register_presets
+
+register_presets(backbone_presets, MiTBackbone)
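With the rename in place, all MiT symbols live under `keras_hub.src.models.mit`. A minimal sketch of the new-style imports, assuming this nightly build is installed (the preset name is taken from the `MiTBackbone` docstring below):

```python
# Assumes the keras-hub nightly from this diff is installed.
from keras_hub.src.models.mit.mit_backbone import MiTBackbone

# The public export path is unchanged by the rename:
import keras_hub

backbone = keras_hub.models.MiTBackbone.from_preset("mit_b0_ade20k_512")
```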
keras_hub/src/models/{mix_transformer/mix_transformer_backbone.py → mit/mit_backbone.py}
RENAMED
@@ -4,12 +4,8 @@ from keras import ops
 
 from keras_hub.src.api_export import keras_hub_export
 from keras_hub.src.models.feature_pyramid_backbone import FeaturePyramidBackbone
-from keras_hub.src.models.mix_transformer.mix_transformer_layers import (
-    HierarchicalTransformerEncoder,
-)
-from keras_hub.src.models.mix_transformer.mix_transformer_layers import (
-    OverlappingPatchingAndEmbedding,
-)
+from keras_hub.src.models.mit.mit_layers import HierarchicalTransformerEncoder
+from keras_hub.src.models.mit.mit_layers import OverlappingPatchingAndEmbedding
 
 
 @keras_hub_export("keras_hub.models.MiTBackbone")
@@ -61,7 +57,7 @@ class MiTBackbone(FeaturePyramidBackbone):
     ```python
     images = np.ones(shape=(1, 96, 96, 3))
     labels = np.zeros(shape=(1, 96, 96, 1))
-    backbone = keras_hub.models.MiTBackbone.from_preset("
+    backbone = keras_hub.models.MiTBackbone.from_preset("mit_b0_ade20k_512")
 
     # Evaluate model
     model(images)
keras_hub/src/models/{mix_transformer/mix_transformer_classifier.py → mit/mit_image_classifier.py}
RENAMED
@@ -1,9 +1,7 @@
 from keras_hub.src.api_export import keras_hub_export
 from keras_hub.src.models.image_classifier import ImageClassifier
-from keras_hub.src.models.mix_transformer.mix_transformer_backbone import (
-    MiTBackbone,
-)
-from keras_hub.src.models.mix_transformer.mix_transformer_classifier_preprocessor import (
+from keras_hub.src.models.mit.mit_backbone import MiTBackbone
+from keras_hub.src.models.mit.mit_image_classifier_preprocessor import (
     MiTImageClassifierPreprocessor,
 )
 
keras_hub/src/models/{mix_transformer/mix_transformer_classifier_preprocessor.py → mit/mit_image_classifier_preprocessor.py}
RENAMED
@@ -2,12 +2,8 @@ from keras_hub.src.api_export import keras_hub_export
 from keras_hub.src.models.image_classifier_preprocessor import (
     ImageClassifierPreprocessor,
 )
-from keras_hub.src.models.mix_transformer.mix_transformer_backbone import (
-    MiTBackbone,
-)
-from keras_hub.src.models.mix_transformer.mix_transformer_image_converter import (
-    MiTImageConverter,
-)
+from keras_hub.src.models.mit.mit_backbone import MiTBackbone
+from keras_hub.src.models.mit.mit_image_converter import MiTImageConverter
 
 
 @keras_hub_export("keras_hub.models.MiTImageClassifierPreprocessor")
keras_hub/src/models/{mix_transformer/mix_transformer_image_converter.py → mit/mit_image_converter.py}
RENAMED
@@ -1,6 +1,6 @@
 from keras_hub.src.api_export import keras_hub_export
 from keras_hub.src.layers.preprocessing.image_converter import ImageConverter
-from keras_hub.src.models.mix_transformer import MiTBackbone
+from keras_hub.src.models.mit import MiTBackbone
 
 
 @keras_hub_export("keras_hub.layers.MiTImageConverter")
keras_hub/src/models/mobilenet/mobilenet_backbone.py
CHANGED
@@ -96,7 +96,7 @@ class MobileNetBackbone(Backbone):
         stackwise_activation,
         output_num_filters,
         inverted_res_block,
-        image_shape=(
+        image_shape=(None, None, 3),
         input_activation="hard_swish",
         output_activation="hard_swish",
         depth_multiplier=1.0,
keras_hub/src/models/pali_gemma/pali_gemma_backbone.py
CHANGED
@@ -61,8 +61,6 @@ class PaliGemmaBackbone(Backbone):
         vit_classifier_activation: activation function. The activation that
             is used for final output classification in the vision transformer.
         vit_name: string. The name used for vision transformer layers.
-        include_rescaling: bool. If true, the image input will be rescaled from
-            the range `[0, 255]`, to the range `[0, 1]`.
         layer_norm_epsilon: float. The epsilon value user for every layer norm
             in all transformer blocks.
         dropout: float. Dropout probability for the Transformer decoder blocks.
@@ -121,7 +119,6 @@ class PaliGemmaBackbone(Backbone):
         vit_pooling=None,
         vit_classifier_activation=None,
         vit_name=None,
-        include_rescaling=True,
         layer_norm_epsilon=1e-6,
         dropout=0,
         dtype=None,
@@ -145,7 +142,6 @@ class PaliGemmaBackbone(Backbone):
         vit_intermediate_dim = vit_intermediate_dim or 4304
         self.vit_encoder = PaliGemmaVit(
             image_size=image_size,
-            include_rescaling=include_rescaling,
             patch_size=vit_patch_size,
             num_heads=vit_num_heads,
             hidden_dim=vit_hidden_dim,
@@ -215,7 +211,6 @@ class PaliGemmaBackbone(Backbone):
         # === Config ===
         self.vocabulary_size = vocabulary_size
         self.image_size = image_size
-        self.include_rescaling = include_rescaling
         self.num_layers = num_layers
         self.num_query_heads = num_query_heads
         self.num_key_value_heads = num_key_value_heads
@@ -242,7 +237,6 @@ class PaliGemmaBackbone(Backbone):
             {
                 "vocabulary_size": self.vocabulary_size,
                 "image_size": self.image_size,
-                "include_rescaling": self.include_rescaling,
                 "num_layers": self.num_layers,
                 "num_query_heads": self.num_query_heads,
                 "num_key_value_heads": self.num_key_value_heads,
keras_hub/src/models/pali_gemma/pali_gemma_presets.py
CHANGED
@@ -12,7 +12,7 @@ backbone_presets = {
             "path": "pali_gemma",
             "model_card": "https://www.kaggle.com/models/google/paligemma",
         },
-        "kaggle_handle": "kaggle://keras/paligemma/keras/pali_gemma_3b_mix_224/
+        "kaggle_handle": "kaggle://keras/paligemma/keras/pali_gemma_3b_mix_224/3",
     },
     "pali_gemma_3b_mix_448": {
         "metadata": {
@@ -24,7 +24,7 @@ backbone_presets = {
             "path": "pali_gemma",
             "model_card": "https://www.kaggle.com/models/google/paligemma",
         },
-        "kaggle_handle": "kaggle://keras/paligemma/keras/pali_gemma_3b_mix_448/
+        "kaggle_handle": "kaggle://keras/paligemma/keras/pali_gemma_3b_mix_448/3",
     },
     "pali_gemma_3b_224": {
         "metadata": {
@@ -36,7 +36,7 @@ backbone_presets = {
             "path": "pali_gemma",
             "model_card": "https://www.kaggle.com/models/google/paligemma",
         },
-        "kaggle_handle": "kaggle://keras/paligemma/keras/pali_gemma_3b_224/
+        "kaggle_handle": "kaggle://keras/paligemma/keras/pali_gemma_3b_224/3",
     },
     "pali_gemma_3b_448": {
         "metadata": {
@@ -48,7 +48,7 @@ backbone_presets = {
             "path": "pali_gemma",
             "model_card": "https://www.kaggle.com/models/google/paligemma",
         },
-        "kaggle_handle": "kaggle://keras/paligemma/keras/pali_gemma_3b_448/
+        "kaggle_handle": "kaggle://keras/paligemma/keras/pali_gemma_3b_448/3",
     },
     "pali_gemma_3b_896": {
         "metadata": {
@@ -60,6 +60,6 @@ backbone_presets = {
             "path": "pali_gemma",
             "model_card": "https://www.kaggle.com/models/google/paligemma",
         },
-        "kaggle_handle": "kaggle://keras/paligemma/keras/pali_gemma_3b_896/
+        "kaggle_handle": "kaggle://keras/paligemma/keras/pali_gemma_3b_896/3",
     },
 }
keras_hub/src/models/pali_gemma/pali_gemma_vit.py
CHANGED
@@ -410,8 +410,6 @@ class PaliGemmaVit(keras.Model):
     Args:
         image_size: int. The height/width of the image. Both height and width is
             expected to be the same.
-        include_rescaling: bool. If true, the image input will be rescaled from
-            the range `[0, 255]`, to the range `[0, 1]`.
         patch_size: int. The size of each square patch in the input image.
         num_heads: int. The number of attention heads for the vision(image)
             transformer encoder.
@@ -452,7 +450,6 @@ class PaliGemmaVit(keras.Model):
         num_layers,
         intermediate_dim,
         num_classes,
-        include_rescaling=True,
         pooling=None,
         classifier_activation=None,
         dtype=None,
@@ -463,14 +460,6 @@ class PaliGemmaVit(keras.Model):
             shape=(image_size, image_size, 3), name="images"
         )
         x = image_input  # Intermediate result.
-        # TODO we have moved this rescaling to preprocessing layers for most
-        # models. We should consider removing it here, though it would break
-        # compatibility.
-        if include_rescaling:
-            rescaling = keras.layers.Rescaling(
-                scale=1.0 / 127.5, offset=-1.0, name="rescaling"
-            )
-            x = rescaling(image_input)
         x = PaliGemmaVitEncoder(
             hidden_dim=hidden_dim,
             num_layers=num_layers,
@@ -520,7 +509,6 @@ class PaliGemmaVit(keras.Model):
         self.pooling = pooling
         self.num_classes = num_classes
         self.image_size = image_size
-        self.include_rescaling = include_rescaling
         self.patch_size = patch_size
         self.classifier_activation = keras.activations.get(
             classifier_activation
@@ -549,7 +537,6 @@ class PaliGemmaVit(keras.Model):
                 self.classifier_activation
             ),
             "image_size": self.image_size,
-            "include_rescaling": self.include_rescaling,
             "patch_size": self.patch_size,
         }
     )
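With `include_rescaling` gone, `PaliGemmaVit` no longer rescales pixel values internally; per the removed TODO comment, that responsibility moves to preprocessing. A minimal sketch of applying the equivalent rescaling yourself, with the scale and offset taken from the removed block:

```python
import keras
import numpy as np

# Reproduces the rescaling the backbone used to apply: [0, 255] -> [-1, 1].
rescale = keras.layers.Rescaling(scale=1.0 / 127.5, offset=-1.0)
images = np.random.randint(0, 256, size=(1, 224, 224, 3)).astype("float32")
scaled = rescale(images)  # Pass `scaled` to the vision encoder, not raw bytes.
```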
keras_hub/src/models/preprocessor.py
CHANGED
@@ -71,6 +71,22 @@ class Preprocessor(PreprocessingLayer):
     def image_converter(self, value):
         self._image_converter = value
 
+    @property
+    def image_size(self):
+        """Shortcut to get/set the image size of the image converter."""
+        if self.image_converter is None:
+            return None
+        return self.image_converter.image_size
+
+    @image_size.setter
+    def image_size(self, value):
+        if self.image_converter is None:
+            raise ValueError(
+                "Cannot set `image_size` on preprocessor if `image_converter` "
+                " is `None`."
+            )
+        self.image_converter.image_size = value
+
     def get_config(self):
         config = super().get_config()
         if self.tokenizer:
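The new property simply forwards to the attached image converter, so a task's preprocessing resolution can be read or changed in one line. A hedged usage sketch — the preset name is illustrative, borrowed from the ResNet presets elsewhere in this diff:

```python
import keras_hub

# Illustrative only; any preset with an image converter should behave the same.
preprocessor = keras_hub.models.ImageClassifierPreprocessor.from_preset(
    "resnet_50_imagenet"
)
print(preprocessor.image_size)        # Reads from the image converter.
preprocessor.image_size = (512, 512)  # Writes through to the converter.
```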
keras_hub/src/models/resnet/resnet_backbone.py
CHANGED
@@ -68,7 +68,7 @@ class ResNetBackbone(FeaturePyramidBackbone):
     input_data = np.random.uniform(0, 1, size=(2, 224, 224, 3))
 
     # Pretrained ResNet backbone.
-    model = keras_hub.models.ResNetBackbone.from_preset("
+    model = keras_hub.models.ResNetBackbone.from_preset("resnet_50_imagenet")
     model(input_data)
 
     # Randomly initialized ResNetV2 backbone with a custom config.
keras_hub/src/models/resnet/resnet_presets.py
CHANGED
@@ -12,7 +12,7 @@ backbone_presets = {
             "path": "resnet",
             "model_card": "https://arxiv.org/abs/2110.00476",
         },
-        "kaggle_handle": "kaggle://keras/resnetv1/keras/resnet_18_imagenet/
+        "kaggle_handle": "kaggle://keras/resnetv1/keras/resnet_18_imagenet/2",
     },
     "resnet_50_imagenet": {
         "metadata": {
@@ -25,7 +25,7 @@ backbone_presets = {
             "path": "resnet",
             "model_card": "https://arxiv.org/abs/2110.00476",
         },
-        "kaggle_handle": "kaggle://keras/resnetv1/keras/resnet_50_imagenet/
+        "kaggle_handle": "kaggle://keras/resnetv1/keras/resnet_50_imagenet/2",
     },
     "resnet_101_imagenet": {
         "metadata": {
@@ -38,7 +38,7 @@ backbone_presets = {
             "path": "resnet",
             "model_card": "https://arxiv.org/abs/2110.00476",
         },
-        "kaggle_handle": "kaggle://keras/resnetv1/keras/resnet_101_imagenet/
+        "kaggle_handle": "kaggle://keras/resnetv1/keras/resnet_101_imagenet/2",
     },
     "resnet_152_imagenet": {
         "metadata": {
@@ -51,7 +51,7 @@ backbone_presets = {
             "path": "resnet",
             "model_card": "https://arxiv.org/abs/2110.00476",
         },
-        "kaggle_handle": "kaggle://keras/resnetv1/keras/resnet_152_imagenet/
+        "kaggle_handle": "kaggle://keras/resnetv1/keras/resnet_152_imagenet/2",
     },
     "resnet_v2_50_imagenet": {
         "metadata": {
@@ -64,7 +64,7 @@ backbone_presets = {
             "path": "resnet",
             "model_card": "https://arxiv.org/abs/2110.00476",
         },
-        "kaggle_handle": "kaggle://keras/resnetv2/keras/resnet_v2_50_imagenet/
+        "kaggle_handle": "kaggle://keras/resnetv2/keras/resnet_v2_50_imagenet/2",
     },
     "resnet_v2_101_imagenet": {
         "metadata": {
@@ -77,6 +77,147 @@ backbone_presets = {
             "path": "resnet",
             "model_card": "https://arxiv.org/abs/2110.00476",
         },
-        "kaggle_handle": "kaggle://keras/resnetv2/keras/resnet_v2_101_imagenet/
+        "kaggle_handle": "kaggle://keras/resnetv2/keras/resnet_v2_101_imagenet/2",
+    },
+    "resnet_vd_18_imagenet": {
+        "metadata": {
+            "description": (
+                "18-layer ResNetVD (ResNet with bag of tricks) model "
+                "pre-trained on the ImageNet 1k dataset at a 224x224 "
+                "resolution."
+            ),
+            "params": 11722824,
+            "official_name": "ResNet",
+            "path": "resnet",
+            "model_card": "https://arxiv.org/abs/1812.01187",
+        },
+        "kaggle_handle": "kaggle://kerashub/resnetvd/keras/resnet_vd_18_imagenet",
+    },
+    "resnet_vd_34_imagenet": {
+        "metadata": {
+            "description": (
+                "34-layer ResNetVD (ResNet with bag of tricks) model "
+                "pre-trained on the ImageNet 1k dataset at a 224x224 "
+                "resolution."
+            ),
+            "params": 21838408,
+            "official_name": "ResNet",
+            "path": "resnet",
+            "model_card": "https://arxiv.org/abs/1812.01187",
+        },
+        "kaggle_handle": "kaggle://kerashub/resnetvd/keras/resnet_vd_34_imagenet",
+    },
+    "resnet_vd_50_imagenet": {
+        "metadata": {
+            "description": (
+                "50-layer ResNetVD (ResNet with bag of tricks) model "
+                "pre-trained on the ImageNet 1k dataset at a 224x224 "
+                "resolution."
+            ),
+            "params": 25629512,
+            "official_name": "ResNet",
+            "path": "resnet",
+            "model_card": "https://arxiv.org/abs/1812.01187",
+        },
+        "kaggle_handle": "kaggle://kerashub/resnetvd/keras/resnet_vd_50_imagenet",
+    },
+    "resnet_vd_50_ssld_imagenet": {
+        "metadata": {
+            "description": (
+                "50-layer ResNetVD (ResNet with bag of tricks) model "
+                "pre-trained on the ImageNet 1k dataset at a 224x224 "
+                "resolution with knowledge distillation."
+            ),
+            "params": 25629512,
+            "official_name": "ResNet",
+            "path": "resnet",
+            "model_card": "https://arxiv.org/abs/1812.01187",
+        },
+        "kaggle_handle": "kaggle://kerashub/resnetvd/keras/resnet_vd_50_ssld_imagenet",
+    },
+    "resnet_vd_50_ssld_v2_imagenet": {
+        "metadata": {
+            "description": (
+                "50-layer ResNetVD (ResNet with bag of tricks) model "
+                "pre-trained on the ImageNet 1k dataset at a 224x224 "
+                "resolution with knowledge distillation and AutoAugment."
+            ),
+            "params": 25629512,
+            "official_name": "ResNet",
+            "path": "resnet",
+            "model_card": "https://arxiv.org/abs/1812.01187",
+        },
+        "kaggle_handle": "kaggle://kerashub/resnetvd/keras/resnet_vd_50_ssld_v2_imagenet",
+    },
+    "resnet_vd_50_ssld_v2_fix_imagenet": {
+        "metadata": {
+            "description": (
+                "50-layer ResNetVD (ResNet with bag of tricks) model "
+                "pre-trained on the ImageNet 1k dataset at a 224x224 "
+                "resolution with knowledge distillation, AutoAugment and "
+                "additional fine-tuning of the classification head."
+            ),
+            "params": 25629512,
+            "official_name": "ResNet",
+            "path": "resnet",
+            "model_card": "https://arxiv.org/abs/1812.01187",
+        },
+        "kaggle_handle": "kaggle://kerashub/resnetvd/keras/resnet_vd_50_ssld_v2_fix_imagenet",
+    },
+    "resnet_vd_101_imagenet": {
+        "metadata": {
+            "description": (
+                "101-layer ResNetVD (ResNet with bag of tricks) model "
+                "pre-trained on the ImageNet 1k dataset at a 224x224 "
+                "resolution."
+            ),
+            "params": 44673864,
+            "official_name": "ResNet",
+            "path": "resnet",
+            "model_card": "https://arxiv.org/abs/1812.01187",
+        },
+        "kaggle_handle": "kaggle://kerashub/resnetvd/keras/resnet_vd_101_imagenet",
+    },
+    "resnet_vd_101_ssld_imagenet": {
+        "metadata": {
+            "description": (
+                "101-layer ResNetVD (ResNet with bag of tricks) model "
+                "pre-trained on the ImageNet 1k dataset at a 224x224 "
+                "resolution with knowledge distillation."
+            ),
+            "params": 44673864,
+            "official_name": "ResNet",
+            "path": "resnet",
+            "model_card": "https://arxiv.org/abs/1812.01187",
+        },
+        "kaggle_handle": "kaggle://kerashub/resnetvd/keras/resnet_vd_101_ssld_imagenet",
+    },
+    "resnet_vd_152_imagenet": {
+        "metadata": {
+            "description": (
+                "152-layer ResNetVD (ResNet with bag of tricks) model "
+                "pre-trained on the ImageNet 1k dataset at a 224x224 "
+                "resolution."
+            ),
+            "params": 60363592,
+            "official_name": "ResNet",
+            "path": "resnet",
+            "model_card": "https://arxiv.org/abs/1812.01187",
+        },
+        "kaggle_handle": "kaggle://kerashub/resnetvd/keras/resnet_vd_152_imagenet",
+    },
+    "resnet_vd_200_imagenet": {
+        "metadata": {
+            "description": (
+                "200-layer ResNetVD (ResNet with bag of tricks) model "
+                "pre-trained on the ImageNet 1k dataset at a 224x224 "
+                "resolution."
+            ),
+            "params": 74933064,
+            "official_name": "ResNet",
+            "path": "resnet",
+            "model_card": "https://arxiv.org/abs/1812.01187",
+        },
+        "kaggle_handle": "kaggle://kerashub/resnetvd/keras/resnet_vd_200_imagenet",
     },
 }
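The ten new `resnet_vd_*` entries register against `ResNetBackbone` like the existing presets; note their Kaggle handles carry no trailing version segment yet. A hedged loading sketch, mirroring the docstring example above:

```python
import numpy as np
import keras_hub

# Illustrative only: load one of the ResNetVD presets added in this release.
backbone = keras_hub.models.ResNetBackbone.from_preset("resnet_vd_50_imagenet")
features = backbone(np.ones((1, 224, 224, 3), dtype="float32"))
```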
keras_hub/src/models/sam/sam_presets.py
CHANGED
@@ -9,7 +9,7 @@ backbone_presets = {
             "path": "sam",
             "model_card": "https://arxiv.org/abs/2304.02643",
         },
-        "kaggle_handle": "kaggle://keras/sam/keras/sam_base_sa1b/
+        "kaggle_handle": "kaggle://keras/sam/keras/sam_base_sa1b/4",
     },
     "sam_large_sa1b": {
         "metadata": {
@@ -19,7 +19,7 @@ backbone_presets = {
             "path": "sam",
             "model_card": "https://arxiv.org/abs/2304.02643",
         },
-        "kaggle_handle": "kaggle://keras/sam/keras/sam_large_sa1b/
+        "kaggle_handle": "kaggle://keras/sam/keras/sam_large_sa1b/4",
     },
     "sam_huge_sa1b": {
         "metadata": {
@@ -29,6 +29,6 @@ backbone_presets = {
             "path": "sam",
             "model_card": "https://arxiv.org/abs/2304.02643",
         },
-        "kaggle_handle": "kaggle://keras/sam/keras/sam_huge_sa1b/
+        "kaggle_handle": "kaggle://keras/sam/keras/sam_huge_sa1b/4",
     },
 }
keras_hub/src/models/stable_diffusion_3/stable_diffusion_3_presets.py
CHANGED
@@ -10,7 +10,7 @@ backbone_presets = {
             ),
             "params": 2987080931,
             "official_name": "StableDiffusion3",
-            "path": "
+            "path": "stable_diffusion_3",
             "model_card": "https://arxiv.org/abs/2110.00476",
         },
         "kaggle_handle": "kaggle://keras/stablediffusion3/keras/stable_diffusion_3_medium/1",
keras_hub/src/models/task.py
CHANGED
@@ -280,7 +280,7 @@ class Task(PipelineModel):
 
     def highlight_number(x):
         if x is None:
-            f"[color(45)]{x}[/]"
+            return f"[color(45)]{x}[/]"
         return f"[color(34)]{x:,}[/]"  # Format number with commas.
 
     def highlight_symbol(x):
@@ -339,7 +339,10 @@ class Task(PipelineModel):
             add_layer(layer, info)
         elif isinstance(layer, ImageConverter):
             info = "Image size: "
-            info += highlight_shape(layer.image_size)
+            image_size = layer.image_size
+            if image_size is None:
+                image_size = (None, None)
+            info += highlight_shape(image_size)
             add_layer(layer, info)
         elif isinstance(layer, AudioConverter):
             info = "Audio shape: "
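The first hunk adds a `return` the `None` branch was missing: the old code built the markup string, discarded it, and fell through to `f"{x:,}"`, which raises `TypeError` for `None`. A standalone sketch of the before/after behavior (not the library code itself):

```python
def highlight_number_old(x):
    if x is None:
        f"[color(45)]{x}[/]"  # Built and discarded: no `return`.
    return f"[color(34)]{x:,}[/]"  # f"{None:,}" raises TypeError here.

def highlight_number_fixed(x):
    if x is None:
        return f"[color(45)]{x}[/]"
    return f"[color(34)]{x:,}[/]"

print(highlight_number_fixed(None))   # [color(45)]None[/]
print(highlight_number_fixed(12345))  # [color(34)]12,345[/]
```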
keras_hub/src/models/vae/vae_backbone.py
CHANGED
@@ -10,7 +10,7 @@ from keras_hub.src.utils.keras_utils import standardize_data_format
 
 
 class VAEBackbone(Backbone):
-    """VAE backbone used in latent diffusion models.
+    """Variational Autoencoder(VAE) backbone used in latent diffusion models.
 
     When encoding, this model generates mean and log variance of the input
     images. When decoding, it reconstructs images from the latent space.
@@ -51,6 +51,18 @@ class VAEBackbone(Backbone):
         `"channels_last"`.
         dtype: `None` or str or `keras.mixed_precision.DTypePolicy`. The dtype
             to use for the model's computations and weights.
+
+    Example:
+    ```Python
+    backbone = VAEBackbone(
+        encoder_num_filters=[32, 32, 32, 32],
+        encoder_num_blocks=[1, 1, 1, 1],
+        decoder_num_filters=[32, 32, 32, 32],
+        decoder_num_blocks=[1, 1, 1, 1],
+    )
+    input_data = ops.ones((2, self.height, self.width, 3))
+    output = backbone(input_data)
+    ```
     """
 
     def __init__(
keras_hub/src/models/vgg/vgg_backbone.py
CHANGED
@@ -20,7 +20,7 @@ class VGGBackbone(Backbone):
         stackwise_num_filters: list of ints, filter size for convolutional
             blocks per VGG block. For both VGG16 and VGG19 this is [
             64, 128, 256, 512, 512].
-        image_shape: tuple, optional shape tuple, defaults to (
+        image_shape: tuple, optional shape tuple, defaults to (None, None, 3).
 
     Examples:
     ```python
keras_hub/src/models/vgg/vgg_image_classifier.py
CHANGED
@@ -1,24 +1,12 @@
 import keras
 
 from keras_hub.src.api_export import keras_hub_export
-from keras_hub.src.layers.preprocessing.image_converter import ImageConverter
 from keras_hub.src.models.image_classifier import ImageClassifier
-from keras_hub.src.models.image_classifier_preprocessor import (
-    ImageClassifierPreprocessor,
-)
 from keras_hub.src.models.task import Task
 from keras_hub.src.models.vgg.vgg_backbone import VGGBackbone
-
-
-class VGGImageConverter(ImageConverter):
-    backbone_cls = VGGBackbone
-
-
-@keras_hub_export("keras_hub.models.VGGImageClassifierPreprocessor")
-class VGGImageClassifierPreprocessor(ImageClassifierPreprocessor):
-    backbone_cls = VGGBackbone
-    image_converter_cls = VGGImageConverter
+from keras_hub.src.models.vgg.vgg_image_classifier_preprocessor import (
+    VGGImageClassifierPreprocessor,
+)
 
 
 @keras_hub_export("keras_hub.models.VGGImageClassifier")
@@ -211,6 +199,7 @@ class VGGImageClassifier(ImageClassifier):
         self.pooling = pooling
         self.pooling_hidden_dim = pooling_hidden_dim
         self.dropout = dropout
+        self.preprocessor = preprocessor
 
     def get_config(self):
         # Backbone serialized in `super`
keras_hub/src/models/vgg/vgg_image_classifier_preprocessor.py
ADDED
@@ -0,0 +1,12 @@
+from keras_hub.src.api_export import keras_hub_export
+from keras_hub.src.models.image_classifier_preprocessor import (
+    ImageClassifierPreprocessor,
+)
+from keras_hub.src.models.vgg.vgg_backbone import VGGBackbone
+from keras_hub.src.models.vgg.vgg_image_converter import VGGImageConverter
+
+
+@keras_hub_export("keras_hub.models.VGGImageClassifierPreprocessor")
+class VGGImageClassifierPreprocessor(ImageClassifierPreprocessor):
+    backbone_cls = VGGBackbone
+    image_converter_cls = VGGImageConverter
keras_hub/src/models/vgg/vgg_image_converter.py
ADDED
@@ -0,0 +1,8 @@
+from keras_hub.src.api_export import keras_hub_export
+from keras_hub.src.layers.preprocessing.image_converter import ImageConverter
+from keras_hub.src.models.vgg.vgg_backbone import VGGBackbone
+
+
+@keras_hub_export("keras_hub.layers.VGGImageConverter")
+class VGGImageConverter(ImageConverter):
+    backbone_cls = VGGBackbone
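These two new files give VGG the same converter/preprocessor layout as the other vision models, replacing the classes previously defined inline in `vgg_image_classifier.py`. A small sketch of the wiring — the `image_size` kwarg is an assumption inherited from the shared `ImageConverter` base, as used by the timm loader below:

```python
from keras_hub.src.models.vgg.vgg_image_classifier_preprocessor import (
    VGGImageClassifierPreprocessor,
)
from keras_hub.src.models.vgg.vgg_image_converter import VGGImageConverter

# The preprocessor resolves its converter class via `image_converter_cls`.
assert VGGImageClassifierPreprocessor.image_converter_cls is VGGImageConverter
converter = VGGImageConverter(image_size=(224, 224))  # kwarg assumed, see above
```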
keras_hub/src/models/vit_det/vit_det_backbone.py
CHANGED
@@ -31,7 +31,7 @@ class ViTDetBackbone(Backbone):
         global_attention_layer_indices (list): Indexes for blocks using
             global attention.
         image_shape (tuple[int], optional): The size of the input image in
-            `(H, W, C)` format. Defaults to `(
+            `(H, W, C)` format. Defaults to `(None, None, 3)`.
         patch_size (int, optional): the patch size to be supplied to the
             Patching layer to turn input images into a flattened sequence of
             patches. Defaults to `16`.
@@ -79,7 +79,7 @@ class ViTDetBackbone(Backbone):
         intermediate_dim,
         num_heads,
         global_attention_layer_indices,
-        image_shape=(
+        image_shape=(None, None, 3),
         patch_size=16,
         num_output_channels=256,
         use_bias=True,
keras_hub/src/utils/pipeline_model.py
CHANGED
@@ -232,7 +232,7 @@ class PipelineModel(keras.Model):
     ):
         data = self.preprocess_samples(x, y, sample_weight)
         x, y, sample_weight = keras.utils.unpack_x_y_sample_weight(data)
-        x = ops.convert_to_tensor(x)
+        x = tree.map_structure(ops.convert_to_tensor, x)
         if y is not None:
             y = ops.convert_to_tensor(y)
         if sample_weight is not None:
@@ -253,7 +253,7 @@ class PipelineModel(keras.Model):
     ):
         data = self.preprocess_samples(x, y, sample_weight)
         x, y, sample_weight = keras.utils.unpack_x_y_sample_weight(data)
-        x = ops.convert_to_tensor(x)
+        x = tree.map_structure(ops.convert_to_tensor, x)
         if y is not None:
             y = ops.convert_to_tensor(y)
         if sample_weight is not None:
@@ -272,7 +272,7 @@ class PipelineModel(keras.Model):
     ):
         data = self.preprocess_samples(x)
         x, _, _ = keras.utils.unpack_x_y_sample_weight(data)
-        x = ops.convert_to_tensor(x)
+        x = tree.map_structure(ops.convert_to_tensor, x)
         return super().predict_on_batch(
             x=x,
             **kwargs,
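A bare `ops.convert_to_tensor(x)` only handles a single array, while preprocessed `x` can be a nested structure such as a dict of features; `tree.map_structure` converts every leaf instead. A minimal illustration, assuming the Keras 3 `keras.tree` and `keras.ops` namespaces:

```python
import numpy as np
from keras import ops, tree

x = {"images": np.ones((2, 8, 8, 3)), "masks": np.zeros((2, 8, 8, 1))}

# ops.convert_to_tensor(x) would fail on a dict; map over the leaves instead.
x = tree.map_structure(ops.convert_to_tensor, x)
```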
keras_hub/src/utils/timm/preset_loader.py
CHANGED
@@ -53,10 +53,11 @@ class TimmPresetLoader(PresetLoader):
 
     def load_image_converter(self, cls, **kwargs):
         pretrained_cfg = self.config.get("pretrained_cfg", None)
-        if not pretrained_cfg:
+        if not pretrained_cfg or "input_size" not in pretrained_cfg:
             return None
         # This assumes the same basic setup for all timm preprocessing, We may
         # need to extend this as we cover more model types.
+        input_size = pretrained_cfg["input_size"]
         mean = pretrained_cfg["mean"]
         std = pretrained_cfg["std"]
         scale = [1.0 / 255.0 / s for s in std]
@@ -65,6 +66,7 @@ class TimmPresetLoader(PresetLoader):
         if interpolation not in ("bilinear", "nearest", "bicubic"):
             interpolation = "bilinear"  # Unsupported interpolation type.
         return cls(
+            image_size=input_size[1:],
             scale=scale,
             offset=offset,
             interpolation=interpolation,
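timm stores `input_size` channels-first as `(C, H, W)`, so `input_size[1:]` is the `(H, W)` pair the converter expects; the widened guard also skips configs that lack the key entirely. The mapping in isolation:

```python
# timm pretrained_cfg stores the input size channels-first.
pretrained_cfg = {"input_size": (3, 224, 224)}
image_size = pretrained_cfg["input_size"][1:]  # -> (224, 224) for the converter
```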
keras_hub/src/version_utils.py
CHANGED
-__version__ = "0.16.1.dev202410170342"
+__version__ = "0.16.1.dev202410190340"

{keras_hub_nightly-0.16.1.dev202410170342.dist-info → keras_hub_nightly-0.16.1.dev202410190340.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: keras-hub-nightly
-Version: 0.16.1.dev202410170342
+Version: 0.16.1.dev202410190340
 Summary: Industry-strength Natural Language Processing extensions for Keras.
 Home-page: https://github.com/keras-team/keras-hub
 Author: Keras team
{keras_hub_nightly-0.16.1.dev202410170342.dist-info → keras_hub_nightly-0.16.1.dev202410190340.dist-info}/RECORD
CHANGED
@@ -1,15 +1,15 @@
 keras_hub/__init__.py,sha256=QGdXyHgYt6cMUAP1ebxwc6oR86dE0dkMxNy2eOCQtFo,855
 keras_hub/api/__init__.py,sha256=spMxsgqzjpeuC8rY4WP-2kAZ2qwwKRSbFwddXgUjqQE,524
 keras_hub/api/bounding_box/__init__.py,sha256=T8R_X7BPm0et1xaZq8565uJmid7dylsSFSj4V-rGuFQ,1097
-keras_hub/api/layers/__init__.py,sha256=
+keras_hub/api/layers/__init__.py,sha256=OpXnXktkkpTjlufy1u2hLPqV0cidG2B40x30jQGiy9U,2481
 keras_hub/api/metrics/__init__.py,sha256=So8Ec-lOcTzn_UUMmAdzDm8RKkPu2dbRUm2px8gpUEI,381
-keras_hub/api/models/__init__.py,sha256=
+keras_hub/api/models/__init__.py,sha256=5EfZDUOnHStK8UE6f6ih7cQZo2ZyFeUO15T45TC1uNA,14819
 keras_hub/api/samplers/__init__.py,sha256=n-_SEXxr2LNUzK2FqVFN7alsrkx1P_HOVTeLZKeGCdE,730
 keras_hub/api/tokenizers/__init__.py,sha256=_f-r_cyUM2fjBB7iO84ThOdqqsAxHNIewJ2EBDlM0cA,2524
 keras_hub/api/utils/__init__.py,sha256=Gp1E6gG-RtKQS3PBEQEOz9PQvXkXaJ0ySGMqZ7myN7A,215
 keras_hub/src/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 keras_hub/src/api_export.py,sha256=9pQZK27JObxWZ96QPLBp1OBsjWigh1iuV6RglPGMRk0,1499
-keras_hub/src/version_utils.py,sha256=
+keras_hub/src/version_utils.py,sha256=E7QV4TlZYLCWf3urdy8QAb07u7MCrgMVt4xHY-XOT6k,222
 keras_hub/src/bounding_box/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 keras_hub/src/bounding_box/converters.py,sha256=a5po8DBm87oz2EXfi-0uEZHCMlCJPIb4-MaZIdYx3Dg,17865
 keras_hub/src/bounding_box/formats.py,sha256=YmskOz2BOSat7NaE__J9VfpSNGPJJR0znSzA4lp8MMI,3868
@@ -61,10 +61,10 @@ keras_hub/src/models/image_to_image.py,sha256=z2TfFh9DiaEj9u6hEY8May3B0etxhptttg
 keras_hub/src/models/inpaint.py,sha256=8TTusRRS7ntPoAd0BsuhEZjedtaoljI4ZbgKQ_bnF34,20411
 keras_hub/src/models/masked_lm.py,sha256=uXO_dE_hILlOC9jNr6oK6IHi9IGUqLyNGvr6nMt8Rk0,3576
 keras_hub/src/models/masked_lm_preprocessor.py,sha256=g8vrnyYwqdnSw5xppROM1Gzo_jmMWKYZoQCsKdfrFKk,5656
-keras_hub/src/models/preprocessor.py,sha256=
+keras_hub/src/models/preprocessor.py,sha256=KqUJrF24h_6h2CnkuyneqOioCa1Sd3ZA0qzq3BdLqUA,8496
 keras_hub/src/models/seq_2_seq_lm.py,sha256=w0gX-5YZjatfvAJmFAgSHyqS_BLqc8FF8DPLGK8mrgI,1864
 keras_hub/src/models/seq_2_seq_lm_preprocessor.py,sha256=HUHRbWRG5SF1pPpotGzBhXlrMh4pLFxgAoFk05FIrB4,9687
-keras_hub/src/models/task.py,sha256=
+keras_hub/src/models/task.py,sha256=06ISrWbn7ab-H1uszIPogpt6PuM90xiXKvwrAIEsC-o,14570
 keras_hub/src/models/text_classifier.py,sha256=VBDvQUHTpJPqKp7A4VAtm35FOmJ3yMo0DW6GdX67xG0,4159
 keras_hub/src/models/text_classifier_preprocessor.py,sha256=EoWp-GHnaLnAKTdAzDmC-soAV92ATF3QozdubdV2WXI,4722
 keras_hub/src/models/text_to_image.py,sha256=7s6rB1To46A7l9ItqRw3Pe4DGRm7YnqbHJ-RyNAlLPE,12973
@@ -122,14 +122,14 @@ keras_hub/src/models/deeplab_v3/deeplab_v3_backbone.py,sha256=WyFhuLcjFPFVuNL09b
 keras_hub/src/models/deeplab_v3/deeplab_v3_image_converter.py,sha256=mRkH3HdhpV0fCcQcVXEvIX7SNk-bAMb3SAHzgK-FD5c,371
 keras_hub/src/models/deeplab_v3/deeplab_v3_image_segmeter_preprocessor.py,sha256=hR9S6lNYamY0EBDBo3e1qTCiwtftmLXrN-UYuzfw5Io,581
 keras_hub/src/models/deeplab_v3/deeplab_v3_layers.py,sha256=qmEiolOOriLAojXB67xXW9IOo717kaCGeDVZJLaGY98,7834
-keras_hub/src/models/deeplab_v3/deeplab_v3_presets.py,sha256=
+keras_hub/src/models/deeplab_v3/deeplab_v3_presets.py,sha256=jdSycE_H2Dm1z2WHYu0WtpEJBMiAoioHgJL1gMEGLDI,709
 keras_hub/src/models/deeplab_v3/deeplab_v3_segmenter.py,sha256=tiMDcCFANHMUx3IVtW3r1P_JTazgPPsbW4IktIytKEU,3650
 keras_hub/src/models/densenet/__init__.py,sha256=r7StyamnWeeZxOk9r4ZYNbS_YVhu9YGPyXhNxljvdPg,269
 keras_hub/src/models/densenet/densenet_backbone.py,sha256=dN9lUwKzO3E2HthNV2x54ozeBEQ0ilNs5uYHshFQpT0,6723
 keras_hub/src/models/densenet/densenet_image_classifier.py,sha256=ptuV6PwgoUpmrSPqX7-a85IpWsElwcCv_G5IVkP9E_Q,530
 keras_hub/src/models/densenet/densenet_image_classifier_preprocessor.py,sha256=xDZbTw_h6pjLDzf8QmbDyMnMsFzgh-dPX1ldg9kddhg,563
 keras_hub/src/models/densenet/densenet_image_converter.py,sha256=DoxYlJVZ9uaabFhVjWOmzvhONoc8KNcQj2vQ6Z1AUpU,354
-keras_hub/src/models/densenet/densenet_presets.py,sha256=
+keras_hub/src/models/densenet/densenet_presets.py,sha256=QoluKQJnV391K6hoIX5X8UquD8f647u_8Ygta-UxmwE,1531
 keras_hub/src/models/distil_bert/__init__.py,sha256=3Z0w-Mt3aOR0u9RGzjHQ7B3J3qBF2pGjupDGQ9yyzoc,303
 keras_hub/src/models/distil_bert/distil_bert_backbone.py,sha256=rnAf_GokB3wAeJwVZtgUKQO_bKJIa8RavhL_ykTJpNw,6440
 keras_hub/src/models/distil_bert/distil_bert_masked_lm.py,sha256=L0DvOl01MIwqc2f6H_E8si9qVUXPd0OKknJ5Rha33TA,4275
@@ -209,15 +209,15 @@ keras_hub/src/models/mistral/mistral_layer_norm.py,sha256=nimMZ5CTPK8v9eflfrGuzq
 keras_hub/src/models/mistral/mistral_presets.py,sha256=gucgdaFAiU-vRDS1g9zWGHjbDF_jaCiljPibCF4yVqY,1329
 keras_hub/src/models/mistral/mistral_tokenizer.py,sha256=wyzR_Y2XwrDiBV3jIeBChSPiaOkVVaxFuLxMH2F6EYA,2005
 keras_hub/src/models/mistral/mistral_transformer_decoder.py,sha256=RDIIB3FhneHZP11tNUFQT9DcWawCMnrtVxtSvtnP3ts,9542
-keras_hub/src/models/mix_transformer/__init__.py,sha256=
-keras_hub/src/models/mix_transformer/mix_transformer_backbone.py,sha256=
-keras_hub/src/models/mix_transformer/mix_transformer_classifier.py,sha256=
-keras_hub/src/models/mix_transformer/mix_transformer_classifier_preprocessor.py,sha256=
-keras_hub/src/models/mix_transformer/mix_transformer_image_converter.py,sha256=
-keras_hub/src/models/mix_transformer/mix_transformer_layers.py,sha256=
-keras_hub/src/models/mix_transformer/mix_transformer_presets.py,sha256=
+keras_hub/src/models/mit/__init__.py,sha256=F70_0PR_nPzPdMI8XOpXDRR_nxclGjcHv3iWSWUX3w8,316
+keras_hub/src/models/mit/mit_backbone.py,sha256=0lsWM7TkwmFE3euYbI5Xe808_ua9UDPOV4hOPlCBrOo,5984
+keras_hub/src/models/mit/mit_image_classifier.py,sha256=HKj6u6AqPbxinGYPRsz_ZdW2pEHAcFsKenrGHpRMobM,480
+keras_hub/src/models/mit/mit_image_classifier_preprocessor.py,sha256=oNYs-pUK8VnzNEPcq5beYX0qfnnlbJcxY8o5s7bVQes,504
+keras_hub/src/models/mit/mit_image_converter.py,sha256=Mw7nV-OzyBveGuZUNFsPPKyq9jXJVW2_cVH024CNkXM,311
+keras_hub/src/models/mit/mit_layers.py,sha256=9AbA4kCJkjeV7fAwbRns8VGn0l1pgQ3CqFPjY-99VGA,9695
+keras_hub/src/models/mit/mit_presets.py,sha256=9bxWVOLhmBdoq2I4uZfZb7wZAB-3YjuMq0T_-JJzr2w,4960
 keras_hub/src/models/mobilenet/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-keras_hub/src/models/mobilenet/mobilenet_backbone.py,sha256=
+keras_hub/src/models/mobilenet/mobilenet_backbone.py,sha256=fFqEH3WTspEBYnF9LEdsX7RGHEEL3CVoVDPWG1ZnqBk,18193
 keras_hub/src/models/mobilenet/mobilenet_image_classifier.py,sha256=l5jo99I0fLlbwLub5jHw07CjC-NnmuV-ySJwXGI20Ek,351
 keras_hub/src/models/opt/__init__.py,sha256=6Ybj8etxNaPsVcuZvaeHnKB3As92Px--dbiFAqOCIT0,239
 keras_hub/src/models/opt/opt_backbone.py,sha256=mK5z_E5mSiIX5s0w4hr4IVQpT7K46W2ajZBmuMjxwaY,5873
@@ -226,14 +226,14 @@ keras_hub/src/models/opt/opt_causal_lm_preprocessor.py,sha256=xHfslVMOZlAIj2V2jI
 keras_hub/src/models/opt/opt_presets.py,sha256=J1IJ5VRcZZ6UZJSLrxpbWXw39YmbRd_WQujX1a6dxHo,2329
 keras_hub/src/models/opt/opt_tokenizer.py,sha256=oDHeed4xf07tm14hj_C78BkzMuuRwRP2cRHmqYnObrs,2557
 keras_hub/src/models/pali_gemma/__init__.py,sha256=uODWTlttOOchcTLpiYHCEWMXnDxIz8ZVIeYFQN2bd8o,288
-keras_hub/src/models/pali_gemma/pali_gemma_backbone.py,sha256=
+keras_hub/src/models/pali_gemma/pali_gemma_backbone.py,sha256=Rrl7nof_gAZL2Nge1cFymCsRdwqxQjwmEEhucGspUr0,10586
 keras_hub/src/models/pali_gemma/pali_gemma_causal_lm.py,sha256=AViEs6YltUqWnIVo7J02JkXcanBgLSdwZwF56TVr8gc,11345
 keras_hub/src/models/pali_gemma/pali_gemma_causal_lm_preprocessor.py,sha256=F57y0fZ0wYYxfGIjfrJc1W9uQpViYFx5bvFjj5CqUbI,4814
 keras_hub/src/models/pali_gemma/pali_gemma_decoder_block.py,sha256=Q_sPAULiSo_ZJeXklZjCLhvOMXk8MrPZhEXtL5yNOiI,5175
 keras_hub/src/models/pali_gemma/pali_gemma_image_converter.py,sha256=5yM_jUtrFsWIieiwfFBoP7mtPmQAwywkeLKbd7fhmzk,371
-keras_hub/src/models/pali_gemma/pali_gemma_presets.py,sha256=
+keras_hub/src/models/pali_gemma/pali_gemma_presets.py,sha256=yLLuPwhIDE7HuMNJwLw1_yhHGz3w3mvYCxVcgAtSydc,2401
 keras_hub/src/models/pali_gemma/pali_gemma_tokenizer.py,sha256=ljTiADHo0Ok88q-jVzwJIle2C8xcxnudLTsBLzIySaM,2415
-keras_hub/src/models/pali_gemma/pali_gemma_vit.py,sha256=
+keras_hub/src/models/pali_gemma/pali_gemma_vit.py,sha256=UpmymNkwuN9iuTV2I4M6lvHnlqpZIDmPb5pAADKs-Vg,18029
 keras_hub/src/models/phi3/__init__.py,sha256=zIbf1MU-ks91mEkjTRJAsk51N3BBnXDF2JM1vO-13PQ,245
 keras_hub/src/models/phi3/phi3_attention.py,sha256=dN8QwwTP9TxPBDv0MCvObLF3nHm1H6xbYr3T1K0nmg8,9243
 keras_hub/src/models/phi3/phi3_backbone.py,sha256=fY-OY2ZrqxDHglYjTM0OCacBdEQHwj-XNmU0MnXL7iU,8885
@@ -245,11 +245,11 @@ keras_hub/src/models/phi3/phi3_presets.py,sha256=DNyPTDA7PzFC8Ys2QmR2-mxUDa8Y8Id
 keras_hub/src/models/phi3/phi3_rotary_embedding.py,sha256=WTPCN8IKq3R7kMzsES1b8JEKV-8iNi_49WkhNTXoNUk,5012
 keras_hub/src/models/phi3/phi3_tokenizer.py,sha256=bOPH14wTVVHJHq8mgzXLjsgvKMNhfO8eayevAPpjYVA,1992
 keras_hub/src/models/resnet/__init__.py,sha256=C5UqlQ6apm8WSp1bnrxB6Bi3BGaknxRQs-r3b2wpaGA,257
-keras_hub/src/models/resnet/resnet_backbone.py,sha256=
+keras_hub/src/models/resnet/resnet_backbone.py,sha256=6Fno9Cf8FgNzohzwpAIhGXKTLue0aEvErg5KNMGOPnI,31297
 keras_hub/src/models/resnet/resnet_image_classifier.py,sha256=nf35EKDzvBkfhHsK-s6Ks0nbhvKO7HEOYZm94YckyWE,510
 keras_hub/src/models/resnet/resnet_image_classifier_preprocessor.py,sha256=fM7gyQ0qB-RRuI4USJkRD6q9-HVfuC71e-BLTo-UhHQ,543
 keras_hub/src/models/resnet/resnet_image_converter.py,sha256=fgTxihJznGFss-y3Z-jp0JE3X1gaaB2y-f2KMwrT8Pk,342
-keras_hub/src/models/resnet/resnet_presets.py,sha256=
+keras_hub/src/models/resnet/resnet_presets.py,sha256=kgEZQtJKnK37bSKy4Ny0GdE70P71z6OOdR_H--1pYGI,8563
 keras_hub/src/models/retinanet/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 keras_hub/src/models/retinanet/anchor_generator.py,sha256=43NoI7djbRudH98hUm-9fw5OEGQNRXOUYzypIZhLYhE,6750
 keras_hub/src/models/retinanet/box_matcher.py,sha256=l820r1R-ByqiyVgmZ0YFjjz0njchDda-wItzLn1X84o,10834
@@ -267,11 +267,11 @@ keras_hub/src/models/roberta/roberta_tokenizer.py,sha256=VKPrgXVT9aMKP7et2DIWKlT
 keras_hub/src/models/sam/__init__.py,sha256=fp71Q288xeE81tIOZkkudec4Acs8v4aO5WdyzCD9x-c,239
 keras_hub/src/models/sam/sam_backbone.py,sha256=fbvtGG6du7tnkcGtEsRyT9TRwPBUJ99GBolGkWR5pkc,4351
 keras_hub/src/models/sam/sam_image_converter.py,sha256=5POp3aYFu6CK3R0NNfeUBbjhguBkincSMNvlcIJXarE,324
-keras_hub/src/models/sam/sam_image_segmenter.py,sha256=
+keras_hub/src/models/sam/sam_image_segmenter.py,sha256=2OIowfl7cF298gIlqQlOJUsx_BcI2mCDntbXR8uohZc,7680
 keras_hub/src/models/sam/sam_image_segmenter_preprocessor.py,sha256=7slvyhGoMHmSigagqIcjDJ3gX8fUJbuMBwmozC4FlCg,849
 keras_hub/src/models/sam/sam_layers.py,sha256=SE5e6tYc-lsIVfMp9khewvuA1jY-dEHQmLT00YUok4M,13862
 keras_hub/src/models/sam/sam_mask_decoder.py,sha256=9RfjoNL7GSY6I9LZ3ulUa5cIoYSPJNP4KnHvq16lnM4,9549
-keras_hub/src/models/sam/sam_presets.py,sha256=
+keras_hub/src/models/sam/sam_presets.py,sha256=IzpNyhxa3s5zqQFPTZOuqU75I7DU6y-TvMM27gKXLAs,1211
 keras_hub/src/models/sam/sam_prompt_encoder.py,sha256=2foB7900QbzQfZjBo335XYsdjmhOnVT8fKD1CubJNVE,11801
 keras_hub/src/models/sam/sam_transformer.py,sha256=L2bdxdc2RUF1juRZ0F0Z6r0gTva1sUwEdjItJmKKf6w,5730
 keras_hub/src/models/stable_diffusion_3/__init__.py,sha256=ZKYQuaRObyhKq8GVAHmoRvlXp6FpU8ChvutVCHyXKuc,343
@@ -280,7 +280,7 @@ keras_hub/src/models/stable_diffusion_3/mmdit.py,sha256=ByFot4_I1Z6woOBYvPcbkUtY
 keras_hub/src/models/stable_diffusion_3/stable_diffusion_3_backbone.py,sha256=QuggvAy1yvtIXFcwyXOmE_aUdhLcCEUw4FnTuqekys0,22497
 keras_hub/src/models/stable_diffusion_3/stable_diffusion_3_image_to_image.py,sha256=6_IXkxAv588lAKEasJrXgCjQePSXs-54XrvVIlYOT60,5483
 keras_hub/src/models/stable_diffusion_3/stable_diffusion_3_inpaint.py,sha256=tKVAQVbKOt3lWkWsQLKN9KK3WYem0-u5fonq2uBAPrc,6367
-keras_hub/src/models/stable_diffusion_3/stable_diffusion_3_presets.py,sha256=
+keras_hub/src/models/stable_diffusion_3/stable_diffusion_3_presets.py,sha256=9LcbF9Okin9ba6XJd9EQJKW5AVWdrfvW8r6F6TCx7X8,661
 keras_hub/src/models/stable_diffusion_3/stable_diffusion_3_text_to_image.py,sha256=pQOC7xMJfJHZxZRiYFtjrbjx0GXb94cNyOr9NELoXo8,4488
 keras_hub/src/models/stable_diffusion_3/stable_diffusion_3_text_to_image_preprocessor.py,sha256=TB0KESt5dnFYiS292PbzB0LdiH23AD6aTSTGmQEuzGM,2742
 keras_hub/src/models/stable_diffusion_3/t5_encoder.py,sha256=oV7P1uwCKdGiD93zXq7kmqX0elMZQU4UvBa8wg6P1hs,5113
@@ -293,14 +293,16 @@ keras_hub/src/models/t5/t5_presets.py,sha256=95zU4cTNEZMH2yiCLptA9zhu2D4mE1Cay18
 keras_hub/src/models/t5/t5_tokenizer.py,sha256=pLTu15JeYSpVmy-2600vBc-Mxn_uHyTKts4PI2MxxBM,2517
 keras_hub/src/models/t5/t5_transformer_layer.py,sha256=uDeP84F1x7xJxki5iKe12Zn6eWD_4yVjoFXMuod-a3A,5347
 keras_hub/src/models/vae/__init__.py,sha256=i3UaSW4IJf76O7lSPE1dyxOVjuHx8iAYKivqvUbDHOw,62
-keras_hub/src/models/vae/vae_backbone.py,sha256=
+keras_hub/src/models/vae/vae_backbone.py,sha256=Yk0srJhB-zfxQeAoyZdNzvxfxPxPMVie0nqKU7cp-2M,7033
 keras_hub/src/models/vae/vae_layers.py,sha256=N83CYM1zgbl1EIjAOs3cFCkJEdxvbXkgM9ghKyljFAg,27752
-keras_hub/src/models/vgg/__init__.py,sha256=
-keras_hub/src/models/vgg/vgg_backbone.py,sha256=
-keras_hub/src/models/vgg/vgg_image_classifier.py,sha256=
+keras_hub/src/models/vgg/__init__.py,sha256=5ktFtITvvYja4Jg3q1LqPvGH-fMicx5wxCCpXT8aVKQ,239
+keras_hub/src/models/vgg/vgg_backbone.py,sha256=yzzindEMO1rDgf3eAv9K0rhpg4NgfGZLgxaAv5CyGM8,3699
+keras_hub/src/models/vgg/vgg_image_classifier.py,sha256=d-hlgvwbNhzR6r3q2oqEmRmuAuCpKzUwNC2JUwdzruI,7460
+keras_hub/src/models/vgg/vgg_image_classifier_preprocessor.py,sha256=M7hBbDPws5Z7oDQPigBx-upHssul7Q_p0QIv3E4yOwo,504
+keras_hub/src/models/vgg/vgg_image_converter.py,sha256=FKVrSNNBxIkiKvApzf4TZxidBb1z917Xs9nooHCcRLM,324
 keras_hub/src/models/vgg/vgg_presets.py,sha256=ltKExQdrR3E30kZPZD53tXVOsc8Gj7Krj6pzHP1UYVU,1879
 keras_hub/src/models/vit_det/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-keras_hub/src/models/vit_det/vit_det_backbone.py,sha256=
+keras_hub/src/models/vit_det/vit_det_backbone.py,sha256=-ZSvfL_XSjYK_qpo5fDGY-UOI38_rqU4fD9XszK2yxU,7656
 keras_hub/src/models/vit_det/vit_layers.py,sha256=oCKeUw5ckyUAGvmFPuxIiIAqgmC3uqh85LfZcgyh964,19852
 keras_hub/src/models/whisper/__init__.py,sha256=45vTF01_e-7VzD-zvXPw1NiA9SCgDE8w0cI-6peG9cA,263
 keras_hub/src/models/whisper/whisper_audio_converter.py,sha256=aXqQ6uPI9fBSjuYbo7bMr4C0avPh3iDwrVXHEJ7W_zo,8386
@@ -345,7 +347,7 @@ keras_hub/src/tokenizers/word_piece_tokenizer.py,sha256=vP6AZgbzsRiuPCt3W_n94nsF
 keras_hub/src/tokenizers/word_piece_tokenizer_trainer.py,sha256=xUhc9EMswarzghNfrDLUFYQBExZOQxbMlfKp9G6A63k,6549
 keras_hub/src/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 keras_hub/src/utils/keras_utils.py,sha256=lrZuC8HL2lmQfbHaS_t1JUyJann_ji2iTYE0Fzos8PU,1969
-keras_hub/src/utils/pipeline_model.py,sha256=
+keras_hub/src/utils/pipeline_model.py,sha256=jgzB6NQPSl0KOu08N-TazfOnXnUJbZjH2EXXhx25Ftg,9084
 keras_hub/src/utils/preset_utils.py,sha256=w45mluy4bhPPWB68waWpXFQ4MAKvSXS6llVw4rAE70s,30119
 keras_hub/src/utils/python_utils.py,sha256=N8nWeO3san4YnGkffRXG3Ix7VEIMTKSN21FX5TuL7G8,202
 keras_hub/src/utils/tensor_utils.py,sha256=JipeJUDnnvLuT-ToVQC0t9dmSzebwPG6XiZgEwGEGI4,14646
@@ -355,7 +357,7 @@ keras_hub/src/utils/timm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZ
 keras_hub/src/utils/timm/convert_densenet.py,sha256=V-GRjWuDnlh3b1EMxqahwZ3GMwSgOa3v0HOfb2ZZ-d0,3342
 keras_hub/src/utils/timm/convert_resnet.py,sha256=ee8eTml0ffJKE8avzGoLFcpjPF63DsvoIUArAGa8Ngg,5832
 keras_hub/src/utils/timm/convert_vgg.py,sha256=MT5jGnLrzenPpe66Af_Lp1IdR9KGtsSrcmn6_UPqHvQ,2419
-keras_hub/src/utils/timm/preset_loader.py,sha256=
+keras_hub/src/utils/timm/preset_loader.py,sha256=PBqmnEj-fash_-GH-_ulb9YYaHAIESlOsI3wXCwKGRo,3221
 keras_hub/src/utils/transformers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 keras_hub/src/utils/transformers/convert_albert.py,sha256=VdKclZpCxtDWq3UbUUQZf4fR9DJK_JYZ73B4O_G9skg,7695
 keras_hub/src/utils/transformers/convert_bart.py,sha256=Tk4h9Md9rwN5wjQbGIVrC7qzDpF8kI8qm-FKL8HlUok,14411
@@ -368,7 +370,7 @@ keras_hub/src/utils/transformers/convert_mistral.py,sha256=kVhN9h1ZFVhwkNW8p3wnS
 keras_hub/src/utils/transformers/convert_pali_gemma.py,sha256=B1leeDw96Yvu81hYumf66hIid07k5NLqoeWAJgPnaLs,10649
 keras_hub/src/utils/transformers/preset_loader.py,sha256=GS44hZUuGQCtzsyn8z44ZpHdftd3DFemwV2hx2bQa-U,2738
 keras_hub/src/utils/transformers/safetensor_utils.py,sha256=rPK-Uw1CG0DX0d_UAD-r2cG9fw8GI8bvAlrcXfQ9g4c,3323
-keras_hub_nightly-0.16.1.dev202410170342.dist-info/METADATA,sha256=
-keras_hub_nightly-0.16.1.dev202410170342.dist-info/WHEEL,sha256=
-keras_hub_nightly-0.16.1.dev202410170342.dist-info/top_level.txt,sha256=
-keras_hub_nightly-0.16.1.dev202410170342.dist-info/RECORD,,
+keras_hub_nightly-0.16.1.dev202410190340.dist-info/METADATA,sha256=MXT2uCAnfn99QJFpgrXxXlR2cvFlcPUH9_Q-SetZCpg,7458
+keras_hub_nightly-0.16.1.dev202410190340.dist-info/WHEEL,sha256=OVMc5UfuAQiSplgO0_WdW7vXVGAt9Hdd6qtN4HotdyA,91
+keras_hub_nightly-0.16.1.dev202410190340.dist-info/top_level.txt,sha256=N4J6piIWBKa38A4uV-CnIopnOEf8mHAbkNXafXm_CuA,10
+keras_hub_nightly-0.16.1.dev202410190340.dist-info/RECORD,,
keras_hub/src/models/mix_transformer/__init__.py
DELETED
@@ -1,12 +0,0 @@
-from keras_hub.src.models.mix_transformer.mix_transformer_backbone import (
-    MiTBackbone,
-)
-from keras_hub.src.models.mix_transformer.mix_transformer_classifier import (
-    MiTImageClassifier,
-)
-from keras_hub.src.models.mix_transformer.mix_transformer_presets import (
-    backbone_presets,
-)
-from keras_hub.src.utils.preset_utils import register_presets
-
-register_presets(backbone_presets, MiTBackbone)
The remaining entries — the renamed mit_layers.py and mit_presets.py files and the dist-info WHEEL and top_level.txt files — have no content changes.