keras-hub-nightly 0.19.0.dev202502090345__py3-none-any.whl → 0.19.0.dev202502110348__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
- keras_hub/api/layers/__init__.py +3 -0
- keras_hub/api/models/__init__.py +3 -0
- keras_hub/src/models/mobilenet/mobilenet_backbone.py +679 -386
- keras_hub/src/models/mobilenet/mobilenet_image_classifier.py +73 -0
- keras_hub/src/models/mobilenet/mobilenet_image_classifier_preprocessor.py +14 -0
- keras_hub/src/models/mobilenet/mobilenet_image_converter.py +8 -0
- keras_hub/src/models/mobilenet/mobilenet_presets.py +15 -0
- keras_hub/src/models/mobilenet/util.py +23 -0
- keras_hub/src/utils/preset_utils.py +33 -1
- keras_hub/src/utils/timm/convert_mobilenet.py +201 -0
- keras_hub/src/utils/timm/preset_loader.py +3 -0
- keras_hub/src/version_utils.py +1 -1
- {keras_hub_nightly-0.19.0.dev202502090345.dist-info → keras_hub_nightly-0.19.0.dev202502110348.dist-info}/METADATA +1 -1
- {keras_hub_nightly-0.19.0.dev202502090345.dist-info → keras_hub_nightly-0.19.0.dev202502110348.dist-info}/RECORD +16 -11
- {keras_hub_nightly-0.19.0.dev202502090345.dist-info → keras_hub_nightly-0.19.0.dev202502110348.dist-info}/WHEEL +0 -0
- {keras_hub_nightly-0.19.0.dev202502090345.dist-info → keras_hub_nightly-0.19.0.dev202502110348.dist-info}/top_level.txt +0 -0

keras_hub/src/models/mobilenet/mobilenet_image_classifier.py
@@ -1,8 +1,81 @@
+import keras
+
 from keras_hub.src.api_export import keras_hub_export
 from keras_hub.src.models.image_classifier import ImageClassifier
 from keras_hub.src.models.mobilenet.mobilenet_backbone import MobileNetBackbone
+from keras_hub.src.models.mobilenet.mobilenet_image_classifier_preprocessor import (  # noqa: E501
+    MobileNetImageClassifierPreprocessor,
+)
+from keras_hub.src.models.task import Task


 @keras_hub_export("keras_hub.models.MobileNetImageClassifier")
 class MobileNetImageClassifier(ImageClassifier):
     backbone_cls = MobileNetBackbone
+    preprocessor_cls = MobileNetImageClassifierPreprocessor
+
+    def __init__(
+        self,
+        backbone,
+        num_classes,
+        preprocessor=None,
+        head_dtype=None,
+        **kwargs,
+    ):
+        head_dtype = head_dtype or backbone.dtype_policy
+        data_format = getattr(backbone, "data_format", None)
+
+        # === Layers ===
+        self.backbone = backbone
+        self.preprocessor = preprocessor
+        self.pooler = keras.layers.GlobalAveragePooling2D(
+            data_format, keepdims=True, dtype=head_dtype, name="pooler"
+        )
+
+        self.output_conv = keras.layers.Conv2D(
+            filters=1024,
+            kernel_size=(1, 1),
+            strides=(1, 1),
+            use_bias=True,
+            padding="valid",
+            activation="hard_silu",
+            name="classifier_conv",
+            dtype=head_dtype,
+        )
+
+        self.flatten = keras.layers.Flatten(
+            dtype=head_dtype,
+        )
+
+        self.output_dense = keras.layers.Dense(
+            num_classes,
+            dtype=head_dtype,
+            name="predictions",
+        )
+
+        # === Functional Model ===
+        inputs = self.backbone.input
+        x = self.backbone(inputs)
+        x = self.pooler(x)
+        x = self.output_conv(x)
+        x = self.flatten(x)
+        outputs = self.output_dense(x)
+        Task.__init__(
+            self,
+            inputs=inputs,
+            outputs=outputs,
+            **kwargs,
+        )
+
+        # === Config ===
+        self.num_classes = num_classes
+
+    def get_config(self):
+        # Skip ImageClassifier
+        config = Task.get_config(self)
+        config.update(
+            {
+                "num_classes": self.num_classes,
+            }
+        )
+        return config
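For orientation, a minimal usage sketch of the classifier head added above. It assumes the `mobilenetv3_small_050` preset registered later in this diff is resolvable; any `MobileNetBackbone` instance would work the same way, and `num_classes=10` is an arbitrary example value.

```python
import numpy as np
import keras_hub

# Assumption: the "mobilenetv3_small_050" preset (registered later in this
# diff) can be resolved; any MobileNetBackbone instance works the same way.
backbone = keras_hub.models.MobileNetBackbone.from_preset("mobilenetv3_small_050")
classifier = keras_hub.models.MobileNetImageClassifier(
    backbone=backbone,
    num_classes=10,  # arbitrary example head size
)

# Head wiring from the diff: pooling -> 1x1 conv (1024, hard_silu) -> flatten
# -> dense logits.
logits = classifier.predict(np.random.uniform(size=(1, 224, 224, 3)))
print(logits.shape)  # (1, 10)
```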

keras_hub/src/models/mobilenet/mobilenet_image_classifier_preprocessor.py
@@ -0,0 +1,14 @@
+from keras_hub.src.api_export import keras_hub_export
+from keras_hub.src.models.image_classifier_preprocessor import (
+    ImageClassifierPreprocessor,
+)
+from keras_hub.src.models.mobilenet.mobilenet_backbone import MobileNetBackbone
+from keras_hub.src.models.mobilenet.mobilenet_image_converter import (
+    MobileNetImageConverter,
+)
+
+
+@keras_hub_export("keras_hub.models.MobileNetImageClassifierPreprocessor")
+class MobileNetImageClassifierPreprocessor(ImageClassifierPreprocessor):
+    backbone_cls = MobileNetBackbone
+    image_converter_cls = MobileNetImageConverter

keras_hub/src/models/mobilenet/mobilenet_image_converter.py
@@ -0,0 +1,8 @@
+from keras_hub.src.api_export import keras_hub_export
+from keras_hub.src.layers.preprocessing.image_converter import ImageConverter
+from keras_hub.src.models.mobilenet.mobilenet_backbone import MobileNetBackbone
+
+
+@keras_hub_export("keras_hub.layers.MobileNetImageConverter")
+class MobileNetImageConverter(ImageConverter):
+    backbone_cls = MobileNetBackbone
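Both new classes are thin registrations on top of keras_hub's generic preprocessing stack, so wiring them together looks like the sketch below. The `image_size` and `scale` arguments follow the base `ImageConverter` constructor; the specific values here are illustrative assumptions, not the preset's actual preprocessing settings.

```python
import keras_hub

# Illustrative values only; the preset's real resize/rescale settings may differ.
converter = keras_hub.layers.MobileNetImageConverter(
    image_size=(224, 224),
    scale=1.0 / 255.0,
)
preprocessor = keras_hub.models.MobileNetImageClassifierPreprocessor(
    image_converter=converter,
)
```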

keras_hub/src/models/mobilenet/mobilenet_presets.py
@@ -0,0 +1,15 @@
+"""MobileNet preset configurations."""
+
+backbone_presets = {
+    "mobilenetv3_small_050": {
+        "metadata": {
+            "description": (
+                "Small MObilenet V3 model pre-trained on the ImageNet 1k "
+                "dataset at a 224x224 resolution."
+            ),
+            "official_name": "MobileNet",
+            "path": "mobilenet3",
+        },
+        "kaggle_handle": "kaggle://keras/mobilenet/keras/mobilenetv3_small_050",
+    },
+}
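The registered short name is an alias for the Kaggle handle above, so the two calls below are equivalent (assuming the Kaggle model has been published and is reachable).

```python
import keras_hub

# Short preset name, resolved through the registry added above...
backbone = keras_hub.models.MobileNetBackbone.from_preset("mobilenetv3_small_050")
# ...or the explicit Kaggle handle it maps to.
backbone = keras_hub.models.MobileNetBackbone.from_preset(
    "kaggle://keras/mobilenet/keras/mobilenetv3_small_050"
)
```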

keras_hub/src/models/mobilenet/util.py
@@ -0,0 +1,23 @@
+def adjust_channels(x, divisor=8, min_value=None):
+    """Ensure that all layers have a channel number divisible by the `divisor`.
+
+    Args:
+        x: integer, input value.
+        divisor: integer, the value by which a channel number should be
+            divisible, defaults to 8.
+        min_value: float, optional minimum value for the new tensor. If None,
+            defaults to value of divisor.
+
+    Returns:
+        the updated input scalar.
+    """
+
+    if min_value is None:
+        min_value = divisor
+
+    new_x = max(min_value, int(x + divisor / 2) // divisor * divisor)
+
+    # make sure that round down does not go down by more than 10%.
+    if new_x < 0.9 * x:
+        new_x += divisor
+    return new_x
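To make the rounding rule concrete, a few values computed directly from the function above (default `divisor=8`):

```python
from keras_hub.src.models.mobilenet.util import adjust_channels

assert adjust_channels(7) == 8    # clamped up to min_value (== divisor)
assert adjust_channels(12) == 16  # rounds to the nearest multiple of 8
assert adjust_channels(17) == 16  # rounding down by <10% is allowed
assert adjust_channels(27) == 32  # 24 would drop >10%, so bump by one divisor
```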

keras_hub/src/utils/preset_utils.py
@@ -27,11 +27,12 @@ except ImportError:
 KAGGLE_PREFIX = "kaggle://"
 GS_PREFIX = "gs://"
 HF_PREFIX = "hf://"
+MODELSCOPE_PREFIX = "modelscope://"

 KAGGLE_SCHEME = "kaggle"
 GS_SCHEME = "gs"
 HF_SCHEME = "hf"
-
+MODELSCOPE_SCHEME = "modelscope"
 ASSET_DIR = "assets"
 TOKENIZER_ASSET_DIR = f"{ASSET_DIR}/tokenizer"

@@ -165,6 +166,37 @@ def get_file(preset, path):
                 raise ValueError(message)
     elif scheme in tf_registered_schemes():
         return tf_copy_gfile_to_cache(preset, path)
+    elif scheme == MODELSCOPE_SCHEME:
+        try:
+            from modelscope.hub.snapshot_download import snapshot_download
+        except ImportError:
+            raise ImportError(
+                "To load a preset from ModelScope {preset} using from_preset,"
+                "install the modelscope package with: pip install modelscope."
+            )
+        modelscope_handle = preset.removeprefix(MODELSCOPE_SCHEME + "://")
+        try:
+            return_path = snapshot_download(modelscope_handle) + "/" + path
+            if os.path.exists(return_path):
+                return return_path
+            raise FileNotFoundError(
+                f"`{return_path}` doesn't exist in preset directory `{preset}`."
+            )
+        except ValueError as e:
+            raise ValueError(
+                "ModelScope handles should follow the format "
+                f"'modelscope://{{org}}/{{model}}' "
+                "(e.g., 'modelscope://username/bert_base_en')."
+                f"Received: preset='{preset}.'"
+            ) from e
+        except EntryNotFoundError as e:
+            message = str(e)
+            if message.find("403 Client Error"):
+                raise FileNotFoundError(
+                    f"`{path}` not exist in preset directory `{preset}`."
+                )
+            else:
+                raise ValueError(message)
     elif scheme == HF_SCHEME:
         if huggingface_hub is None:
             raise ImportError(
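With the new branch, a preset can be pulled from ModelScope through the same `from_preset` entry points as the other schemes. A minimal sketch, assuming `pip install modelscope` and that the placeholder handle below points at a repo that actually hosts a keras_hub preset:

```python
import keras_hub

# "<org>/<model>" is a placeholder handle; requires `pip install modelscope`.
backbone = keras_hub.models.Backbone.from_preset("modelscope://<org>/<model>")
```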

keras_hub/src/utils/timm/convert_mobilenet.py
@@ -0,0 +1,201 @@
+import numpy as np
+
+from keras_hub.src.models.mobilenet.mobilenet_backbone import MobileNetBackbone
+
+backbone_cls = MobileNetBackbone
+
+
+def convert_backbone_config(timm_config):
+    timm_architecture = timm_config["architecture"]
+
+    if "mobilenetv3_" in timm_architecture:
+        input_activation = "hard_swish"
+        output_activation = "hard_swish"
+    else:
+        input_activation = "relu6"
+        output_activation = "relu6"
+
+    if timm_architecture == "mobilenetv3_small_050":
+        stackwise_num_blocks = [2, 3, 2, 3]
+        stackwise_expansion = [
+            [40, 56],
+            [64, 144, 144],
+            [72, 72],
+            [144, 288, 288],
+        ]
+        stackwise_num_filters = [[16, 16], [24, 24, 24], [24, 24], [48, 48, 48]]
+        stackwise_kernel_size = [[3, 3], [5, 5, 5], [5, 5], [5, 5, 5]]
+        stackwise_num_strides = [[2, 1], [2, 1, 1], [1, 1], [2, 1, 1]]
+        stackwise_se_ratio = [
+            [None, None],
+            [0.25, 0.25, 0.25],
+            [0.25, 0.25],
+            [0.25, 0.25, 0.25],
+        ]
+        stackwise_activation = [
+            ["relu", "relu"],
+            ["hard_swish", "hard_swish", "hard_swish"],
+            ["hard_swish", "hard_swish"],
+            ["hard_swish", "hard_swish", "hard_swish"],
+        ]
+        stackwise_padding = [[1, 1], [2, 2, 2], [2, 2], [2, 2, 2]]
+        output_num_filters = 1024
+        input_num_filters = 16
+        depthwise_filters = 8
+        squeeze_and_excite = 0.5
+        last_layer_filter = 288
+    else:
+        raise ValueError(
+            f"Currently, the architecture {timm_architecture} is not supported."
+        )
+
+    return dict(
+        input_num_filters=input_num_filters,
+        input_activation=input_activation,
+        depthwise_filters=depthwise_filters,
+        squeeze_and_excite=squeeze_and_excite,
+        stackwise_num_blocks=stackwise_num_blocks,
+        stackwise_expansion=stackwise_expansion,
+        stackwise_num_filters=stackwise_num_filters,
+        stackwise_kernel_size=stackwise_kernel_size,
+        stackwise_num_strides=stackwise_num_strides,
+        stackwise_se_ratio=stackwise_se_ratio,
+        stackwise_activation=stackwise_activation,
+        stackwise_padding=stackwise_padding,
+        output_num_filters=output_num_filters,
+        output_activation=output_activation,
+        last_layer_filter=last_layer_filter,
+    )
+
+
+def convert_weights(backbone, loader, timm_config):
+    def port_conv2d(keras_layer, hf_weight_prefix, port_bias=False):
+        print(f"porting weights {hf_weight_prefix} -> {keras_layer}")
+        loader.port_weight(
+            keras_layer.kernel,
+            hf_weight_key=f"{hf_weight_prefix}.weight",
+            hook_fn=lambda x, _: np.transpose(x, (2, 3, 1, 0)),
+        )
+
+        if port_bias:
+            print(f"porting bias {hf_weight_prefix} -> {keras_layer}")
+            loader.port_weight(
+                keras_layer.bias,
+                hf_weight_key=f"{hf_weight_prefix}.bias",
+            )
+
+    def port_batch_normalization(keras_layer, hf_weight_prefix):
+        print(f"porting weights {hf_weight_prefix} -> {keras_layer}")
+        loader.port_weight(
+            keras_layer.gamma,
+            hf_weight_key=f"{hf_weight_prefix}.weight",
+        )
+        loader.port_weight(
+            keras_layer.beta,
+            hf_weight_key=f"{hf_weight_prefix}.bias",
+        )
+        loader.port_weight(
+            keras_layer.moving_mean,
+            hf_weight_key=f"{hf_weight_prefix}.running_mean",
+        )
+        loader.port_weight(
+            keras_layer.moving_variance,
+            hf_weight_key=f"{hf_weight_prefix}.running_var",
+        )
+        loader.port_weight(
+            keras_layer.moving_variance,
+            hf_weight_key=f"{hf_weight_prefix}.running_var",
+        )
+
+    # Stem
+    port_conv2d(backbone.get_layer("input_conv"), "conv_stem")
+    port_batch_normalization(backbone.get_layer("input_batch_norm"), "bn1")
+
+    # DepthWise Block (block 0)
+    hf_name = "blocks.0.0"
+    keras_name = "block_0_0"
+
+    stem_block = backbone.get_layer(keras_name)
+
+    port_conv2d(stem_block.conv1, f"{hf_name}.conv_dw")
+    port_batch_normalization(stem_block.batch_normalization1, f"{hf_name}.bn1")
+
+    stem_se_block = stem_block.se_layer
+    port_conv2d(stem_se_block.conv_reduce, f"{hf_name}.se.conv_reduce", True)
+    port_conv2d(stem_se_block.conv_expand, f"{hf_name}.se.conv_expand", True)
+
+    port_conv2d(stem_block.conv2, f"{hf_name}.conv_pw")
+    port_batch_normalization(stem_block.batch_normalization2, f"{hf_name}.bn2")
+
+    # Stages
+    num_stacks = len(backbone.stackwise_num_blocks)
+    for block_idx in range(num_stacks):
+        for inverted_block in range(backbone.stackwise_num_blocks[block_idx]):
+            keras_name = f"block_{block_idx + 1}_{inverted_block}"
+            hf_name = f"blocks.{block_idx + 1}.{inverted_block}"
+
+            # Inverted Residual Block
+            ir_block = backbone.get_layer(keras_name)
+            port_conv2d(ir_block.conv1, f"{hf_name}.conv_pw")
+            port_batch_normalization(
+                ir_block.batch_normalization1, f"{hf_name}.bn1"
+            )
+            port_conv2d(ir_block.conv2, f"{hf_name}.conv_dw")
+            port_batch_normalization(
+                ir_block.batch_normalization2, f"{hf_name}.bn2"
+            )
+
+            if backbone.stackwise_se_ratio[block_idx][inverted_block]:
+                ir_se_block = ir_block.squeeze_excite
+                port_conv2d(
+                    ir_se_block.conv_reduce,
+                    f"{hf_name}.se.conv_reduce",
+                    True,
+                )
+                port_conv2d(
+                    ir_se_block.conv_expand,
+                    f"{hf_name}.se.conv_expand",
+                    True,
+                )
+
+            port_conv2d(ir_block.conv3, f"{hf_name}.conv_pwl")
+            port_batch_normalization(
+                ir_block.batch_normalization3, f"{hf_name}.bn3"
+            )
+
+    # ConvBnAct Block
+    cba_block_name = f"block_{num_stacks + 1}_0"
+    cba_block = backbone.get_layer(cba_block_name)
+    port_conv2d(cba_block.conv, f"blocks.{num_stacks + 1}.0.conv")
+    port_batch_normalization(
+        cba_block.batch_normalization, f"blocks.{num_stacks + 1}.0.bn1"
+    )
+
+
+def convert_head(task, loader, timm_config):
+    def port_conv2d(keras_layer, hf_weight_prefix, port_bias=False):
+        print(f"porting weights {hf_weight_prefix} -> {keras_layer}")
+        loader.port_weight(
+            keras_layer.kernel,
+            hf_weight_key=f"{hf_weight_prefix}.weight",
+            hook_fn=lambda x, _: np.transpose(x, (2, 3, 1, 0)),
+        )
+
+        if port_bias:
+            print(f"porting bias {hf_weight_prefix} -> {keras_layer}")
+            loader.port_weight(
+                keras_layer.bias,
+                hf_weight_key=f"{hf_weight_prefix}.bias",
+            )
+
+    port_conv2d(task.output_conv, "conv_head", True)
+    prefix = "classifier."
+    loader.port_weight(
+        task.output_dense.kernel,
+        hf_weight_key=prefix + "weight",
+        hook_fn=lambda x, _: np.transpose(np.squeeze(x)),
+    )
+    loader.port_weight(
+        task.output_dense.bias,
+        hf_weight_key=prefix + "bias",
+    )
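Together with the loader registration in the next two hunks, this converter lets a timm MobileNetV3 checkpoint be loaded straight from the Hugging Face Hub. A hedged sketch; `timm/mobilenetv3_small_050.lamb_in1k` is assumed to be the published timm checkpoint this converter targets:

```python
import keras_hub

# Assumption: the timm checkpoint name below is the one this converter targets.
classifier = keras_hub.models.ImageClassifier.from_preset(
    "hf://timm/mobilenetv3_small_050.lamb_in1k"
)
```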

keras_hub/src/utils/timm/preset_loader.py
@@ -5,6 +5,7 @@ from keras_hub.src.utils.preset_utils import PresetLoader
 from keras_hub.src.utils.preset_utils import jax_memory_cleanup
 from keras_hub.src.utils.timm import convert_densenet
 from keras_hub.src.utils.timm import convert_efficientnet
+from keras_hub.src.utils.timm import convert_mobilenet
 from keras_hub.src.utils.timm import convert_resnet
 from keras_hub.src.utils.timm import convert_vgg
 from keras_hub.src.utils.transformers.safetensor_utils import SafetensorLoader
@@ -18,6 +19,8 @@ class TimmPresetLoader(PresetLoader):
             self.converter = convert_resnet
         elif "densenet" in architecture:
             self.converter = convert_densenet
+        elif "mobilenet" in architecture:
+            self.converter = convert_mobilenet
         elif "vgg" in architecture:
             self.converter = convert_vgg
         elif "efficientnet" in architecture:

keras_hub/src/version_utils.py CHANGED

{keras_hub_nightly-0.19.0.dev202502090345.dist-info → keras_hub_nightly-0.19.0.dev202502110348.dist-info}/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: keras-hub-nightly
-Version: 0.19.0.dev202502090345
+Version: 0.19.0.dev202502110348
 Summary: Industry-strength Natural Language Processing extensions for Keras.
 Home-page: https://github.com/keras-team/keras-hub
 Author: Keras team

{keras_hub_nightly-0.19.0.dev202502090345.dist-info → keras_hub_nightly-0.19.0.dev202502110348.dist-info}/RECORD
@@ -1,14 +1,14 @@
 keras_hub/__init__.py,sha256=QGdXyHgYt6cMUAP1ebxwc6oR86dE0dkMxNy2eOCQtFo,855
 keras_hub/api/__init__.py,sha256=EzR6D-XWsm_gDrX5LDwKEmrah_gu3ffpj8GKBudE0yI,485
-keras_hub/api/layers/__init__.py,sha256=
+keras_hub/api/layers/__init__.py,sha256=SJBSu8wXHIw5aTCp4LJ8NHyzvUD4ecw8zf2Gg-jkOwo,3440
 keras_hub/api/metrics/__init__.py,sha256=So8Ec-lOcTzn_UUMmAdzDm8RKkPu2dbRUm2px8gpUEI,381
-keras_hub/api/models/__init__.py,sha256=
+keras_hub/api/models/__init__.py,sha256=HXkKL1jkOQDO0QCF-V2HLzq3h0MZiz_QvI2iCmUGyqE,17131
 keras_hub/api/samplers/__init__.py,sha256=n-_SEXxr2LNUzK2FqVFN7alsrkx1P_HOVTeLZKeGCdE,730
 keras_hub/api/tokenizers/__init__.py,sha256=mtJgQy1spfQnPAkeLoeinsT_W9iCWHlJXwzcol5W1aU,2524
 keras_hub/api/utils/__init__.py,sha256=Gp1E6gG-RtKQS3PBEQEOz9PQvXkXaJ0ySGMqZ7myN7A,215
 keras_hub/src/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 keras_hub/src/api_export.py,sha256=9pQZK27JObxWZ96QPLBp1OBsjWigh1iuV6RglPGMRk0,1499
-keras_hub/src/version_utils.py,sha256=
+keras_hub/src/version_utils.py,sha256=FneQdFaevL30uXjE0jJPrD-FtPxtkJ6P_4pRtsyUc2c,222
 keras_hub/src/layers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 keras_hub/src/layers/modeling/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 keras_hub/src/layers/modeling/alibi_bias.py,sha256=1XBTHI52L_iJDhN_w5ydu_iMhCuTgQAxEPwcLA6BPuk,4411
@@ -237,8 +237,12 @@ keras_hub/src/models/mit/mit_image_converter.py,sha256=Mw7nV-OzyBveGuZUNFsPPKyq9
 keras_hub/src/models/mit/mit_layers.py,sha256=HUJO5uhJ6jgwANpwbQdPlEVwLRVb3BZQ-Ftjg3B9XvY,9734
 keras_hub/src/models/mit/mit_presets.py,sha256=ooLrh2OoGZKxnCGnhB6BynYJtVCXH7nDDFhgQRWt36U,4528
 keras_hub/src/models/mobilenet/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-keras_hub/src/models/mobilenet/mobilenet_backbone.py,sha256=
-keras_hub/src/models/mobilenet/mobilenet_image_classifier.py,sha256=
+keras_hub/src/models/mobilenet/mobilenet_backbone.py,sha256=kUEDzML7MzXjUVKDY0BFn-sjGFJbu8IB8DBvG8t2nLA,28880
+keras_hub/src/models/mobilenet/mobilenet_image_classifier.py,sha256=jgyEn_WO5zLqC9UIuRV564rIY_8bMsmuUBaJA9ag5N0,2310
+keras_hub/src/models/mobilenet/mobilenet_image_classifier_preprocessor.py,sha256=yqM4wQ3ae7wXTBO0aMuvJx6XqllA7Psqzjvpm2NABXM,573
+keras_hub/src/models/mobilenet/mobilenet_image_converter.py,sha256=a3Ka0UYYK5wHSOjf2oMHSgofRazTAeUfttklVefq14w,360
+keras_hub/src/models/mobilenet/mobilenet_presets.py,sha256=6LBAiUF7DWIzfKTfC5QeYYapRK2Ov8M-NVACWdHZfHU,461
+keras_hub/src/models/mobilenet/util.py,sha256=S7j4UacmVIJ3fU8cymyAoK49eHcpWIKTOyUQiEjcbzQ,721
 keras_hub/src/models/opt/__init__.py,sha256=6Ybj8etxNaPsVcuZvaeHnKB3As92Px--dbiFAqOCIT0,239
 keras_hub/src/models/opt/opt_backbone.py,sha256=mK5z_E5mSiIX5s0w4hr4IVQpT7K46W2ajZBmuMjxwaY,5873
 keras_hub/src/models/opt/opt_causal_lm.py,sha256=UqN6E3vJDMx1Wgc5tpptsdFu6wadRgdHqgOLTAMiazw,10851
@@ -384,7 +388,7 @@ keras_hub/src/tokenizers/word_piece_tokenizer_trainer.py,sha256=cylrs02ZrYQ1TuZr
 keras_hub/src/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 keras_hub/src/utils/keras_utils.py,sha256=ZULqIQylAQen-_pNC96htvLaxSJbfAenNoCo3ZSvY5g,1843
 keras_hub/src/utils/pipeline_model.py,sha256=jgzB6NQPSl0KOu08N-TazfOnXnUJbZjH2EXXhx25Ftg,9084
-keras_hub/src/utils/preset_utils.py,sha256=
+keras_hub/src/utils/preset_utils.py,sha256=ZbSEUSacKlr_mgHyB3ChUohgOQN7nMCkE6E2lGxt2HA,31927
 keras_hub/src/utils/python_utils.py,sha256=N8nWeO3san4YnGkffRXG3Ix7VEIMTKSN21FX5TuL7G8,202
 keras_hub/src/utils/tensor_utils.py,sha256=lczQWgPVJU09cLtNbo8MErVFNV9ne4gNlrzbNVQazg4,15042
 keras_hub/src/utils/imagenet/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -392,9 +396,10 @@ keras_hub/src/utils/imagenet/imagenet_utils.py,sha256=MvIvv1WJo51ZXBxy4S7t_DsN3Z
 keras_hub/src/utils/timm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 keras_hub/src/utils/timm/convert_densenet.py,sha256=fu8HBIQis5o3ib2tyI2qnmYScVrVIQySok8vTfa1qJ8,3393
 keras_hub/src/utils/timm/convert_efficientnet.py,sha256=SgEIlyyinS04qoQpEgh3WazHq544zNUCCpfmWh3EjSs,17100
+keras_hub/src/utils/timm/convert_mobilenet.py,sha256=0CHzc2kk36C1aaxt8x1UmfcxPtywQ8Jvfgt_6N8xICw,7215
 keras_hub/src/utils/timm/convert_resnet.py,sha256=8JFkVtdpy5z9h83LJ97rD-a8FRejXPZvMNksNuStqjM,5834
 keras_hub/src/utils/timm/convert_vgg.py,sha256=MT5jGnLrzenPpe66Af_Lp1IdR9KGtsSrcmn6_UPqHvQ,2419
-keras_hub/src/utils/timm/preset_loader.py,sha256=
+keras_hub/src/utils/timm/preset_loader.py,sha256=yhEV8D99GszCCpOw4I9GdOzAWJB0_gBnlS6ecaaIRGk,3518
 keras_hub/src/utils/transformers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 keras_hub/src/utils/transformers/convert_albert.py,sha256=VdKclZpCxtDWq3UbUUQZf4fR9DJK_JYZ73B4O_G9skg,7695
 keras_hub/src/utils/transformers/convert_bart.py,sha256=Tk4h9Md9rwN5wjQbGIVrC7qzDpF8kI8qm-FKL8HlUok,14411
@@ -408,7 +413,7 @@ keras_hub/src/utils/transformers/convert_pali_gemma.py,sha256=B1leeDw96Yvu81hYum
 keras_hub/src/utils/transformers/convert_vit.py,sha256=9SUZ9utNJhW_5cj3acMn9cRy47u2eIcDsrhmzj77o9k,5187
 keras_hub/src/utils/transformers/preset_loader.py,sha256=DgGJXbTSB9Na8FIR-YWWVqQPOFxHwWrGm41EwcS_EFs,3797
 keras_hub/src/utils/transformers/safetensor_utils.py,sha256=CYUHyA4y-B61r7NDnCsFb4t_UmSwZ1k9L-8gzEd6KRg,3339
-keras_hub_nightly-0.19.0.
-keras_hub_nightly-0.19.0.
-keras_hub_nightly-0.19.0.
-keras_hub_nightly-0.19.0.
+keras_hub_nightly-0.19.0.dev202502110348.dist-info/METADATA,sha256=BTyTx2Yps11VrmGpPRYcEHaKLSxHCv5c3NQeiBbZO84,7498
+keras_hub_nightly-0.19.0.dev202502110348.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
+keras_hub_nightly-0.19.0.dev202502110348.dist-info/top_level.txt,sha256=N4J6piIWBKa38A4uV-CnIopnOEf8mHAbkNXafXm_CuA,10
+keras_hub_nightly-0.19.0.dev202502110348.dist-info/RECORD,,