keras-hub-nightly 0.19.0.dev202503060350__py3-none-any.whl → 0.20.0.dev202503140353__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- keras_hub/api/layers/__init__.py +3 -0
- keras_hub/api/models/__init__.py +5 -4
- keras_hub/src/models/cspnet/__init__.py +5 -0
- keras_hub/src/models/cspnet/cspnet_backbone.py +1279 -0
- keras_hub/src/models/cspnet/cspnet_image_classifier.py +12 -0
- keras_hub/src/models/cspnet/cspnet_image_classifier_preprocessor.py +14 -0
- keras_hub/src/models/cspnet/cspnet_image_converter.py +8 -0
- keras_hub/src/models/cspnet/cspnet_presets.py +16 -0
- keras_hub/src/models/gemma/gemma_attention.py +23 -12
- keras_hub/src/models/mobilenet/mobilenet_backbone.py +18 -1
- keras_hub/src/models/mobilenet/mobilenet_image_classifier.py +4 -1
- keras_hub/src/models/mobilenet/mobilenet_presets.py +38 -2
- keras_hub/src/models/siglip/siglip_presets.py +206 -10
- keras_hub/src/models/siglip/siglip_text_encoder.py +7 -1
- keras_hub/src/utils/keras_utils.py +32 -0
- keras_hub/src/utils/preset_utils.py +1 -0
- keras_hub/src/utils/timm/convert_cspnet.py +165 -0
- keras_hub/src/utils/timm/convert_mobilenet.py +120 -44
- keras_hub/src/utils/timm/preset_loader.py +9 -0
- keras_hub/src/version_utils.py +1 -1
- {keras_hub_nightly-0.19.0.dev202503060350.dist-info → keras_hub_nightly-0.20.0.dev202503140353.dist-info}/METADATA +1 -1
- {keras_hub_nightly-0.19.0.dev202503060350.dist-info → keras_hub_nightly-0.20.0.dev202503140353.dist-info}/RECORD +24 -20
- {keras_hub_nightly-0.19.0.dev202503060350.dist-info → keras_hub_nightly-0.20.0.dev202503140353.dist-info}/WHEEL +1 -1
- keras_hub/src/models/csp_darknet/__init__.py +0 -0
- keras_hub/src/models/csp_darknet/csp_darknet_backbone.py +0 -427
- keras_hub/src/models/csp_darknet/csp_darknet_image_classifier.py +0 -10
- {keras_hub_nightly-0.19.0.dev202503060350.dist-info → keras_hub_nightly-0.20.0.dev202503140353.dist-info}/top_level.txt +0 -0
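The headline change is the removal of the old csp_darknet model in favor of a new, timm-compatible cspnet family (backbone, image classifier, preprocessor, image converter, presets, and a weight converter), alongside expanded MobileNet and SigLIP presets. As a rough sketch of how the new classifier is meant to be used once presets are published — the checkpoint handle below is an assumed example, not taken from this diff:

    import numpy as np
    import keras_hub

    # Assumed Hugging Face handle for a timm CSPDarkNet checkpoint; the preset
    # names that actually ship live in keras_hub/src/models/cspnet/cspnet_presets.py.
    classifier = keras_hub.models.ImageClassifier.from_preset(
        "hf://timm/cspdarknet53.ra_in1k"
    )
    scores = classifier.predict(np.random.uniform(size=(1, 256, 256, 3)))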
keras_hub/src/utils/timm/convert_cspnet.py
ADDED
@@ -0,0 +1,165 @@
+import numpy as np
+
+from keras_hub.src.models.cspnet.cspnet_backbone import CSPNetBackbone
+
+backbone_cls = CSPNetBackbone
+
+
+def convert_backbone_config(timm_config):
+    timm_architecture = timm_config["architecture"]
+
+    if timm_architecture == "cspdarknet53":
+        stem_filters = 32
+        stem_kernel_size = 3
+        stem_strides = 1
+        stackwise_depth = [1, 2, 8, 8, 4]
+        stackwise_num_filters = [64, 128, 256, 512, 1024]
+        bottle_ratio = (0.5,) + (1.0,)
+        block_ratio = (1.0,) + (0.5,)
+        expand_ratio = (2.0,) + (1.0,)
+        stage_type = "csp"
+        block_type = "dark_block"
+        down_growth = True
+        stackwise_strides = 2
+    else:
+        raise ValueError(
+            f"Currently, the architecture {timm_architecture} is not supported."
+        )
+    return dict(
+        stem_filters=stem_filters,
+        stem_kernel_size=stem_kernel_size,
+        stem_strides=stem_strides,
+        stackwise_depth=stackwise_depth,
+        stackwise_num_filters=stackwise_num_filters,
+        bottle_ratio=bottle_ratio,
+        block_ratio=block_ratio,
+        expand_ratio=expand_ratio,
+        stage_type=stage_type,
+        block_type=block_type,
+        stackwise_strides=stackwise_strides,
+        down_growth=down_growth,
+    )
+
+
+def convert_weights(backbone, loader, timm_config):
+    def port_conv2d(hf_weight_prefix, keras_layer_name):
+        loader.port_weight(
+            backbone.get_layer(keras_layer_name).kernel,
+            hf_weight_key=f"{hf_weight_prefix}.weight",
+            hook_fn=lambda x, _: np.transpose(x, (2, 3, 1, 0)),
+        )
+
+    def port_batch_normalization(hf_weight_prefix, keras_layer_name):
+        loader.port_weight(
+            backbone.get_layer(keras_layer_name).gamma,
+            hf_weight_key=f"{hf_weight_prefix}.weight",
+        )
+        loader.port_weight(
+            backbone.get_layer(keras_layer_name).beta,
+            hf_weight_key=f"{hf_weight_prefix}.bias",
+        )
+        loader.port_weight(
+            backbone.get_layer(keras_layer_name).moving_mean,
+            hf_weight_key=f"{hf_weight_prefix}.running_mean",
+        )
+        loader.port_weight(
+            backbone.get_layer(keras_layer_name).moving_variance,
+            hf_weight_key=f"{hf_weight_prefix}.running_var",
+        )
+
+    # Stem
+
+    stem_filter = backbone.stem_filters
+    if not isinstance(stem_filter, (tuple, list)):
+        stem_filter = [stem_filter]
+
+    for i in range(len(stem_filter)):
+        port_conv2d(f"stem.conv{i + 1}.conv", f"csp_stem_conv_{i}")
+        port_batch_normalization(f"stem.conv{i + 1}.bn", f"csp_stem_bn_{i}")
+
+    # Stages
+    stackwise_depth = backbone.stackwise_depth
+    stage_type = backbone.stage_type
+    block_type = backbone.block_type
+
+    for idx, block in enumerate(stackwise_depth):
+        port_conv2d(
+            f"stages.{idx}.conv_down.conv",
+            f"stage_{idx}_{stage_type}_conv_down_1",
+        )
+        port_batch_normalization(
+            f"stages.{idx}.conv_down.bn", f"stage_{idx}_{stage_type}_bn_1"
+        )
+        port_conv2d(
+            f"stages.{idx}.conv_exp.conv", f"stage_{idx}_{stage_type}_conv_exp"
+        )
+        port_batch_normalization(
+            f"stages.{idx}.conv_exp.bn", f"stage_{idx}_{stage_type}_bn_2"
+        )
+
+        for i in range(block):
+            port_conv2d(
+                f"stages.{idx}.blocks.{i}.conv1.conv",
+                f"stage_{idx}_block_{i}_{block_type}_conv_1",
+            )
+            port_batch_normalization(
+                f"stages.{idx}.blocks.{i}.conv1.bn",
+                f"stage_{idx}_block_{i}_{block_type}_bn_1",
+            )
+            port_conv2d(
+                f"stages.{idx}.blocks.{i}.conv2.conv",
+                f"stage_{idx}_block_{i}_{block_type}_conv_2",
+            )
+            port_batch_normalization(
+                f"stages.{idx}.blocks.{i}.conv2.bn",
+                f"stage_{idx}_block_{i}_{block_type}_bn_2",
+            )
+            if block_type == "bottleneck_block":
+                port_conv2d(
+                    f"stages.{idx}.blocks.{i}.conv3.conv",
+                    f"stage_{idx}_block_{i}_{block_type}_conv_3",
+                )
+                port_batch_normalization(
+                    f"stages.{idx}.blocks.{i}.conv3.bn",
+                    f"stage_{idx}_block_{i}_{block_type}_bn_3",
+                )
+
+        if stage_type == "csp":
+            port_conv2d(
+                f"stages.{idx}.conv_transition_b.conv",
+                f"stage_{idx}_{stage_type}_conv_transition_b",
+            )
+            port_batch_normalization(
+                f"stages.{idx}.conv_transition_b.bn",
+                f"stage_{idx}_{stage_type}_transition_b_bn",
+            )
+            port_conv2d(
+                f"stages.{idx}.conv_transition.conv",
+                f"stage_{idx}_{stage_type}_conv_transition",
+            )
+            port_batch_normalization(
+                f"stages.{idx}.conv_transition.bn",
+                f"stage_{idx}_{stage_type}_transition_bn",
+            )
+
+        else:
+            port_conv2d(
+                f"stages.{idx}.conv_transition.conv",
+                f"stage_{idx}_{stage_type}_conv_transition",
+            )
+            port_batch_normalization(
+                f"stages.{idx}.conv_transition.bn",
+                f"stage_{idx}_{stage_type}_transition_bn",
+            )
+
+
+def convert_head(task, loader, timm_config):
+    loader.port_weight(
+        task.output_dense.kernel,
+        hf_weight_key="head.fc.weight",
+        hook_fn=lambda x, _: np.transpose(np.squeeze(x)),
+    )
+    loader.port_weight(
+        task.output_dense.bias,
+        hf_weight_key="head.fc.bias",
+    )
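The new converter follows the same pattern as the other timm converters: convert_backbone_config maps the timm architecture string onto CSPNetBackbone constructor arguments, while convert_weights and convert_head copy checkpoint tensors layer by layer through the safetensor loader. A minimal sketch of how the pieces compose, assuming the remaining CSPNetBackbone constructor arguments (input shape, dtype, and so on) have usable defaults and that the timm config.json has already been read:

    from keras_hub.src.models.cspnet.cspnet_backbone import CSPNetBackbone
    from keras_hub.src.utils.timm import convert_cspnet

    # Abbreviated stand-in for the config.json shipped with a timm checkpoint.
    timm_config = {"architecture": "cspdarknet53"}

    kwargs = convert_cspnet.convert_backbone_config(timm_config)
    backbone = CSPNetBackbone(**kwargs)  # randomly initialized at this point
    # In the real flow, TimmPresetLoader then calls
    # convert_cspnet.convert_weights(backbone, loader, timm_config) with a
    # SafetensorLoader to copy the pretrained weights in.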
keras_hub/src/utils/timm/convert_mobilenet.py
CHANGED
@@ -8,64 +8,135 @@ backbone_cls = MobileNetBackbone
 def convert_backbone_config(timm_config):
     timm_architecture = timm_config["architecture"]
 
-
-
-
-    else:
-        input_activation = "relu6"
-        output_activation = "relu6"
-
-    if timm_architecture == "mobilenetv3_small_050":
-        stackwise_num_blocks = [2, 3, 2, 3]
-        stackwise_expansion = [
+    kwargs = {
+        "stackwise_num_blocks": [2, 3, 2, 3],
+        "stackwise_expansion": [
             [40, 56],
             [64, 144, 144],
             [72, 72],
             [144, 288, 288],
-        ]
-        stackwise_num_filters
-
-
-
+        ],
+        "stackwise_num_filters": [
+            [16, 16],
+            [24, 24, 24],
+            [24, 24],
+            [48, 48, 48],
+        ],
+        "stackwise_kernel_size": [[3, 3], [5, 5, 5], [5, 5], [5, 5, 5]],
+        "stackwise_num_strides": [[2, 1], [2, 1, 1], [1, 1], [2, 1, 1]],
+        "stackwise_se_ratio": [
             [None, None],
             [0.25, 0.25, 0.25],
             [0.25, 0.25],
             [0.25, 0.25, 0.25],
-        ]
-        stackwise_activation
+        ],
+        "stackwise_activation": [
             ["relu", "relu"],
             ["hard_swish", "hard_swish", "hard_swish"],
             ["hard_swish", "hard_swish"],
             ["hard_swish", "hard_swish", "hard_swish"],
-        ]
-        stackwise_padding
-        output_num_filters
-        input_num_filters
-        depthwise_filters
-
-
+        ],
+        "stackwise_padding": [[1, 1], [2, 2, 2], [2, 2], [2, 2, 2]],
+        "output_num_filters": 1024,
+        "input_num_filters": 16,
+        "depthwise_filters": 8,
+        "depthwise_stride": 2,
+        "depthwise_residual": False,
+        "squeeze_and_excite": 0.5,
+        "last_layer_filter": 288,
+        "input_activation": "relu6",
+        "output_activation": "relu6",
+    }
+
+    if "mobilenetv3_" in timm_architecture:
+        kwargs["input_activation"] = "hard_swish"
+        kwargs["output_activation"] = "hard_swish"
+
+    if timm_architecture == "mobilenetv3_small_050":
+        pass
+    elif timm_architecture == "mobilenetv3_small_100":
+        modified_kwargs = {
+            "stackwise_expansion": [
+                [72, 88],
+                [96, 240, 240],
+                [120, 144],
+                [288, 576, 576],
+            ],
+            "stackwise_num_filters": [
+                [24, 24],
+                [40, 40, 40],
+                [48, 48],
+                [96, 96, 96],
+            ],
+            "depthwise_filters": 16,
+            "last_layer_filter": 576,
+        }
+        kwargs.update(modified_kwargs)
+    elif timm_architecture.startswith("mobilenetv3_large_100"):
+        modified_kwargs = {
+            "stackwise_num_blocks": [2, 3, 4, 2, 3],
+            "stackwise_expansion": [
+                [64, 72],
+                [72, 120, 120],
+                [240, 200, 184, 184],
+                [480, 672],
+                [672, 960, 960],
+            ],
+            "stackwise_num_filters": [
+                [24, 24],
+                [40, 40, 40],
+                [80, 80, 80, 80],
+                [112, 112],
+                [160, 160, 160],
+            ],
+            "stackwise_kernel_size": [
+                [3, 3],
+                [5, 5, 5],
+                [3, 3, 3, 3],
+                [3, 3],
+                [5, 5, 5],
+            ],
+            "stackwise_num_strides": [
+                [2, 1],
+                [2, 1, 1],
+                [2, 1, 1, 1],
+                [1, 1],
+                [2, 1, 1],
+            ],
+            "stackwise_se_ratio": [
+                [None, None],
+                [0.25, 0.25, 0.25],
+                [None, None, None, None],
+                [0.25, 0.25],
+                [0.25, 0.25, 0.25],
+            ],
+            "stackwise_activation": [
+                ["relu", "relu"],
+                ["relu", "relu", "relu"],
+                ["hard_swish", "hard_swish", "hard_swish", "hard_swish"],
+                ["hard_swish", "hard_swish"],
+                ["hard_swish", "hard_swish", "hard_swish"],
+            ],
+            "stackwise_padding": [
+                [1, 1],
+                [2, 2, 2],
+                [1, 1, 1, 1],
+                [1, 1],
+                [2, 2, 2],
+            ],
+            "depthwise_filters": 16,
+            "depthwise_stride": 1,
+            "depthwise_residual": True,
+            "squeeze_and_excite": None,
+            "last_layer_filter": 960,
+        }
+        kwargs.update(modified_kwargs)
     else:
         raise ValueError(
             f"Currently, the architecture {timm_architecture} is not supported."
         )
 
-    return
-        input_num_filters=input_num_filters,
-        input_activation=input_activation,
-        depthwise_filters=depthwise_filters,
-        squeeze_and_excite=squeeze_and_excite,
-        stackwise_num_blocks=stackwise_num_blocks,
-        stackwise_expansion=stackwise_expansion,
-        stackwise_num_filters=stackwise_num_filters,
-        stackwise_kernel_size=stackwise_kernel_size,
-        stackwise_num_strides=stackwise_num_strides,
-        stackwise_se_ratio=stackwise_se_ratio,
-        stackwise_activation=stackwise_activation,
-        stackwise_padding=stackwise_padding,
-        output_num_filters=output_num_filters,
-        output_activation=output_activation,
-        last_layer_filter=last_layer_filter,
-    )
+    return kwargs
 
 
 def convert_weights(backbone, loader, timm_config):
@@ -120,9 +191,14 @@ def convert_weights(backbone, loader, timm_config):
     port_conv2d(stem_block.conv1, f"{hf_name}.conv_dw")
     port_batch_normalization(stem_block.batch_normalization1, f"{hf_name}.bn1")
 
-
-
-
+    if stem_block.squeeze_excite_ratio:
+        stem_se_block = stem_block.se_layer
+        port_conv2d(
+            stem_se_block.conv_reduce, f"{hf_name}.se.conv_reduce", True
+        )
+        port_conv2d(
+            stem_se_block.conv_expand, f"{hf_name}.se.conv_expand", True
+        )
 
     port_conv2d(stem_block.conv2, f"{hf_name}.conv_pw")
     port_batch_normalization(stem_block.batch_normalization2, f"{hf_name}.bn2")
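convert_backbone_config for MobileNet is reworked from per-architecture local variables into a single kwargs dict seeded with the mobilenetv3_small_050 configuration; the small_100 and large_100 branches then override only what differs via dict.update, and the shared MobileNetV3 activations are patched in one place. A quick check of the resulting behavior, using values taken from the diff above and assuming the 0.20 nightly is installed:

    from keras_hub.src.utils.timm import convert_mobilenet

    small = convert_mobilenet.convert_backbone_config(
        {"architecture": "mobilenetv3_small_100"}
    )
    large = convert_mobilenet.convert_backbone_config(
        {"architecture": "mobilenetv3_large_100"}
    )

    # small_100 keeps the small_050 block layout but widens filters and the head.
    assert small["stackwise_num_blocks"] == [2, 3, 2, 3]
    assert small["last_layer_filter"] == 576
    # large_100 overrides the layout itself and drops the stem squeeze-excite.
    assert large["stackwise_num_blocks"] == [2, 3, 4, 2, 3]
    assert large["squeeze_and_excite"] is None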
keras_hub/src/utils/timm/preset_loader.py
CHANGED
@@ -3,6 +3,7 @@
 from keras_hub.src.models.image_classifier import ImageClassifier
 from keras_hub.src.utils.preset_utils import PresetLoader
 from keras_hub.src.utils.preset_utils import jax_memory_cleanup
+from keras_hub.src.utils.timm import convert_cspnet
 from keras_hub.src.utils.timm import convert_densenet
 from keras_hub.src.utils.timm import convert_efficientnet
 from keras_hub.src.utils.timm import convert_mobilenet
@@ -17,6 +18,8 @@ class TimmPresetLoader(PresetLoader):
         architecture = self.config["architecture"]
         if "resnet" in architecture:
             self.converter = convert_resnet
+        elif "csp" in architecture:
+            self.converter = convert_cspnet
         elif "densenet" in architecture:
             self.converter = convert_densenet
         elif "mobilenet" in architecture:
@@ -51,6 +54,12 @@ class TimmPresetLoader(PresetLoader):
         )
         # Support loading the classification head for classifier models.
         kwargs["num_classes"] = self.config["num_classes"]
+        if (
+            "num_features" in self.config
+            and "mobilenet" in self.config["architecture"]
+        ):
+            kwargs["num_features"] = self.config["num_features"]
+
         task = super().load_task(cls, load_weights, load_task_weights, **kwargs)
         if load_task_weights:
             with SafetensorLoader(self.preset, prefix="") as loader:
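On the loading side, TimmPresetLoader now routes any architecture string containing "csp" to the new converter and, for MobileNet checkpoints, forwards timm's num_features into the task kwargs so the classifier head width matches the checkpoint. End to end, loading a converted timm checkpoint remains a one-liner; the handle below is an assumed example rather than one named in this diff:

    import keras_hub

    # Assumed Hugging Face handle for a timm MobileNetV3 checkpoint.
    model = keras_hub.models.ImageClassifier.from_preset(
        "hf://timm/mobilenetv3_small_050.lamb_in1k"
    )
    # The loader reads config.json, picks convert_mobilenet from the
    # architecture string, and passes num_classes (and now num_features)
    # through to the ImageClassifier constructor.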
keras_hub/src/version_utils.py
CHANGED

{keras_hub_nightly-0.19.0.dev202503060350.dist-info → keras_hub_nightly-0.20.0.dev202503140353.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: keras-hub-nightly
-Version: 0.19.0.dev202503060350
+Version: 0.20.0.dev202503140353
 Summary: Industry-strength Natural Language Processing extensions for Keras.
 Home-page: https://github.com/keras-team/keras-hub
 Author: Keras team
{keras_hub_nightly-0.19.0.dev202503060350.dist-info → keras_hub_nightly-0.20.0.dev202503140353.dist-info}/RECORD
CHANGED
@@ -1,14 +1,14 @@
 keras_hub/__init__.py,sha256=QGdXyHgYt6cMUAP1ebxwc6oR86dE0dkMxNy2eOCQtFo,855
 keras_hub/api/__init__.py,sha256=EzR6D-XWsm_gDrX5LDwKEmrah_gu3ffpj8GKBudE0yI,485
-keras_hub/api/layers/__init__.py,sha256
+keras_hub/api/layers/__init__.py,sha256=-yHyqsjWBhmFv9RSS2cMyPcieU1RkNzcNsq9IDXSVFE,3626
 keras_hub/api/metrics/__init__.py,sha256=So8Ec-lOcTzn_UUMmAdzDm8RKkPu2dbRUm2px8gpUEI,381
-keras_hub/api/models/__init__.py,sha256=
+keras_hub/api/models/__init__.py,sha256=vtOXBt8YNXvaDrfpKRpJ6MXjU5FzMv2a44Db_P8cGUg,17606
 keras_hub/api/samplers/__init__.py,sha256=n-_SEXxr2LNUzK2FqVFN7alsrkx1P_HOVTeLZKeGCdE,730
 keras_hub/api/tokenizers/__init__.py,sha256=lhvIqP8xqdkjmKSEBujHNxh5Tk5A3T0I7AUuMmKzx00,2597
 keras_hub/api/utils/__init__.py,sha256=Gp1E6gG-RtKQS3PBEQEOz9PQvXkXaJ0ySGMqZ7myN7A,215
 keras_hub/src/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 keras_hub/src/api_export.py,sha256=9pQZK27JObxWZ96QPLBp1OBsjWigh1iuV6RglPGMRk0,1499
-keras_hub/src/version_utils.py,sha256=
+keras_hub/src/version_utils.py,sha256=N47T0B_GT_1nZdiGR84whto889H0T1gB3_bDPndh2rQ,222
 keras_hub/src/layers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 keras_hub/src/layers/modeling/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 keras_hub/src/layers/modeling/alibi_bias.py,sha256=1XBTHI52L_iJDhN_w5ydu_iMhCuTgQAxEPwcLA6BPuk,4411
@@ -111,9 +111,12 @@ keras_hub/src/models/clip/clip_text_encoder.py,sha256=BCIE24eKZJ3yc4T0sjD6-Msjr1
 keras_hub/src/models/clip/clip_tokenizer.py,sha256=6gIm_LWRbCeBQUI9M2gA8-OXb4tXGygixkbcL6joV1c,7444
 keras_hub/src/models/clip/clip_vision_embedding.py,sha256=6_qC7T1dqKd-39EreGmHZj-YfjOLEDDKjWnEKcKIyuY,3667
 keras_hub/src/models/clip/clip_vision_encoder.py,sha256=q62MXySZN38uCsjqq8cttfBxD7P5abaKQV2i8_u4N6E,6385
-keras_hub/src/models/
-keras_hub/src/models/
-keras_hub/src/models/
+keras_hub/src/models/cspnet/__init__.py,sha256=TOpvk2cfOVv1bPA1BOGZj0mhmhc6E98zZmW9e0PIvhk,257
+keras_hub/src/models/cspnet/cspnet_backbone.py,sha256=xCeu8BpQSpf-EgCrQehQDg4jNKRAWm0h8paWBfN2DGE,41381
+keras_hub/src/models/cspnet/cspnet_image_classifier.py,sha256=JqfBHIBTFxaLOyAWx6TdXs0aAOMbcCx1oo47RoQnytc,510
+keras_hub/src/models/cspnet/cspnet_image_classifier_preprocessor.py,sha256=ACRnOhjslk2ZZhpPfJioW4um4RLYa-Suk59z9wa5vfo,543
+keras_hub/src/models/cspnet/cspnet_image_converter.py,sha256=f-ICTY2T-RlCykU6qOHDxg0fY7ECfZ_xpSJzIVmbvpc,342
+keras_hub/src/models/cspnet/cspnet_presets.py,sha256=fWzPz3eZuhFNxxPn9MJHabcXiyJA2PRRVlzNmoFBwWg,533
 keras_hub/src/models/deberta_v3/__init__.py,sha256=6E-QtAD1uvTBobrn5bUoyB1qtaCJU-t73TtbAEH6i9g,288
 keras_hub/src/models/deberta_v3/deberta_v3_backbone.py,sha256=oXdV7naTiMowuU3GsXEUo5K0GXiKbPKxdo27o5fXWjc,7258
 keras_hub/src/models/deberta_v3/deberta_v3_masked_lm.py,sha256=ADBktf1DdiP9T6LCaMhdFiZ_mUbBRKMekY5mGwAeJIo,4186
@@ -183,7 +186,7 @@ keras_hub/src/models/flux/flux_presets.py,sha256=z7C_FbI1_F5YETXuWpc7Yh_0w-5N0eB
 keras_hub/src/models/flux/flux_text_to_image.py,sha256=Rf5dD2EhG0bE8Gyg9sqaA8YEexS1kdraofIkxiZDjvc,4166
 keras_hub/src/models/flux/flux_text_to_image_preprocessor.py,sha256=Fs9jr97QtmRUbRRz1kITpkuhDM2GoV3n0XSFC-qQA14,2252
 keras_hub/src/models/gemma/__init__.py,sha256=rVzOJMJ39bgVlT8UdC0t8PlN2c237GKTBmfHIsbPuOQ,251
-keras_hub/src/models/gemma/gemma_attention.py,sha256=
+keras_hub/src/models/gemma/gemma_attention.py,sha256=XShBTunOWQOOE4Aapy3HdV9uIWuMcdNdYS1k1P3ia60,9708
 keras_hub/src/models/gemma/gemma_backbone.py,sha256=GzAUSArw_pN9dtWQzTVhWDbW-XyWt4GyMcFLn9hwmh0,13391
 keras_hub/src/models/gemma/gemma_causal_lm.py,sha256=3OXaIXlrKqMIuUnBk-bUz-0SYFL-XkkQTWm8qRY2YII,16770
 keras_hub/src/models/gemma/gemma_causal_lm_preprocessor.py,sha256=bpKkEurWIfa6Kp9s4pz84-sBDSA6ZFNHP8nXG1fFQrg,2912
@@ -237,11 +240,11 @@ keras_hub/src/models/mit/mit_image_converter.py,sha256=Mw7nV-OzyBveGuZUNFsPPKyq9
 keras_hub/src/models/mit/mit_layers.py,sha256=HUJO5uhJ6jgwANpwbQdPlEVwLRVb3BZQ-Ftjg3B9XvY,9734
 keras_hub/src/models/mit/mit_presets.py,sha256=ooLrh2OoGZKxnCGnhB6BynYJtVCXH7nDDFhgQRWt36U,4528
 keras_hub/src/models/mobilenet/__init__.py,sha256=hxkNGGj_iAMu62iooUDEPA818sNOIgjG7pXMLEMOsAE,275
-keras_hub/src/models/mobilenet/mobilenet_backbone.py,sha256=
-keras_hub/src/models/mobilenet/mobilenet_image_classifier.py,sha256=
+keras_hub/src/models/mobilenet/mobilenet_backbone.py,sha256=aZBSFeLUObYYoi3od9DI1KfgPCqh5GHTcAI8Y2ZHShA,29536
+keras_hub/src/models/mobilenet/mobilenet_image_classifier.py,sha256=rgPVJeSRqyp3-Fgf5ERbg_97c4cSawRmAtoJpdBN8WA,2437
 keras_hub/src/models/mobilenet/mobilenet_image_classifier_preprocessor.py,sha256=yqM4wQ3ae7wXTBO0aMuvJx6XqllA7Psqzjvpm2NABXM,573
 keras_hub/src/models/mobilenet/mobilenet_image_converter.py,sha256=a3Ka0UYYK5wHSOjf2oMHSgofRazTAeUfttklVefq14w,360
-keras_hub/src/models/mobilenet/mobilenet_presets.py,sha256
+keras_hub/src/models/mobilenet/mobilenet_presets.py,sha256=--nhaM6LmaiCtQlZPDwoQTHW7ciU0igzS4f9ssdD9Lo,1903
 keras_hub/src/models/mobilenet/util.py,sha256=S7j4UacmVIJ3fU8cymyAoK49eHcpWIKTOyUQiEjcbzQ,721
 keras_hub/src/models/opt/__init__.py,sha256=6Ybj8etxNaPsVcuZvaeHnKB3As92Px--dbiFAqOCIT0,239
 keras_hub/src/models/opt/opt_backbone.py,sha256=mK5z_E5mSiIX5s0w4hr4IVQpT7K46W2ajZBmuMjxwaY,5873
@@ -313,8 +316,8 @@ keras_hub/src/models/siglip/siglip_image_converter.py,sha256=yjYc0XOyL37WLlr-X6V
 keras_hub/src/models/siglip/siglip_layers.py,sha256=c20n6v3cFsI-Im9GBVTknhj_IpX79I4a-fajBKRMzQA,19893
 keras_hub/src/models/siglip/siglip_loss.py,sha256=n6zmOeL0o7Nwb5iaoEZfrxiAsQoqZ9yLIlaCJsAfTg4,1442
 keras_hub/src/models/siglip/siglip_preprocessor.py,sha256=r1Ej7hVwr5BudFYTHkjW5yc3lk4OYZD1s3t32lKkuec,5660
-keras_hub/src/models/siglip/siglip_presets.py,sha256=
-keras_hub/src/models/siglip/siglip_text_encoder.py,sha256=
+keras_hub/src/models/siglip/siglip_presets.py,sha256=EOx72XhwD_kflqn1kgwosRc3a6QqDE2ku5Pkxg0kfMI,13213
+keras_hub/src/models/siglip/siglip_text_encoder.py,sha256=xOVvzyQHLX9ne30y4ussar99gNMXPXHYKlkbCX_On2Y,5380
 keras_hub/src/models/siglip/siglip_tokenizer.py,sha256=j_67JbIHJDRk-CbiemG2dgAO6lp3_0_JdnfroZ90G18,2579
 keras_hub/src/models/siglip/siglip_vision_encoder.py,sha256=CaNaFq5thBC3TUXXOf2qknk5vWsauM20ZoaDPYRnXcs,5927
 keras_hub/src/models/stable_diffusion_3/__init__.py,sha256=ZKYQuaRObyhKq8GVAHmoRvlXp6FpU8ChvutVCHyXKuc,343
@@ -396,20 +399,21 @@ keras_hub/src/tokenizers/unicode_codepoint_tokenizer.py,sha256=hRv_XxoPIPDpHfO0Z
 keras_hub/src/tokenizers/word_piece_tokenizer.py,sha256=vP6AZgbzsRiuPCt3W_n94nsF7XiERnagWcH_rqJHtVU,19943
 keras_hub/src/tokenizers/word_piece_tokenizer_trainer.py,sha256=cylrs02ZrYQ1TuZr9oyS3NrVbDwGctA3VXbIh1pFJMQ,6743
 keras_hub/src/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-keras_hub/src/utils/keras_utils.py,sha256=
+keras_hub/src/utils/keras_utils.py,sha256=IB_eIrln3N5sVyCapwv1jzLEmuBv8vBRwSVd3toSgyI,3097
 keras_hub/src/utils/pipeline_model.py,sha256=jgzB6NQPSl0KOu08N-TazfOnXnUJbZjH2EXXhx25Ftg,9084
-keras_hub/src/utils/preset_utils.py,sha256=
+keras_hub/src/utils/preset_utils.py,sha256=5xEm6Uz1vfQkBqyENt97qaxWoq-P7mlPC0LIpXqDM70,31928
 keras_hub/src/utils/python_utils.py,sha256=N8nWeO3san4YnGkffRXG3Ix7VEIMTKSN21FX5TuL7G8,202
 keras_hub/src/utils/tensor_utils.py,sha256=lczQWgPVJU09cLtNbo8MErVFNV9ne4gNlrzbNVQazg4,15042
 keras_hub/src/utils/imagenet/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 keras_hub/src/utils/imagenet/imagenet_utils.py,sha256=MvIvv1WJo51ZXBxy4S7t_DsN3ZMtJWlC4cmRvKM2kIA,39304
 keras_hub/src/utils/timm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+keras_hub/src/utils/timm/convert_cspnet.py,sha256=O5HCdeKcSFWOoFr8_wIUQb4Noc0tBEo5Aogk2d6SEes,5676
 keras_hub/src/utils/timm/convert_densenet.py,sha256=fu8HBIQis5o3ib2tyI2qnmYScVrVIQySok8vTfa1qJ8,3393
 keras_hub/src/utils/timm/convert_efficientnet.py,sha256=SgEIlyyinS04qoQpEgh3WazHq544zNUCCpfmWh3EjSs,17100
-keras_hub/src/utils/timm/convert_mobilenet.py,sha256=
+keras_hub/src/utils/timm/convert_mobilenet.py,sha256=XTqHOK4nJwigKefsw7ktWJtOgRpEVMO9MtRhuP5qP_k,9219
 keras_hub/src/utils/timm/convert_resnet.py,sha256=8JFkVtdpy5z9h83LJ97rD-a8FRejXPZvMNksNuStqjM,5834
 keras_hub/src/utils/timm/convert_vgg.py,sha256=MT5jGnLrzenPpe66Af_Lp1IdR9KGtsSrcmn6_UPqHvQ,2419
-keras_hub/src/utils/timm/preset_loader.py,sha256=
+keras_hub/src/utils/timm/preset_loader.py,sha256=j2HYi61Zbt0CGd33evFJ8j2fraXl0Zardf4qqAb82K0,3841
 keras_hub/src/utils/transformers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 keras_hub/src/utils/transformers/convert_albert.py,sha256=VdKclZpCxtDWq3UbUUQZf4fR9DJK_JYZ73B4O_G9skg,7695
 keras_hub/src/utils/transformers/convert_bart.py,sha256=Tk4h9Md9rwN5wjQbGIVrC7qzDpF8kI8qm-FKL8HlUok,14411
@@ -423,7 +427,7 @@ keras_hub/src/utils/transformers/convert_pali_gemma.py,sha256=B1leeDw96Yvu81hYum
 keras_hub/src/utils/transformers/convert_vit.py,sha256=9SUZ9utNJhW_5cj3acMn9cRy47u2eIcDsrhmzj77o9k,5187
 keras_hub/src/utils/transformers/preset_loader.py,sha256=DgGJXbTSB9Na8FIR-YWWVqQPOFxHwWrGm41EwcS_EFs,3797
 keras_hub/src/utils/transformers/safetensor_utils.py,sha256=CYUHyA4y-B61r7NDnCsFb4t_UmSwZ1k9L-8gzEd6KRg,3339
-keras_hub_nightly-0.
-keras_hub_nightly-0.
-keras_hub_nightly-0.
-keras_hub_nightly-0.
+keras_hub_nightly-0.20.0.dev202503140353.dist-info/METADATA,sha256=o0qWoNN3PIokUVVwG263_H5KoOW_xPP_EV6SKnp5LEY,7715
+keras_hub_nightly-0.20.0.dev202503140353.dist-info/WHEEL,sha256=52BFRY2Up02UkjOa29eZOS2VxUrpPORXg1pkohGGUS8,91
+keras_hub_nightly-0.20.0.dev202503140353.dist-info/top_level.txt,sha256=N4J6piIWBKa38A4uV-CnIopnOEf8mHAbkNXafXm_CuA,10
+keras_hub_nightly-0.20.0.dev202503140353.dist-info/RECORD,,
File without changes