keras-hub-nightly 0.21.0.dev202505280410__py3-none-any.whl → 0.22.0.dev202505300409__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
keras_hub/src/models/vit/vit_presets.py CHANGED
@@ -11,7 +11,7 @@ backbone_presets = {
             "params": 85798656,
             "path": "vit",
         },
-        "kaggle_handle": "kaggle://keras/vit/keras/vit_base_patch16_224_imagenet/2",
+        "kaggle_handle": "kaggle://keras/vit/keras/vit_base_patch16_224_imagenet/3",
     },
     "vit_base_patch16_384_imagenet": {
         "metadata": {
@@ -22,7 +22,7 @@ backbone_presets = {
             "params": 86090496,
             "path": "vit",
         },
-        "kaggle_handle": "kaggle://keras/vit/keras/vit_base_patch16_384_imagenet/2",
+        "kaggle_handle": "kaggle://keras/vit/keras/vit_base_patch16_384_imagenet/3",
     },
     "vit_large_patch16_224_imagenet": {
         "metadata": {
@@ -33,7 +33,7 @@ backbone_presets = {
             "params": 303301632,
             "path": "vit",
         },
-        "kaggle_handle": "kaggle://keras/vit/keras/vit_large_patch16_224_imagenet/2",
+        "kaggle_handle": "kaggle://keras/vit/keras/vit_large_patch16_224_imagenet/3",
     },
     "vit_large_patch16_384_imagenet": {
         "metadata": {
@@ -44,7 +44,7 @@ backbone_presets = {
             "params": 303690752,
             "path": "vit",
         },
-        "kaggle_handle": "kaggle://keras/vit/keras/vit_large_patch16_384_imagenet/2",
+        "kaggle_handle": "kaggle://keras/vit/keras/vit_large_patch16_384_imagenet/3",
     },
     "vit_base_patch32_384_imagenet": {
         "metadata": {
@@ -55,7 +55,7 @@ backbone_presets = {
             "params": 87528192,
             "path": "vit",
         },
-        "kaggle_handle": "kaggle://keras/vit/keras/vit_base_patch32_384_imagenet/1",
+        "kaggle_handle": "kaggle://keras/vit/keras/vit_base_patch32_384_imagenet/2",
     },
     "vit_large_patch32_384_imagenet": {
         "metadata": {
@@ -66,7 +66,7 @@ backbone_presets = {
             "params": 305607680,
             "path": "vit",
         },
-        "kaggle_handle": "kaggle://keras/vit/keras/vit_large_patch32_384_imagenet/1",
+        "kaggle_handle": "kaggle://keras/vit/keras/vit_large_patch32_384_imagenet/2",
     },
     "vit_base_patch16_224_imagenet21k": {
         "metadata": {
@@ -77,7 +77,7 @@ backbone_presets = {
             "params": 85798656,
             "path": "vit",
         },
-        "kaggle_handle": "kaggle://keras/vit/keras/vit_base_patch16_224_imagenet21k/1",
+        "kaggle_handle": "kaggle://keras/vit/keras/vit_base_patch16_224_imagenet21k/2",
     },
     "vit_base_patch32_224_imagenet21k": {
         "metadata": {
@@ -88,7 +88,7 @@ backbone_presets = {
             "params": 87455232,
             "path": "vit",
         },
-        "kaggle_handle": "kaggle://keras/vit/keras/vit_base_patch32_224_imagenet21k/1",
+        "kaggle_handle": "kaggle://keras/vit/keras/vit_base_patch32_224_imagenet21k/2",
     },
     "vit_huge_patch14_224_imagenet21k": {
         "metadata": {
@@ -99,7 +99,7 @@ backbone_presets = {
             "params": 630764800,
             "path": "vit",
         },
-        "kaggle_handle": "kaggle://keras/vit/keras/vit_huge_patch14_224_imagenet21k/1",
+        "kaggle_handle": "kaggle://keras/vit/keras/vit_huge_patch14_224_imagenet21k/2",
     },
     "vit_large_patch16_224_imagenet21k": {
         "metadata": {
@@ -110,7 +110,7 @@ backbone_presets = {
             "params": 303301632,
             "path": "vit",
         },
-        "kaggle_handle": "kaggle://keras/vit/keras/vit_large_patch16_224_imagenet21k/1",
+        "kaggle_handle": "kaggle://keras/vit/keras/vit_large_patch16_224_imagenet21k/2",
     },
     "vit_large_patch32_224_imagenet21k": {
         "metadata": {
@@ -121,6 +121,6 @@ backbone_presets = {
             "params": 305510400,
             "path": "vit",
         },
-        "kaggle_handle": "kaggle://keras/vit/keras/vit_large_patch32_224_imagenet21k/1",
+        "kaggle_handle": "kaggle://keras/vit/keras/vit_large_patch32_224_imagenet21k/2",
     },
 }
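
All eleven ViT presets now point at re-uploaded Kaggle artifacts: the trailing version of each kaggle_handle is bumped by one, while the preset names and metadata are unchanged, so existing callers pick up the new weights without code changes. A minimal loading sketch, assuming the standard KerasHub preset API:

import keras_hub

# The artifact version ("/3") is resolved from vit_presets.py; callers
# only name the preset, so this line is unaffected by the bump.
backbone = keras_hub.models.ViTBackbone.from_preset(
    "vit_base_patch16_224_imagenet"
)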
keras_hub/src/utils/transformers/convert_qwen3.py ADDED
@@ -0,0 +1,145 @@
+import numpy as np
+
+from keras_hub.src.models.qwen3.qwen3_backbone import Qwen3Backbone
+from keras_hub.src.utils.preset_utils import load_json
+
+backbone_cls = Qwen3Backbone
+
+
+def convert_backbone_config(transformers_config):
+    return {
+        "vocabulary_size": transformers_config["vocab_size"],
+        "head_dim": transformers_config["head_dim"],
+        "hidden_dim": transformers_config["hidden_size"],
+        "num_layers": transformers_config["num_hidden_layers"],
+        "num_query_heads": transformers_config["num_attention_heads"],
+        "num_key_value_heads": transformers_config["num_key_value_heads"],
+        "intermediate_dim": transformers_config["intermediate_size"],
+        "layer_norm_epsilon": transformers_config["rms_norm_eps"],
+        "rope_max_wavelength": transformers_config["rope_theta"],
+        "sliding_window_size": transformers_config["sliding_window"]
+        if transformers_config["use_sliding_window"]
+        else None,
+        "tie_word_embeddings": transformers_config["tie_word_embeddings"],
+    }
+
+
+def convert_weights(backbone, loader, transformers_config):
+    loader.port_weight(
+        keras_variable=backbone.get_layer("token_embedding").embeddings,
+        hf_weight_key="model.embed_tokens.weight",
+    )
+    if not backbone.tie_word_embeddings:
+        loader.port_weight(
+            keras_variable=backbone.get_layer(
+                "token_embedding"
+            ).reverse_embeddings,
+            hf_weight_key="lm_head.weight",
+            # rearrange_pattern="b a -> a b",
+            hook_fn=lambda hf_tensor, _: np.transpose(hf_tensor, axes=(1, 0)),
+        )
+
+    def transpose_and_reshape(x, shape):
+        return np.reshape(np.transpose(x), shape)
+
+    for i in range(backbone.num_layers):
+        decoder_layer = backbone.get_layer(f"transformer_layer_{i}")
+
+        # Input layernorm
+        loader.port_weight(
+            keras_variable=decoder_layer._self_attention_layernorm.scale,
+            hf_weight_key=f"model.layers.{i}.input_layernorm.weight",
+        )
+
+        # Attention layers
+
+        ## Query
+        loader.port_weight(
+            keras_variable=decoder_layer._self_attention_layer._query_dense.kernel,
+            hf_weight_key=f"model.layers.{i}.self_attn.q_proj.weight",
+            hook_fn=transpose_and_reshape,
+        )
+        loader.port_weight(
+            keras_variable=decoder_layer._self_attention_layer._query_dense_layer_norm.scale,
+            hf_weight_key=f"model.layers.{i}.self_attn.q_norm.weight",
+        )
+        ## Key
+        loader.port_weight(
+            keras_variable=decoder_layer._self_attention_layer._key_dense.kernel,
+            hf_weight_key=f"model.layers.{i}.self_attn.k_proj.weight",
+            hook_fn=transpose_and_reshape,
+        )
+        loader.port_weight(
+            keras_variable=decoder_layer._self_attention_layer._key_dense_layer_norm.scale,
+            hf_weight_key=f"model.layers.{i}.self_attn.k_norm.weight",
+        )
+        ## Value
+        loader.port_weight(
+            keras_variable=decoder_layer._self_attention_layer._value_dense.kernel,
+            hf_weight_key=f"model.layers.{i}.self_attn.v_proj.weight",
+            hook_fn=transpose_and_reshape,
+        )
+        ## Output
+        loader.port_weight(
+            keras_variable=decoder_layer._self_attention_layer._output_dense.kernel,
+            hf_weight_key=f"model.layers.{i}.self_attn.o_proj.weight",
+            # rearrange_patterns="c (a b) -> a b c",
+            # rearrange_dims={"a": backbone.num_query_heads},
+            hook_fn=transpose_and_reshape,
+        )
+
+        # MLP layers
+        loader.port_weight(
+            keras_variable=decoder_layer._feedforward_intermediate_dense.kernel,
+            hf_weight_key=f"model.layers.{i}.mlp.up_proj.weight",
+            # rearrange_patterns="b a -> a b",
+            hook_fn=lambda hf_tensor, _: np.transpose(hf_tensor, axes=(1, 0)),
+        )
+        loader.port_weight(
+            keras_variable=decoder_layer._feedforward_output_dense.kernel,
+            hf_weight_key=f"model.layers.{i}.mlp.down_proj.weight",
+            # rearrange_patterns="b a -> a b",
+            hook_fn=lambda hf_tensor, _: np.transpose(hf_tensor, axes=(1, 0)),
+        )
+        loader.port_weight(
+            keras_variable=decoder_layer._feedforward_gate_dense.kernel,
+            hf_weight_key=f"model.layers.{i}.mlp.gate_proj.weight",
+            # rearrange_patterns="b a -> a b",
+            hook_fn=lambda hf_tensor, _: np.transpose(hf_tensor, axes=(1, 0)),
+        )
+
+        # Feedforward layernorm
+        loader.port_weight(
+            keras_variable=decoder_layer._feedforward_layernorm.scale,
+            hf_weight_key=f"model.layers.{i}.post_attention_layernorm.weight",
+        )
+
+    # Final normalization layer
+    loader.port_weight(
+        keras_variable=backbone.get_layer("sequence_output_layernorm").scale,
+        hf_weight_key="model.norm.weight",
+    )
+
+    return backbone
+
+
+def convert_tokenizer(cls, preset, **kwargs):
+    tokenizer_config = load_json(preset, "tokenizer.json")
+    vocab = tokenizer_config["model"]["vocab"]
+    merges = tokenizer_config["model"]["merges"]
+    merges = [" ".join(item) for item in merges]
+
+    # Load all special tokens with the exception of "reserved" ones.
+    special_tokens = set()
+    for token in tokenizer_config["added_tokens"]:
+        if not token["content"].startswith("<|reserved_special_token_"):
+            vocab[token["content"]] = token["id"]
+            special_tokens.add(token["content"])
+
+    kwargs.update(
+        {
+            "unsplittable_tokens": list(special_tokens),
+        }
+    )
+
+    return cls(vocabulary=vocab, merges=merges, **kwargs)
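
The new module follows the converter contract shared by the other transformers converters in this package: convert_backbone_config maps Hugging Face config.json keys onto Qwen3Backbone constructor arguments, convert_weights ports each checkpoint tensor into the matching Keras variable (dense kernels are transposed because Hugging Face stores them as (out_features, in_features)), and convert_tokenizer rebuilds the byte-pair-encoding vocabulary and merges from tokenizer.json. A hedged end-to-end sketch; the checkpoint name "Qwen/Qwen3-0.6B" is an illustrative assumption, not something this diff pins down:

import keras_hub

# Any Hugging Face repo whose config.json declares model_type "qwen3"
# is routed through convert_qwen3 (see the preset_loader.py diff below).
backbone = keras_hub.models.Backbone.from_preset("hf://Qwen/Qwen3-0.6B")
tokenizer = keras_hub.tokenizers.Tokenizer.from_preset("hf://Qwen/Qwen3-0.6B")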
keras_hub/src/utils/transformers/preset_loader.py CHANGED
@@ -14,6 +14,7 @@ from keras_hub.src.utils.transformers import convert_mistral
 from keras_hub.src.utils.transformers import convert_mixtral
 from keras_hub.src.utils.transformers import convert_pali_gemma
 from keras_hub.src.utils.transformers import convert_qwen
+from keras_hub.src.utils.transformers import convert_qwen3
 from keras_hub.src.utils.transformers import convert_qwen_moe
 from keras_hub.src.utils.transformers import convert_vit
 from keras_hub.src.utils.transformers.safetensor_utils import SafetensorLoader
@@ -50,6 +51,8 @@ class TransformersPresetLoader(PresetLoader):
             self.converter = convert_mixtral
         elif model_type == "qwen2_moe":
             self.converter = convert_qwen_moe
+        elif model_type == "qwen3":
+            self.converter = convert_qwen3
         else:
             raise ValueError(
                 "KerasHub has no converter for huggingface/transformers models "
keras_hub/src/version.py CHANGED
@@ -1,7 +1,7 @@
 from keras_hub.src.api_export import keras_hub_export

 # Unique source of truth for the version number.
-__version__ = "0.21.0.dev202505280410"
+__version__ = "0.22.0.dev202505300409"


 @keras_hub_export("keras_hub.version")
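
Because the version string has a single source of truth and is re-exported through keras_hub_export, the bump is observable at runtime:

import keras_hub

# Prints "0.22.0.dev202505300409" for this nightly build.
print(keras_hub.version())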
{keras_hub_nightly-0.21.0.dev202505280410.dist-info → keras_hub_nightly-0.22.0.dev202505300409.dist-info}/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: keras-hub-nightly
-Version: 0.21.0.dev202505280410
+Version: 0.22.0.dev202505300409
 Summary: Pretrained models for Keras.
 Author-email: Keras team <keras-users@googlegroups.com>
 License-Expression: Apache-2.0
{keras_hub_nightly-0.21.0.dev202505280410.dist-info → keras_hub_nightly-0.22.0.dev202505300409.dist-info}/RECORD RENAMED
@@ -1,11 +1,11 @@
 keras_hub/__init__.py,sha256=bJbUZkqwhZvTb1Tqx1fbkq6mzBYiEyq-Hin3oQIkhdE,558
 keras_hub/layers/__init__.py,sha256=gnvT-GuASB1hZwY4zrRkLs5yohSQu9Pp1SHDxsWPLY8,5081
 keras_hub/metrics/__init__.py,sha256=KYalsMPBnfwim9BdGHFfJ5WxUKFXOQ1QoKIMT_0lwlM,439
-keras_hub/models/__init__.py,sha256=itSzodVUeuX6HQnmsSXY0Wv-5Htbu397410R-SFW_4I,26411
+keras_hub/models/__init__.py,sha256=1ZKgLK4AZ44s_cH7vu6FvmVocxf0biLAnY_lEh3dgxw,26734
 keras_hub/samplers/__init__.py,sha256=aFQIkiqbZpi8vjrPp2MVII4QUfE-eQjra5fMeHsoy7k,886
 keras_hub/src/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 keras_hub/src/api_export.py,sha256=9pQZK27JObxWZ96QPLBp1OBsjWigh1iuV6RglPGMRk0,1499
-keras_hub/src/version.py,sha256=LkNchDcdwovYSb5xEjf09V8MHim3X9wy6VrAyA_7afw,222
+keras_hub/src/version.py,sha256=hJyx_F3-Sy3RRaPET6xBnbg7QRtPkFgRHC4_SaxL3bw,222
 keras_hub/src/layers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 keras_hub/src/layers/modeling/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 keras_hub/src/layers/modeling/alibi_bias.py,sha256=1XBTHI52L_iJDhN_w5ydu_iMhCuTgQAxEPwcLA6BPuk,4411
@@ -259,7 +259,7 @@ keras_hub/src/models/mixtral/mixtral_causal_lm.py,sha256=JA1t6xTeaYX_fNo9ftRyvzd
 keras_hub/src/models/mixtral/mixtral_causal_lm_preprocessor.py,sha256=q2qXa9QAUWBvOWv9DeNvwsBNXSORJAbQFoQsWQ7e8V8,3079
 keras_hub/src/models/mixtral/mixtral_decoder.py,sha256=CvOjhTxPnGQ_HNknZXRI6Cx1kpuHG99_TiOh-mNcsDw,18190
 keras_hub/src/models/mixtral/mixtral_layer_norm.py,sha256=zfbDKZEb45FTwP0zQd7WPPp8tuiGoSNfS-DRYWkZyWw,1031
-keras_hub/src/models/mixtral/mixtral_presets.py,sha256=AteLrYXyVjooz_DHLnBA1OMlZS6LMu7Y7gGUWddn6go,856
+keras_hub/src/models/mixtral/mixtral_presets.py,sha256=pi5hHcwVSqr7ytf4dSnU_ew_t7NYw7EsZrmklQDqDVo,852
 keras_hub/src/models/mixtral/mixtral_tokenizer.py,sha256=Kc233k879QMyX164X_CzWbqpnqEkKWNqa648guTGkBk,661
 keras_hub/src/models/mobilenet/__init__.py,sha256=hxkNGGj_iAMu62iooUDEPA818sNOIgjG7pXMLEMOsAE,275
 keras_hub/src/models/mobilenet/mobilenet_backbone.py,sha256=aZBSFeLUObYYoi3od9DI1KfgPCqh5GHTcAI8Y2ZHShA,29536
@@ -311,8 +311,14 @@ keras_hub/src/models/qwen/qwen_causal_lm.py,sha256=_f-UHaKHp0ncxknpkpEJiW3jlng3E
 keras_hub/src/models/qwen/qwen_causal_lm_preprocessor.py,sha256=Va-4TLJD3ycEnkS41rF3dVj4_6K0j-gxLTrREFRcyr0,609
 keras_hub/src/models/qwen/qwen_decoder.py,sha256=utmAvZlU7_nP-6pjGPDinK4JaMzsQSwOARG0ote-jAg,11771
 keras_hub/src/models/qwen/qwen_layernorm.py,sha256=DS35r3qd6g5ocL7Nhf_vNzLLMo1aI9VCSmL64dgNOYI,924
-keras_hub/src/models/qwen/qwen_presets.py,sha256=DpRplWNwktM4KDgIP495PTUBJxQE_mS6KQSK5LGWOyc,1998
+keras_hub/src/models/qwen/qwen_presets.py,sha256=1FkKV6M3yqJz4EP1xa7bEvfIQ721xXT-_ikjWX0xvww,1992
 keras_hub/src/models/qwen/qwen_tokenizer.py,sha256=LCv3IyiDDHqVnM9N3lf5-BE3iwicIh0nKS1hjoPw9lE,1532
+keras_hub/src/models/qwen3/qwen3_attention.py,sha256=sewLjli290XvJ1efGZJEAYqUZfRll7cmhu0258s4C48,13042
+keras_hub/src/models/qwen3/qwen3_backbone.py,sha256=Ylpk_rRWWRxy8irlAPjJU-YrxYGpo8c9lSEO1zZl4gU,7456
+keras_hub/src/models/qwen3/qwen3_causal_lm_preprocessor.py,sha256=H4g-bgvuhAUnDwjJovydK16Kes38ZFZWPvflrgHqZis,458
+keras_hub/src/models/qwen3/qwen3_decoder.py,sha256=68s9jQj53zFmXE4-SGXKYHu546fXOyi9LUbnKk-HGYY,11595
+keras_hub/src/models/qwen3/qwen3_layernorm.py,sha256=EJxjf7Pr6ufPQnNeuYQxkExzPjPk4PQxqMsoBeSEkDo,1073
+keras_hub/src/models/qwen3/qwen3_tokenizer.py,sha256=LmPtg0vprMchDvYfTj8m5PraXI2QS3-YgdIIpIm5iAs,1448
 keras_hub/src/models/qwen_moe/__init__.py,sha256=5D8GUmVDsJs0J4sVZHcXOLkZf12U96l-WtwyVee4lu8,267
 keras_hub/src/models/qwen_moe/qwen_moe_attention.py,sha256=pE79_iHUm2LGkoWL6zMJw_pNfzIvmyq3yJaiq47W2TY,13242
 keras_hub/src/models/qwen_moe/qwen_moe_backbone.py,sha256=nrfELvIvRLmrgKrUNXci2CrecmeI6bWzJj7HH-RcWJA,15341
@@ -320,7 +326,7 @@ keras_hub/src/models/qwen_moe/qwen_moe_causal_lm.py,sha256=MeP60v7GcN_SmH5_ULRpq
 keras_hub/src/models/qwen_moe/qwen_moe_causal_lm_preprocessor.py,sha256=uKaXRrJs02vkVudjdehzJPp0B84tPMkxNHlp166kceE,589
 keras_hub/src/models/qwen_moe/qwen_moe_decoder.py,sha256=kmUjLpYTbJQ3J_31qWhLOd0Dg2_9cl_JX_zM8ZMH1Qo,23130
 keras_hub/src/models/qwen_moe/qwen_moe_layernorm.py,sha256=DbkWJo7U0-cwdZwHPeAnFznYwtao6o0fjpoDJ9UWnpc,927
-keras_hub/src/models/qwen_moe/qwen_moe_presets.py,sha256=uKrA9xLV3P3jtYUUsqdhKq_HPkB4lXmOYseB1wXTZnI,457
+keras_hub/src/models/qwen_moe/qwen_moe_presets.py,sha256=LhOA3Ow-z3cNTan4AOrtyCXS58EgfvO_gtqiZt5cUQc,455
 keras_hub/src/models/qwen_moe/qwen_moe_tokenizer.py,sha256=2c3X8jNGO0q0UL5NtUqSgHWLqhyJGi2ohNcTeOGhd84,1407
 keras_hub/src/models/resnet/__init__.py,sha256=C5UqlQ6apm8WSp1bnrxB6Bi3BGaknxRQs-r3b2wpaGA,257
 keras_hub/src/models/resnet/resnet_backbone.py,sha256=Q7nlqcTXZzjqd0e-DsjHC4ok58yOX7qxseotym3uZpM,31276
@@ -409,12 +415,12 @@ keras_hub/src/models/vgg/vgg_image_classifier_preprocessor.py,sha256=M7hBbDPws5Z
 keras_hub/src/models/vgg/vgg_image_converter.py,sha256=FKVrSNNBxIkiKvApzf4TZxidBb1z917Xs9nooHCcRLM,324
 keras_hub/src/models/vgg/vgg_presets.py,sha256=UL7a8hdZ22duMADXwVypGnc20ME-ywI4QjtXu15usEI,1491
 keras_hub/src/models/vit/__init__.py,sha256=GH7x3VjEXZLm-4F-c9-55QZE0lP2OLVICH0Hr5YCp9A,239
-keras_hub/src/models/vit/vit_backbone.py,sha256=kGmRZO4u-1q4PBcbhJbiWVIEVYAcp2H4SPJgQimrJd0,5909
+keras_hub/src/models/vit/vit_backbone.py,sha256=VnypiTAf0ORaBTVzdDOXsnKnQxKbrIlX9z9qOumZH50,6699
 keras_hub/src/models/vit/vit_image_classifier.py,sha256=lMVxiD1_6drx7XQ7P7YzlqnFP7kT1zlMe84f-T3SDQI,6332
 keras_hub/src/models/vit/vit_image_classifier_preprocessor.py,sha256=wu6YcBlXMWB9sKCPvmNdGBZKTLQt_HyHWS6P9nyDwsk,504
-keras_hub/src/models/vit/vit_image_converter.py,sha256=IQYgLOhnsXudZ_S344lzGvO6pbMOhXK6rW12Q3kHykI,2824
-keras_hub/src/models/vit/vit_layers.py,sha256=_cZ1FMYEXcnjwvNPVJXug3rEbatv89OzRTMuzx62dnA,13312
-keras_hub/src/models/vit/vit_presets.py,sha256=zZhxUleOom1ie3gn0Mi-_xhhdFEEsnqSQyKADV2L38k,4479
+keras_hub/src/models/vit/vit_image_converter.py,sha256=JhdXcbfKu9pKSJZiaKk7FKf_CjSXztSa2rsBFQvlgAo,324
+keras_hub/src/models/vit/vit_layers.py,sha256=c0ApxF7cMqeEEa0LcWrBhc6zIolwOFVb2HjzLV-q98k,13940
+keras_hub/src/models/vit/vit_presets.py,sha256=mlLBJxxonru14fBiMnMF4ud-JgbJHclpVV3FsoIubrk,4479
 keras_hub/src/models/vit_det/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 keras_hub/src/models/vit_det/vit_det_backbone.py,sha256=DOZ5J7c1t5PAZ6y0pMmBoQTMOUup7UoUrYVfCs69ltY,7697
 keras_hub/src/models/vit_det/vit_layers.py,sha256=mnwu56chMc6zxmfp_hsLdR7TXYy1_YsWy1KwGX9M5Ic,19840
@@ -496,13 +502,14 @@ keras_hub/src/utils/transformers/convert_mistral.py,sha256=kVhN9h1ZFVhwkNW8p3wnS
 keras_hub/src/utils/transformers/convert_mixtral.py,sha256=PxeCY8Xe7U_caICugwOCEjuSZ51ZUtmef6rUxh-Wt54,5508
 keras_hub/src/utils/transformers/convert_pali_gemma.py,sha256=B1leeDw96Yvu81hYumf66hIid07k5NLqoeWAJgPnaLs,10649
 keras_hub/src/utils/transformers/convert_qwen.py,sha256=WUxMAEFVqRs7TRw7QU5TH3_ev4yf02R1xFVliMvTQqg,5886
+keras_hub/src/utils/transformers/convert_qwen3.py,sha256=LIormvCMWPq6X9Wo2eNbADjtFZ0nI7tFGZFBxmo4GKw,5700
 keras_hub/src/utils/transformers/convert_qwen_moe.py,sha256=a7R28aln-PdAcNuKAXdrtzvslho2Co6GypChxLMKPpc,10618
 keras_hub/src/utils/transformers/convert_vit.py,sha256=9SUZ9utNJhW_5cj3acMn9cRy47u2eIcDsrhmzj77o9k,5187
-keras_hub/src/utils/transformers/preset_loader.py,sha256=1nfS5xVsl-JROGXJXltTqV1fQdcUlZbGGcbf-n79pXM,4225
+keras_hub/src/utils/transformers/preset_loader.py,sha256=7tFnbyAiUCMcTG8VQ7Wpi-J7cvRoSZn-ZYE_l0xuh0M,4363
 keras_hub/src/utils/transformers/safetensor_utils.py,sha256=CYUHyA4y-B61r7NDnCsFb4t_UmSwZ1k9L-8gzEd6KRg,3339
 keras_hub/tokenizers/__init__.py,sha256=uMjjm0mzUkRb0e4Ac_JK8aJ9cKGUi5UqmzWoWAFJprE,4164
 keras_hub/utils/__init__.py,sha256=jXPqVGBpJr_PpYmqD8aDG-fRMlxH-ulqCR2SZMn288Y,646
-keras_hub_nightly-0.21.0.dev202505280410.dist-info/METADATA,sha256=DW6jOe7Tbk32rdB5bnZHYlyBZYuzTYIui1EoKkhPMpY,7393
-keras_hub_nightly-0.21.0.dev202505280410.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-keras_hub_nightly-0.21.0.dev202505280410.dist-info/top_level.txt,sha256=N4J6piIWBKa38A4uV-CnIopnOEf8mHAbkNXafXm_CuA,10
-keras_hub_nightly-0.21.0.dev202505280410.dist-info/RECORD,,
+keras_hub_nightly-0.22.0.dev202505300409.dist-info/METADATA,sha256=hH3xqnggYJvyKQ7DG5U0pJyM8umkP1oRPj32GKEu1E8,7393
+keras_hub_nightly-0.22.0.dev202505300409.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+keras_hub_nightly-0.22.0.dev202505300409.dist-info/top_level.txt,sha256=N4J6piIWBKa38A4uV-CnIopnOEf8mHAbkNXafXm_CuA,10
+keras_hub_nightly-0.22.0.dev202505300409.dist-info/RECORD,,
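
Each RECORD row has the form path,sha256=<digest>,<size>, where the digest is an unpadded URL-safe base64 encoding of the file's SHA-256 (per the wheel spec, PEP 427), which is why every touched file above gets a new hash and, usually, a new size. A small verification sketch, assuming the wheel has been extracted to disk; the paths are illustrative:

import base64
import hashlib

def record_hash(path):
    # Compute the "sha256=<...>" token used in a wheel's RECORD file.
    digest = hashlib.sha256(open(path, "rb").read()).digest()
    return "sha256=" + base64.urlsafe_b64encode(digest).rstrip(b"=").decode()

# After e.g. `unzip keras_hub_nightly-0.22.0.dev202505300409-py3-none-any.whl`:
assert record_hash("keras_hub/src/version.py") == (
    "sha256=hJyx_F3-Sy3RRaPET6xBnbg7QRtPkFgRHC4_SaxL3bw"
)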