keras-hub-nightly 0.22.0.dev202505290412__py3-none-any.whl → 0.22.0.dev202505310408__py3-none-any.whl

This diff shows the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
Files changed (26)
  1. keras_hub/layers/__init__.py +3 -0
  2. keras_hub/models/__init__.py +16 -0
  3. keras_hub/src/models/deit/__init__.py +0 -0
  4. keras_hub/src/models/deit/deit_backbone.py +154 -0
  5. keras_hub/src/models/deit/deit_image_classifier.py +171 -0
  6. keras_hub/src/models/deit/deit_image_classifier_preprocessor.py +12 -0
  7. keras_hub/src/models/deit/deit_image_converter.py +8 -0
  8. keras_hub/src/models/deit/deit_layers.py +519 -0
  9. keras_hub/src/models/deit/deit_presets.py +49 -0
  10. keras_hub/src/models/mixtral/mixtral_presets.py +4 -4
  11. keras_hub/src/models/qwen/qwen_presets.py +6 -6
  12. keras_hub/src/models/qwen3/qwen3_attention.py +369 -0
  13. keras_hub/src/models/qwen3/qwen3_backbone.py +191 -0
  14. keras_hub/src/models/qwen3/qwen3_causal_lm_preprocessor.py +10 -0
  15. keras_hub/src/models/qwen3/qwen3_decoder.py +309 -0
  16. keras_hub/src/models/qwen3/qwen3_layernorm.py +38 -0
  17. keras_hub/src/models/qwen3/qwen3_tokenizer.py +48 -0
  18. keras_hub/src/models/qwen_moe/qwen_moe_presets.py +2 -2
  19. keras_hub/src/utils/transformers/convert_deit.py +155 -0
  20. keras_hub/src/utils/transformers/convert_qwen3.py +145 -0
  21. keras_hub/src/utils/transformers/preset_loader.py +7 -1
  22. keras_hub/src/version.py +1 -1
  23. {keras_hub_nightly-0.22.0.dev202505290412.dist-info → keras_hub_nightly-0.22.0.dev202505310408.dist-info}/METADATA +1 -1
  24. {keras_hub_nightly-0.22.0.dev202505290412.dist-info → keras_hub_nightly-0.22.0.dev202505310408.dist-info}/RECORD +26 -11
  25. {keras_hub_nightly-0.22.0.dev202505290412.dist-info → keras_hub_nightly-0.22.0.dev202505310408.dist-info}/WHEEL +0 -0
  26. {keras_hub_nightly-0.22.0.dev202505290412.dist-info → keras_hub_nightly-0.22.0.dev202505310408.dist-info}/top_level.txt +0 -0
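The headline additions in this nightly are a DeiT image-classification family, a Qwen3 text backbone, and Hugging Face Transformers converters for both, wired into the transformers preset loader. As orientation, here is a minimal sketch of how the new converters are typically reached from user code, assuming a Hugging Face checkpoint whose config.json declares model_type "qwen3" (the repo id below is a hypothetical placeholder, not a preset shipped in this wheel):

import keras_hub

# Loading from an "hf://" URI routes through TransformersPresetLoader,
# which now dispatches model_type "qwen3" to convert_qwen3 and "deit"
# to convert_deit (see the preset_loader.py diff below).
backbone = keras_hub.models.Backbone.from_preset("hf://Qwen/Qwen3-0.6B")
backbone.summary()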
keras_hub/src/utils/transformers/convert_qwen3.py ADDED
@@ -0,0 +1,145 @@
+import numpy as np
+
+from keras_hub.src.models.qwen3.qwen3_backbone import Qwen3Backbone
+from keras_hub.src.utils.preset_utils import load_json
+
+backbone_cls = Qwen3Backbone
+
+
+def convert_backbone_config(transformers_config):
+    return {
+        "vocabulary_size": transformers_config["vocab_size"],
+        "head_dim": transformers_config["head_dim"],
+        "hidden_dim": transformers_config["hidden_size"],
+        "num_layers": transformers_config["num_hidden_layers"],
+        "num_query_heads": transformers_config["num_attention_heads"],
+        "num_key_value_heads": transformers_config["num_key_value_heads"],
+        "intermediate_dim": transformers_config["intermediate_size"],
+        "layer_norm_epsilon": transformers_config["rms_norm_eps"],
+        "rope_max_wavelength": transformers_config["rope_theta"],
+        "sliding_window_size": transformers_config["sliding_window"]
+        if transformers_config["use_sliding_window"]
+        else None,
+        "tie_word_embeddings": transformers_config["tie_word_embeddings"],
+    }
+
+
+def convert_weights(backbone, loader, transformers_config):
+    loader.port_weight(
+        keras_variable=backbone.get_layer("token_embedding").embeddings,
+        hf_weight_key="model.embed_tokens.weight",
+    )
+    if not backbone.tie_word_embeddings:
+        loader.port_weight(
+            keras_variable=backbone.get_layer(
+                "token_embedding"
+            ).reverse_embeddings,
+            hf_weight_key="lm_head.weight",
+            # rearrange_pattern="b a -> a b",
+            hook_fn=lambda hf_tensor, _: np.transpose(hf_tensor, axes=(1, 0)),
+        )
+
+    def transpose_and_reshape(x, shape):
+        return np.reshape(np.transpose(x), shape)
+
+    for i in range(backbone.num_layers):
+        decoder_layer = backbone.get_layer(f"transformer_layer_{i}")
+
+        # Input layernorm
+        loader.port_weight(
+            keras_variable=decoder_layer._self_attention_layernorm.scale,
+            hf_weight_key=f"model.layers.{i}.input_layernorm.weight",
+        )
+
+        # Attention layers
+
+        ## Query
+        loader.port_weight(
+            keras_variable=decoder_layer._self_attention_layer._query_dense.kernel,
+            hf_weight_key=f"model.layers.{i}.self_attn.q_proj.weight",
+            hook_fn=transpose_and_reshape,
+        )
+        loader.port_weight(
+            keras_variable=decoder_layer._self_attention_layer._query_dense_layer_norm.scale,
+            hf_weight_key=f"model.layers.{i}.self_attn.q_norm.weight",
+        )
+        ## Key
+        loader.port_weight(
+            keras_variable=decoder_layer._self_attention_layer._key_dense.kernel,
+            hf_weight_key=f"model.layers.{i}.self_attn.k_proj.weight",
+            hook_fn=transpose_and_reshape,
+        )
+        loader.port_weight(
+            keras_variable=decoder_layer._self_attention_layer._key_dense_layer_norm.scale,
+            hf_weight_key=f"model.layers.{i}.self_attn.k_norm.weight",
+        )
+        ## Value
+        loader.port_weight(
+            keras_variable=decoder_layer._self_attention_layer._value_dense.kernel,
+            hf_weight_key=f"model.layers.{i}.self_attn.v_proj.weight",
+            hook_fn=transpose_and_reshape,
+        )
+        ## Output
+        loader.port_weight(
+            keras_variable=decoder_layer._self_attention_layer._output_dense.kernel,
+            hf_weight_key=f"model.layers.{i}.self_attn.o_proj.weight",
+            # rearrange_patterns="c (a b) -> a b c",
+            # rearrange_dims={"a": backbone.num_query_heads},
+            hook_fn=transpose_and_reshape,
+        )
+
+        # MLP layers
+        loader.port_weight(
+            keras_variable=decoder_layer._feedforward_intermediate_dense.kernel,
+            hf_weight_key=f"model.layers.{i}.mlp.up_proj.weight",
+            # rearrange_patterns="b a -> a b",
+            hook_fn=lambda hf_tensor, _: np.transpose(hf_tensor, axes=(1, 0)),
+        )
+        loader.port_weight(
+            keras_variable=decoder_layer._feedforward_output_dense.kernel,
+            hf_weight_key=f"model.layers.{i}.mlp.down_proj.weight",
+            # rearrange_patterns="b a -> a b",
+            hook_fn=lambda hf_tensor, _: np.transpose(hf_tensor, axes=(1, 0)),
+        )
+        loader.port_weight(
+            keras_variable=decoder_layer._feedforward_gate_dense.kernel,
+            hf_weight_key=f"model.layers.{i}.mlp.gate_proj.weight",
+            # rearrange_patterns="b a -> a b",
+            hook_fn=lambda hf_tensor, _: np.transpose(hf_tensor, axes=(1, 0)),
+        )
+
+        # Feedforward layernorm
+        loader.port_weight(
+            keras_variable=decoder_layer._feedforward_layernorm.scale,
+            hf_weight_key=f"model.layers.{i}.post_attention_layernorm.weight",
+        )
+
+    # Final normalization layer
+    loader.port_weight(
+        keras_variable=backbone.get_layer("sequence_output_layernorm").scale,
+        hf_weight_key="model.norm.weight",
+    )
+
+    return backbone
+
+
+def convert_tokenizer(cls, preset, **kwargs):
+    tokenizer_config = load_json(preset, "tokenizer.json")
+    vocab = tokenizer_config["model"]["vocab"]
+    merges = tokenizer_config["model"]["merges"]
+    merges = [" ".join(item) for item in merges]
+
+    # Load all special tokens with the exception of "reserved" ones.
+    special_tokens = set()
+    for token in tokenizer_config["added_tokens"]:
+        if not token["content"].startswith("<|reserved_special_token_"):
+            vocab[token["content"]] = token["id"]
+            special_tokens.add(token["content"])
+
+    kwargs.update(
+        {
+            "unsplittable_tokens": list(special_tokens),
+        }
+    )
+
+    return cls(vocabulary=vocab, merges=merges, **kwargs)
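For reference, a minimal sketch of the mapping that convert_backbone_config performs; the numeric values below are hypothetical and not taken from any real checkpoint:

from keras_hub.src.utils.transformers.convert_qwen3 import (
    convert_backbone_config,
)

# A made-up Hugging Face style config dict containing the keys the
# converter reads.
hf_config = {
    "vocab_size": 151936,
    "head_dim": 128,
    "hidden_size": 1024,
    "num_hidden_layers": 28,
    "num_attention_heads": 16,
    "num_key_value_heads": 8,
    "intermediate_size": 3072,
    "rms_norm_eps": 1e-6,
    "rope_theta": 1000000.0,
    "use_sliding_window": False,
    "sliding_window": 4096,
    "tie_word_embeddings": True,
}
keras_kwargs = convert_backbone_config(hf_config)
# sliding_window_size is None here because use_sliding_window is False,
# and with tie_word_embeddings=True convert_weights() skips porting a
# separate lm_head.weight.
print(keras_kwargs["sliding_window_size"], keras_kwargs["num_query_heads"])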
keras_hub/src/utils/transformers/preset_loader.py CHANGED
@@ -6,6 +6,7 @@ from keras_hub.src.utils.preset_utils import jax_memory_cleanup
 from keras_hub.src.utils.transformers import convert_albert
 from keras_hub.src.utils.transformers import convert_bart
 from keras_hub.src.utils.transformers import convert_bert
+from keras_hub.src.utils.transformers import convert_deit
 from keras_hub.src.utils.transformers import convert_distilbert
 from keras_hub.src.utils.transformers import convert_gemma
 from keras_hub.src.utils.transformers import convert_gpt2
@@ -14,6 +15,7 @@ from keras_hub.src.utils.transformers import convert_mistral
 from keras_hub.src.utils.transformers import convert_mixtral
 from keras_hub.src.utils.transformers import convert_pali_gemma
 from keras_hub.src.utils.transformers import convert_qwen
+from keras_hub.src.utils.transformers import convert_qwen3
 from keras_hub.src.utils.transformers import convert_qwen_moe
 from keras_hub.src.utils.transformers import convert_vit
 from keras_hub.src.utils.transformers.safetensor_utils import SafetensorLoader
@@ -29,6 +31,8 @@ class TransformersPresetLoader(PresetLoader):
             self.converter = convert_bart
         elif model_type == "bert":
             self.converter = convert_bert
+        elif model_type == "deit":
+            self.converter = convert_deit
         elif model_type == "distilbert":
             self.converter = convert_distilbert
         elif model_type == "gemma" or model_type == "gemma2":
@@ -50,6 +54,8 @@ class TransformersPresetLoader(PresetLoader):
             self.converter = convert_mixtral
         elif model_type == "qwen2_moe":
             self.converter = convert_qwen_moe
+        elif model_type == "qwen3":
+            self.converter = convert_qwen3
         else:
             raise ValueError(
                 "KerasHub has no converter for huggingface/transformers models "
@@ -79,7 +85,7 @@ class TransformersPresetLoader(PresetLoader):
                 cls, load_weights, load_task_weights, **kwargs
             )
         # Support loading the classification head for classifier models.
-        if architecture == "ViTForImageClassification":
+        if "ForImageClassification" in architecture:
             kwargs["num_classes"] = len(self.config["id2label"])
         task = super().load_task(cls, load_weights, load_task_weights, **kwargs)
         if load_task_weights:
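The last hunk generalizes classification-head loading: previously only the literal architecture string "ViTForImageClassification" triggered num_classes inference, whereas now any architecture containing "ForImageClassification" (such as the DeiT classification variants) gets num_classes from the checkpoint's id2label map. A hedged sketch of the effect, with a hypothetical Hub repo id:

import keras_hub

# Because the checkpoint's architecture string contains
# "ForImageClassification", num_classes is derived from id2label and
# does not need to be passed explicitly.
classifier = keras_hub.models.ImageClassifier.from_preset(
    "hf://facebook/deit-base-distilled-patch16-224"
)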
keras_hub/src/version.py CHANGED
@@ -1,7 +1,7 @@
 from keras_hub.src.api_export import keras_hub_export
 
 # Unique source of truth for the version number.
-__version__ = "0.22.0.dev202505290412"
+__version__ = "0.22.0.dev202505310408"
 
 
 @keras_hub_export("keras_hub.version")
{keras_hub_nightly-0.22.0.dev202505290412.dist-info → keras_hub_nightly-0.22.0.dev202505310408.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: keras-hub-nightly
-Version: 0.22.0.dev202505290412
+Version: 0.22.0.dev202505310408
 Summary: Pretrained models for Keras.
 Author-email: Keras team <keras-users@googlegroups.com>
 License-Expression: Apache-2.0
{keras_hub_nightly-0.22.0.dev202505290412.dist-info → keras_hub_nightly-0.22.0.dev202505310408.dist-info}/RECORD CHANGED
@@ -1,11 +1,11 @@
 keras_hub/__init__.py,sha256=bJbUZkqwhZvTb1Tqx1fbkq6mzBYiEyq-Hin3oQIkhdE,558
-keras_hub/layers/__init__.py,sha256=gnvT-GuASB1hZwY4zrRkLs5yohSQu9Pp1SHDxsWPLY8,5081
+keras_hub/layers/__init__.py,sha256=YQ4bW0_mI39Jqj2yoc8xcnynqoaXV2FBjHJviA9Ffas,5190
 keras_hub/metrics/__init__.py,sha256=KYalsMPBnfwim9BdGHFfJ5WxUKFXOQ1QoKIMT_0lwlM,439
-keras_hub/models/__init__.py,sha256=itSzodVUeuX6HQnmsSXY0Wv-5Htbu397410R-SFW_4I,26411
+keras_hub/models/__init__.py,sha256=7MhCw7S-uIPcko-R6g5a-Jy1idKe7BwlI836PfekhHc,27076
 keras_hub/samplers/__init__.py,sha256=aFQIkiqbZpi8vjrPp2MVII4QUfE-eQjra5fMeHsoy7k,886
 keras_hub/src/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 keras_hub/src/api_export.py,sha256=9pQZK27JObxWZ96QPLBp1OBsjWigh1iuV6RglPGMRk0,1499
-keras_hub/src/version.py,sha256=DDvaRSyKJcjRMYdIJIroiLLIbnEZPXF5mlsR_VQNowQ,222
+keras_hub/src/version.py,sha256=A_oYO8DhCB-uOrecxZt2B7NMyEpt94fhLGZT7-dbdBg,222
 keras_hub/src/layers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 keras_hub/src/layers/modeling/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 keras_hub/src/layers/modeling/alibi_bias.py,sha256=1XBTHI52L_iJDhN_w5ydu_iMhCuTgQAxEPwcLA6BPuk,4411
@@ -135,6 +135,13 @@ keras_hub/src/models/deeplab_v3/deeplab_v3_image_segmeter_preprocessor.py,sha256
 keras_hub/src/models/deeplab_v3/deeplab_v3_layers.py,sha256=mz9nG55gdXSTDE96AXgeTCwUFB95DIpTuqrvWIt5Lco,7840
 keras_hub/src/models/deeplab_v3/deeplab_v3_presets.py,sha256=ZKYY8A7mV2QvwXwjDUd9xAbVHo58-Hgj_IqNUbuyCIU,625
 keras_hub/src/models/deeplab_v3/deeplab_v3_segmenter.py,sha256=pubi30sPJKLOpz9fRQff2FZt_53KBvwf2uyaJ5YL7J8,3726
+keras_hub/src/models/deit/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+keras_hub/src/models/deit/deit_backbone.py,sha256=R5pBOqe8vcvD8VaRnsy_zIRIz6BLnUbkTeKUOoGNHPA,5942
+keras_hub/src/models/deit/deit_image_classifier.py,sha256=pUS2638yBAxEBxcJoHyLABsgjCWv_Y0Mj_8u0YgDPdI,5758
+keras_hub/src/models/deit/deit_image_classifier_preprocessor.py,sha256=s5pTcsUjlt1oIXFWIu-9gf2-sBesAyrjJIYmFOB96Xs,514
+keras_hub/src/models/deit/deit_image_converter.py,sha256=wEGCLHS_i4wF9WA4m7uUXcHNbwf6TYgvPoM6C_t0rpM,330
+keras_hub/src/models/deit/deit_layers.py,sha256=A80-UTHEUV8g5rEG-fr8OQpGe3HeoYlYwpoDCtq71ZU,17278
+keras_hub/src/models/deit/deit_presets.py,sha256=0c2jm2DIznOr6ciQoLM6QYopQTLiMx4jONGLaXvtt6g,1778
 keras_hub/src/models/densenet/__init__.py,sha256=r7StyamnWeeZxOk9r4ZYNbS_YVhu9YGPyXhNxljvdPg,269
 keras_hub/src/models/densenet/densenet_backbone.py,sha256=f2nfsXyXQert2aYHq-L-JZtp8inq1fs1K47rzZQ9nTI,6744
 keras_hub/src/models/densenet/densenet_image_classifier.py,sha256=ye-Ix3oU42pfsDoh-h1PG4di1kzldO0ZO7Nj304p_X4,544
@@ -259,7 +266,7 @@ keras_hub/src/models/mixtral/mixtral_causal_lm.py,sha256=JA1t6xTeaYX_fNo9ftRyvzd
 keras_hub/src/models/mixtral/mixtral_causal_lm_preprocessor.py,sha256=q2qXa9QAUWBvOWv9DeNvwsBNXSORJAbQFoQsWQ7e8V8,3079
 keras_hub/src/models/mixtral/mixtral_decoder.py,sha256=CvOjhTxPnGQ_HNknZXRI6Cx1kpuHG99_TiOh-mNcsDw,18190
 keras_hub/src/models/mixtral/mixtral_layer_norm.py,sha256=zfbDKZEb45FTwP0zQd7WPPp8tuiGoSNfS-DRYWkZyWw,1031
-keras_hub/src/models/mixtral/mixtral_presets.py,sha256=AteLrYXyVjooz_DHLnBA1OMlZS6LMu7Y7gGUWddn6go,856
+keras_hub/src/models/mixtral/mixtral_presets.py,sha256=pi5hHcwVSqr7ytf4dSnU_ew_t7NYw7EsZrmklQDqDVo,852
 keras_hub/src/models/mixtral/mixtral_tokenizer.py,sha256=Kc233k879QMyX164X_CzWbqpnqEkKWNqa648guTGkBk,661
 keras_hub/src/models/mobilenet/__init__.py,sha256=hxkNGGj_iAMu62iooUDEPA818sNOIgjG7pXMLEMOsAE,275
 keras_hub/src/models/mobilenet/mobilenet_backbone.py,sha256=aZBSFeLUObYYoi3od9DI1KfgPCqh5GHTcAI8Y2ZHShA,29536
@@ -311,8 +318,14 @@ keras_hub/src/models/qwen/qwen_causal_lm.py,sha256=_f-UHaKHp0ncxknpkpEJiW3jlng3E
 keras_hub/src/models/qwen/qwen_causal_lm_preprocessor.py,sha256=Va-4TLJD3ycEnkS41rF3dVj4_6K0j-gxLTrREFRcyr0,609
 keras_hub/src/models/qwen/qwen_decoder.py,sha256=utmAvZlU7_nP-6pjGPDinK4JaMzsQSwOARG0ote-jAg,11771
 keras_hub/src/models/qwen/qwen_layernorm.py,sha256=DS35r3qd6g5ocL7Nhf_vNzLLMo1aI9VCSmL64dgNOYI,924
-keras_hub/src/models/qwen/qwen_presets.py,sha256=DpRplWNwktM4KDgIP495PTUBJxQE_mS6KQSK5LGWOyc,1998
+keras_hub/src/models/qwen/qwen_presets.py,sha256=1FkKV6M3yqJz4EP1xa7bEvfIQ721xXT-_ikjWX0xvww,1992
 keras_hub/src/models/qwen/qwen_tokenizer.py,sha256=LCv3IyiDDHqVnM9N3lf5-BE3iwicIh0nKS1hjoPw9lE,1532
+keras_hub/src/models/qwen3/qwen3_attention.py,sha256=sewLjli290XvJ1efGZJEAYqUZfRll7cmhu0258s4C48,13042
+keras_hub/src/models/qwen3/qwen3_backbone.py,sha256=Ylpk_rRWWRxy8irlAPjJU-YrxYGpo8c9lSEO1zZl4gU,7456
+keras_hub/src/models/qwen3/qwen3_causal_lm_preprocessor.py,sha256=H4g-bgvuhAUnDwjJovydK16Kes38ZFZWPvflrgHqZis,458
+keras_hub/src/models/qwen3/qwen3_decoder.py,sha256=68s9jQj53zFmXE4-SGXKYHu546fXOyi9LUbnKk-HGYY,11595
+keras_hub/src/models/qwen3/qwen3_layernorm.py,sha256=EJxjf7Pr6ufPQnNeuYQxkExzPjPk4PQxqMsoBeSEkDo,1073
+keras_hub/src/models/qwen3/qwen3_tokenizer.py,sha256=LmPtg0vprMchDvYfTj8m5PraXI2QS3-YgdIIpIm5iAs,1448
 keras_hub/src/models/qwen_moe/__init__.py,sha256=5D8GUmVDsJs0J4sVZHcXOLkZf12U96l-WtwyVee4lu8,267
 keras_hub/src/models/qwen_moe/qwen_moe_attention.py,sha256=pE79_iHUm2LGkoWL6zMJw_pNfzIvmyq3yJaiq47W2TY,13242
 keras_hub/src/models/qwen_moe/qwen_moe_backbone.py,sha256=nrfELvIvRLmrgKrUNXci2CrecmeI6bWzJj7HH-RcWJA,15341
@@ -320,7 +333,7 @@ keras_hub/src/models/qwen_moe/qwen_moe_causal_lm.py,sha256=MeP60v7GcN_SmH5_ULRpq
 keras_hub/src/models/qwen_moe/qwen_moe_causal_lm_preprocessor.py,sha256=uKaXRrJs02vkVudjdehzJPp0B84tPMkxNHlp166kceE,589
 keras_hub/src/models/qwen_moe/qwen_moe_decoder.py,sha256=kmUjLpYTbJQ3J_31qWhLOd0Dg2_9cl_JX_zM8ZMH1Qo,23130
 keras_hub/src/models/qwen_moe/qwen_moe_layernorm.py,sha256=DbkWJo7U0-cwdZwHPeAnFznYwtao6o0fjpoDJ9UWnpc,927
-keras_hub/src/models/qwen_moe/qwen_moe_presets.py,sha256=uKrA9xLV3P3jtYUUsqdhKq_HPkB4lXmOYseB1wXTZnI,457
+keras_hub/src/models/qwen_moe/qwen_moe_presets.py,sha256=LhOA3Ow-z3cNTan4AOrtyCXS58EgfvO_gtqiZt5cUQc,455
 keras_hub/src/models/qwen_moe/qwen_moe_tokenizer.py,sha256=2c3X8jNGO0q0UL5NtUqSgHWLqhyJGi2ohNcTeOGhd84,1407
 keras_hub/src/models/resnet/__init__.py,sha256=C5UqlQ6apm8WSp1bnrxB6Bi3BGaknxRQs-r3b2wpaGA,257
 keras_hub/src/models/resnet/resnet_backbone.py,sha256=Q7nlqcTXZzjqd0e-DsjHC4ok58yOX7qxseotym3uZpM,31276
@@ -488,6 +501,7 @@ keras_hub/src/utils/transformers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRk
 keras_hub/src/utils/transformers/convert_albert.py,sha256=VdKclZpCxtDWq3UbUUQZf4fR9DJK_JYZ73B4O_G9skg,7695
 keras_hub/src/utils/transformers/convert_bart.py,sha256=Tk4h9Md9rwN5wjQbGIVrC7qzDpF8kI8qm-FKL8HlUok,14411
 keras_hub/src/utils/transformers/convert_bert.py,sha256=4gQqXCJzC9QWdLPDUAq741K8t_kjPIET050YjUnLeDA,5977
+keras_hub/src/utils/transformers/convert_deit.py,sha256=ubcqYzMlhWTCE2S_TsXICCMmqjN9RsQPaw_70vArnjo,5306
 keras_hub/src/utils/transformers/convert_distilbert.py,sha256=SlfIRhSRk5c1ir2HGiDPiXa5XdOId_DbcnZO9lbwyZ8,6498
 keras_hub/src/utils/transformers/convert_gemma.py,sha256=ElCgwBpSN5Q7rV5PJawTsoytPzs5ZjuwoY60YAe8y_A,6533
 keras_hub/src/utils/transformers/convert_gpt2.py,sha256=HCeHN_-GiQJRxLCM9OCJJ1watPVpIBF8ujS8pGbBOWc,5703
@@ -496,13 +510,14 @@ keras_hub/src/utils/transformers/convert_mistral.py,sha256=kVhN9h1ZFVhwkNW8p3wnS
 keras_hub/src/utils/transformers/convert_mixtral.py,sha256=PxeCY8Xe7U_caICugwOCEjuSZ51ZUtmef6rUxh-Wt54,5508
 keras_hub/src/utils/transformers/convert_pali_gemma.py,sha256=B1leeDw96Yvu81hYumf66hIid07k5NLqoeWAJgPnaLs,10649
 keras_hub/src/utils/transformers/convert_qwen.py,sha256=WUxMAEFVqRs7TRw7QU5TH3_ev4yf02R1xFVliMvTQqg,5886
+keras_hub/src/utils/transformers/convert_qwen3.py,sha256=LIormvCMWPq6X9Wo2eNbADjtFZ0nI7tFGZFBxmo4GKw,5700
 keras_hub/src/utils/transformers/convert_qwen_moe.py,sha256=a7R28aln-PdAcNuKAXdrtzvslho2Co6GypChxLMKPpc,10618
 keras_hub/src/utils/transformers/convert_vit.py,sha256=9SUZ9utNJhW_5cj3acMn9cRy47u2eIcDsrhmzj77o9k,5187
-keras_hub/src/utils/transformers/preset_loader.py,sha256=1nfS5xVsl-JROGXJXltTqV1fQdcUlZbGGcbf-n79pXM,4225
+keras_hub/src/utils/transformers/preset_loader.py,sha256=K5FzDAtCuXS9rmZc0Zj7UCwbz5J9_pf7ozWov1qRAfg,4495
 keras_hub/src/utils/transformers/safetensor_utils.py,sha256=CYUHyA4y-B61r7NDnCsFb4t_UmSwZ1k9L-8gzEd6KRg,3339
 keras_hub/tokenizers/__init__.py,sha256=uMjjm0mzUkRb0e4Ac_JK8aJ9cKGUi5UqmzWoWAFJprE,4164
 keras_hub/utils/__init__.py,sha256=jXPqVGBpJr_PpYmqD8aDG-fRMlxH-ulqCR2SZMn288Y,646
-keras_hub_nightly-0.22.0.dev202505290412.dist-info/METADATA,sha256=W4vT73-ho1j4QwQv59qS5xF4i6bWH5k7tHiUJ7-_y4k,7393
-keras_hub_nightly-0.22.0.dev202505290412.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-keras_hub_nightly-0.22.0.dev202505290412.dist-info/top_level.txt,sha256=N4J6piIWBKa38A4uV-CnIopnOEf8mHAbkNXafXm_CuA,10
-keras_hub_nightly-0.22.0.dev202505290412.dist-info/RECORD,,
+keras_hub_nightly-0.22.0.dev202505310408.dist-info/METADATA,sha256=v4Rvzln90tKecsbiwiU29ZFrct9xpLCV10RQDme4-DI,7393
+keras_hub_nightly-0.22.0.dev202505310408.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+keras_hub_nightly-0.22.0.dev202505310408.dist-info/top_level.txt,sha256=N4J6piIWBKa38A4uV-CnIopnOEf8mHAbkNXafXm_CuA,10
+keras_hub_nightly-0.22.0.dev202505310408.dist-info/RECORD,,