keras-hub 0.23.0.dev0__py3-none-any.whl → 0.24.0__py3-none-any.whl

This diff shows the contents of two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only and reflects the changes between those versions exactly as they appear in that registry.
keras_hub/src/models/dinov3/dinov3_presets.py ADDED
@@ -0,0 +1,93 @@
+"""DINOV3 model preset configurations."""
+
+# Metadata for loading pretrained model weights.
+backbone_presets = {
+    "dinov3_vit_small_lvd1689m": {
+        "metadata": {
+            "description": (
+                "Vision Transformer (small-sized model) trained on LVD-1689M "
+                "using DINOv3."
+            ),
+            "params": 21_600_000,
+            "path": "dinov3",
+        },
+        "kaggle_handle": "kaggle://keras/dinov3/keras/dinov3_vit_small_lvd1689m/1",
+    },
+    "dinov3_vit_small_plus_lvd1689m": {
+        "metadata": {
+            "description": (
+                "Vision Transformer (small-plus-sized model) trained on "
+                "LVD-1689M using DINOv3."
+            ),
+            "params": 29_000_000,
+            "path": "dinov3",
+        },
+        "kaggle_handle": "kaggle://keras/dinov3/keras/dinov3_vit_small_plus_lvd1689m/1",
+    },
+    "dinov3_vit_base_lvd1689m": {
+        "metadata": {
+            "description": (
+                "Vision Transformer (base-sized model) trained on LVD-1689M "
+                "using DINOv3."
+            ),
+            "params": 86_000_000,
+            "path": "dinov3",
+        },
+        "kaggle_handle": "kaggle://keras/dinov3/keras/dinov3_vit_base_lvd1689m/1",
+    },
+    "dinov3_vit_large_lvd1689m": {
+        "metadata": {
+            "description": (
+                "Vision Transformer (large-sized model) trained on LVD-1689M "
+                "using DINOv3."
+            ),
+            "params": 300_000_000,
+            "path": "dinov3",
+        },
+        "kaggle_handle": "kaggle://keras/dinov3/keras/dinov3_vit_large_lvd1689m/1",
+    },
+    "dinov3_vit_huge_plus_lvd1689m": {
+        "metadata": {
+            "description": (
+                "Vision Transformer (huge-plus-sized model) trained on "
+                "LVD-1689M using DINOv3."
+            ),
+            "params": 840_000_000,
+            "path": "dinov3",
+        },
+        "kaggle_handle": "kaggle://keras/dinov3/keras/dinov3_vit_huge_plus_lvd1689m/1",
+    },
+    "dinov3_vit_7b_lvd1689m": {
+        "metadata": {
+            "description": (
+                "Vision Transformer (7B-sized model) trained on LVD-1689M "
+                "using DINOv3."
+            ),
+            "params": 6_700_000_000,
+            "path": "dinov3",
+        },
+        "kaggle_handle": "kaggle://keras/dinov3/keras/dinov3_vit_7b_lvd1689m/1",
+    },
+    "dinov3_vit_large_sat493m": {
+        "metadata": {
+            "description": (
+                "Vision Transformer (large-sized model) trained on SAT-493M "
+                "using DINOv3."
+            ),
+            "params": 300_000_000,
+            "path": "dinov3",
+        },
+        "kaggle_handle": "kaggle://keras/dinov3/keras/dinov3_vit_large_sat493m/1",
+    },
+    "dinov3_vit_7b_sat493m": {
+        "metadata": {
+            "description": (
+                "Vision Transformer (7B-sized model) trained on SAT-493M "
+                "using DINOv3."
+            ),
+            "params": 6_700_000_000,
+            "path": "dinov3",
+        },
+        "kaggle_handle": "kaggle://keras/dinov3/keras/dinov3_vit_7b_sat493m/1",
+    },
+}
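For orientation, presets registered this way are loadable by name through the standard KerasHub entry points. A minimal sketch, assuming the exported DINOV3Backbone class and the usual from_preset flow (the 224x224 input size is an assumption here; each preset's actual image_shape comes from its config):

    import numpy as np
    import keras_hub

    # Downloads config and weights from the Kaggle handle registered above.
    backbone = keras_hub.models.DINOV3Backbone.from_preset(
        "dinov3_vit_small_lvd1689m"
    )
    images = np.random.uniform(size=(1, 224, 224, 3)).astype("float32")
    features = backbone.predict(images)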
keras_hub/src/models/gemma/gemma_causal_lm.py CHANGED
@@ -431,3 +431,19 @@ class GemmaCausalLM(CausalLM):
         )
         per_token_loss = per_token_loss_fn(target_ids, logits)
         return per_token_loss
+
+    def get_quantization_layer_structure(self, mode):
+        if mode != "gptq":
+            return None
+
+        # Wrap embedding + scaling
+        backbone = self.backbone
+        inputs = keras.Input(shape=(None,), dtype="int32")
+        x = backbone.token_embedding(inputs)
+        x = x * ops.cast(ops.sqrt(backbone.hidden_dim), x.dtype)
+        pre_processor = keras.Model(inputs=inputs, outputs=x)
+
+        return {
+            "pre_block_layers": [pre_processor],
+            "sequential_blocks": backbone.transformer_layers,
+        }
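Gemma scales token embeddings by sqrt(hidden_dim) before the first decoder block, so the pre-processor wraps both steps in one small functional model, and the rest of the network is exposed as the ordered list of decoder blocks that a GPTQ pass can quantize one at a time. A hedged sketch of the caller's view (the preset name is an existing Gemma preset; field access follows the dict returned above):

    import keras_hub

    causal_lm = keras_hub.models.GemmaCausalLM.from_preset("gemma_2b_en")
    structure = causal_lm.get_quantization_layer_structure("gptq")

    # A single keras.Model mapping token ids to scaled embeddings.
    pre = structure["pre_block_layers"][0]
    # The decoder blocks, in forward order.
    blocks = structure["sequential_blocks"]

    # Any mode other than "gptq" opts out.
    assert causal_lm.get_quantization_layer_structure("int8") is None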
keras_hub/src/models/gemma3/gemma3_presets.py CHANGED
@@ -181,4 +181,43 @@ backbone_presets = {
         },
         "kaggle_handle": "kaggle://keras/gemma3/keras/gemma3_instruct_270m/4",
     },
+    "medgemma_instruct_4b": {
+        "metadata": {
+            "description": (
+                "A 4 billion parameter model based on Gemma 3. "
+                "This model is trained for performance on medical text "
+                "and image comprehension and is optimized for medical "
+                "applications that involve a text generation component."
+            ),
+            "params": 4300079472,
+            "path": "gemma3",
+        },
+        "kaggle_handle": "kaggle://keras/medgemma/keras/medgemma_instruct_4b/1",
+    },
+    "medgemma_instruct_27b": {
+        "metadata": {
+            "description": (
+                "A 27 billion parameter model based on Gemma 3. "
+                "This model is trained for performance on medical text "
+                "and image comprehension and is optimized for medical "
+                "applications that involve a text generation component."
+            ),
+            "params": 27432406640,
+            "path": "gemma3",
+        },
+        "kaggle_handle": "kaggle://keras/medgemma/keras/medgemma_instruct_27b/1",
+    },
+    "medgemma_instruct_27b_text": {
+        "metadata": {
+            "description": (
+                "A 27 billion parameter text-only model based on Gemma 3. "
+                "This model is trained for performance on medical text "
+                "comprehension and is optimized for medical applications "
+                "that involve a text generation component."
+            ),
+            "params": 27009002240,
+            "path": "gemma3",
+        },
+        "kaggle_handle": "kaggle://keras/medgemma/keras/medgemma_instruct_27b_text/1",
+    },
 }
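Since these checkpoints keep the Gemma 3 architecture ("path": "gemma3"), they should load through the existing Gemma 3 task classes once the Kaggle assets resolve. A sketch, assuming the Gemma3CausalLM task and its generate API:

    import keras_hub

    medgemma = keras_hub.models.Gemma3CausalLM.from_preset(
        "medgemma_instruct_4b"
    )
    summary = medgemma.generate(
        "Summarize the key findings of this radiology report: ...",
        max_length=256,
    )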
keras_hub/src/models/gpt2/gpt2_causal_lm.py CHANGED
@@ -420,3 +420,20 @@ class GPT2CausalLM(CausalLM):
         )
         per_token_loss = per_token_loss_fn(target_ids, logits)
         return per_token_loss
+
+    def get_quantization_layer_structure(self, mode):
+        if mode != "gptq":
+            return None
+
+        backbone = self.backbone
+        token_ids = keras.Input(shape=(None,), dtype="int32")
+        tokens = backbone.token_embedding(token_ids)
+        positions = backbone.position_embedding(tokens)
+        x = backbone.embeddings_add((tokens, positions))
+        x = backbone.embeddings_dropout(x)
+        pre_processor = keras.Model(inputs=token_ids, outputs=x)
+
+        return {
+            "pre_block_layers": [pre_processor],
+            "sequential_blocks": backbone.transformer_layers,
+        }
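GPT-2's embedding path is richer than Gemma's (token embeddings plus learned position embeddings, summed and passed through dropout), which is why its pre-processor traces the whole path; the returned contract is identical. A rough, illustrative sketch of how a sequential GPTQ driver might consume either structure (this loop is a sketch, not the actual Keras implementation):

    def run_gptq_calibration(task, calibration_token_ids):
        structure = task.get_quantization_layer_structure("gptq")
        if structure is None:
            raise ValueError("Task does not expose a GPTQ layer structure.")

        # Run the embedding pre-processor once to produce block inputs.
        x = calibration_token_ids
        for layer in structure["pre_block_layers"]:
            x = layer(x)

        # Quantize blocks one at a time, propagating activations forward.
        for block in structure["sequential_blocks"]:
            # ... collect activations for `block`, solve for its quantized
            # weights, then feed its output to the next block.
            x = block(x)
        return x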
keras_hub/src/models/masked_lm.py CHANGED
@@ -84,3 +84,25 @@ class MaskedLM(Task):
             weighted_metrics=weighted_metrics,
             **kwargs,
         )
+
+    def get_quantization_layer_structure(self, mode):
+        if mode != "gptq":
+            return None
+
+        backbone = self.backbone
+        # Check for standard backbone structure.
+        if not hasattr(backbone, "transformer_layers"):
+            return None
+
+        # Check for embedding.
+        embedding = getattr(backbone, "token_embedding", None)
+        if embedding is None:
+            embedding = getattr(backbone, "embedding", None)
+
+        if embedding is None:
+            return None
+
+        return {
+            "pre_block_layers": [embedding],
+            "sequential_blocks": backbone.transformer_layers,
+        }
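Unlike the per-model overrides above, this base implementation discovers the structure by duck typing: any backbone exposing transformer_layers plus a token_embedding (or embedding) participates, and everything else opts out with None. A sketch with a standard encoder task (class and preset name are existing KerasHub names, used here as an assumption):

    import keras_hub

    masked_lm = keras_hub.models.BertMaskedLM.from_preset("bert_base_en")
    structure = masked_lm.get_quantization_layer_structure("gptq")
    if structure is None:
        print("Backbone does not follow the standard layer layout.")
    else:
        print(f"{len(structure['sequential_blocks'])} quantizable blocks")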
keras_hub/src/models/qwen3/qwen3_presets.py CHANGED
@@ -70,4 +70,40 @@ backbone_presets = {
         },
         "kaggle_handle": "kaggle://keras/qwen-3/keras/qwen3_32b_en/1",
     },
+    "qwen3_embedding_0.6b_en": {
+        "metadata": {
+            "description": (
+                "This text embedding model features a 32k context length and "
+                "offers flexible, user-defined embedding dimensions that can "
+                "range from 32 to 1024."
+            ),
+            "params": 595776512,
+            "path": "qwen3",
+        },
+        "kaggle_handle": "kaggle://keras/qwen-3-embedding/keras/qwen3_embedding_0.6b_en/1",
+    },
+    "qwen3_embedding_4b_en": {
+        "metadata": {
+            "description": (
+                "This text embedding model features a 32k context length and "
+                "offers flexible, user-defined embedding dimensions that can "
+                "range from 32 to 2560."
+            ),
+            "params": 4021774336,
+            "path": "qwen3",
+        },
+        "kaggle_handle": "kaggle://keras/qwen-3-embedding/keras/qwen3_embedding_4b_en/1",
+    },
+    "qwen3_embedding_8b_en": {
+        "metadata": {
+            "description": (
+                "This text embedding model features a 32k context length and "
+                "offers flexible, user-defined embedding dimensions that can "
+                "range from 32 to 4096."
+            ),
+            "params": 8188515328,
+            "path": "qwen3",
+        },
+        "kaggle_handle": "kaggle://keras/qwen-3-embedding/keras/qwen3_embedding_8b_en/1",
+    },
 }
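The descriptions advertise user-defined embedding dimensions, which Matryoshka-style embedding models typically realize by truncating and re-normalizing the full output vector; that general convention, not a KerasHub API, is what the sketch below assumes. The generic Backbone entry point is used since this diff does not show a dedicated task class for these presets:

    import numpy as np
    import keras_hub

    backbone = keras_hub.models.Backbone.from_preset("qwen3_embedding_0.6b_en")

    def truncate_embedding(vector, dim=256):
        # Hypothetical post-processing: keep the leading dims, re-normalize.
        v = np.asarray(vector)[..., :dim]
        return v / np.linalg.norm(v, axis=-1, keepdims=True)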
keras_hub/src/models/siglip/siglip_presets.py CHANGED
@@ -321,4 +321,19 @@ backbone_presets = {
         },
         "kaggle_handle": "kaggle://keras/siglip/keras/siglip2_so400m_patch16_512/1",
     },
+    "medsiglip_900m_448": {
+        "metadata": {
+            "description": (
+                "A 900 million parameter variant of SigLIP trained to encode "
+                "medical images and text into a common embedding space. "
+                "MedSigLIP contains a vision encoder and a text encoder, and "
+                "supports 448x448 image resolution with up to 64 text tokens."
+            ),
+            "params": 878301426,
+            "official_name": "SigLIP2",
+            "path": "siglip",
+            "model_card": "https://huggingface.co/google/medsiglip-448#medsiglip-model-card",
+        },
+        "kaggle_handle": "kaggle://keras/medsiglip/keras/medsiglip_900m_448/1",
+    },
 }
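MedSigLIP keeps the SigLIP two-tower layout, so the existing SigLIP integration should apply unchanged; per the description, inputs are 448x448 images and text of at most 64 tokens. A minimal sketch, assuming the exported SigLIPBackbone class:

    import keras_hub

    backbone = keras_hub.models.SigLIPBackbone.from_preset("medsiglip_900m_448")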
keras_hub/src/models/smollm3/__init__.py ADDED
@@ -0,0 +1,5 @@
+from keras_hub.src.models.smollm3.smollm3_backbone import SmolLM3Backbone
+from keras_hub.src.models.smollm3.smollm3_presets import backbone_presets
+from keras_hub.src.utils.preset_utils import register_presets
+
+register_presets(backbone_presets, SmolLM3Backbone)
keras_hub/src/models/smollm3/smollm3_presets.py ADDED
@@ -0,0 +1,16 @@
+"""SmolLM3 model preset configurations."""
+
+backbone_presets = {
+    "smollm3_3b_en": {
+        "metadata": {
+            "description": (
+                "A dense decoder-only model with 3 billion total "
+                "parameters, built on 36 layers with 16 query and "
+                "4 key/value attention heads."
+            ),
+            "params": 3075100928,
+            "path": "smollm3",
+        },
+        "kaggle_handle": "kaggle://keras/smollm3/keras/smollm3_3b_en/1",
+    },
+}
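The five-line __init__.py above is what makes the preset name resolvable: register_presets attaches the metadata to SmolLM3Backbone at import time, after which the standard constructors accept it. A sketch, assuming SmolLM3Backbone is exported under keras_hub.models:

    import keras_hub

    # Resolvable because register_presets(backbone_presets, SmolLM3Backbone)
    # ran when the smollm3 module was imported.
    backbone = keras_hub.models.SmolLM3Backbone.from_preset("smollm3_3b_en")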
keras_hub/src/utils/tensor_utils.py CHANGED
@@ -12,9 +12,11 @@ from packaging import version
 
 try:
     import tensorflow as tf
-    import tensorflow_text as tf_text
 except ImportError:
     tf = None
+try:
+    import tensorflow_text as tf_text
+except ImportError:
     tf_text = None
 
 
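The practical effect of splitting the guard: an environment that has tensorflow but not tensorflow-text now keeps tf usable instead of losing both. A standalone illustration of the old failure mode (a sketch, not keras-hub code):

    # With the old single guard, a missing tensorflow_text nulled both names:
    try:
        import tensorflow as tf
        import tensorflow_text as tf_text  # ImportError lands here...
    except ImportError:
        tf = None  # ...discarding the tensorflow module that imported fine.
        tf_text = None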
keras_hub/src/utils/transformers/convert_dinov3.py ADDED
@@ -0,0 +1,106 @@
+import numpy as np
+
+from keras_hub.src.models.dinov3.dinov3_backbone import DINOV3Backbone
+
+backbone_cls = DINOV3Backbone
+
+
+def convert_backbone_config(transformers_config):
+    image_size = transformers_config["image_size"]
+    return {
+        "patch_size": transformers_config["patch_size"],
+        "num_layers": transformers_config["num_hidden_layers"],
+        "hidden_dim": transformers_config["hidden_size"],
+        "num_heads": transformers_config["num_attention_heads"],
+        "intermediate_dim": transformers_config["intermediate_size"],
+        "layer_scale_init_value": transformers_config["layerscale_value"],
+        "num_register_tokens": transformers_config["num_register_tokens"],
+        "use_mask_token": True,
+        "hidden_activation": transformers_config["hidden_act"],
+        "use_gated_mlp": transformers_config["use_gated_mlp"],
+        "use_query_bias": transformers_config["query_bias"],
+        "use_key_bias": transformers_config["key_bias"],
+        "use_value_bias": transformers_config["value_bias"],
+        "use_proj_bias": transformers_config["proj_bias"],
+        "use_mlp_bias": transformers_config["mlp_bias"],
+        "attention_dropout": transformers_config["attention_dropout"],
+        "drop_path_rate": transformers_config["drop_path_rate"],
+        "layer_norm_eps": transformers_config["layer_norm_eps"],
+        "image_shape": (image_size, image_size, 3),
+        "rope_theta": transformers_config["rope_theta"],
+        "apply_layernorm": False,
+    }
+
+
+def convert_weights(backbone, loader, transformers_config):
+    if not isinstance(backbone, DINOV3Backbone):
+        raise ValueError(
+            "The provided backbone must be an instance of DINOV3Backbone. "
+            f"Received: {type(backbone)}"
+        )
+
+    def port_ln(keras_variable, weight_key):
+        loader.port_weight(keras_variable.gamma, f"{weight_key}.weight")
+        loader.port_weight(keras_variable.beta, f"{weight_key}.bias")
+
+    def port_dense(keras_variable, weight_key):
+        loader.port_weight(
+            keras_variable.kernel,
+            f"{weight_key}.weight",
+            hook_fn=lambda x, _: x.T,
+        )
+        if keras_variable.bias is not None:
+            loader.port_weight(keras_variable.bias, f"{weight_key}.bias")
+
+    # Embedding.
+    loader.port_weight(
+        keras_variable=backbone.embeddings.cls_token,
+        hf_weight_key="embeddings.cls_token",
+    )
+    if backbone.use_mask_token:
+        loader.port_weight(
+            keras_variable=backbone.embeddings.mask_token,
+            hf_weight_key="embeddings.mask_token",
+        )
+    if backbone.num_register_tokens > 0:
+        loader.port_weight(
+            keras_variable=backbone.embeddings.register_tokens,
+            hf_weight_key="embeddings.register_tokens",
+        )
+    loader.port_weight(
+        keras_variable=backbone.embeddings.patch_embeddings.projection.kernel,
+        hf_weight_key="embeddings.patch_embeddings.weight",
+        hook_fn=lambda x, _: np.transpose(x, (2, 3, 1, 0)),
+    )
+    loader.port_weight(
+        keras_variable=backbone.embeddings.patch_embeddings.projection.bias,
+        hf_weight_key="embeddings.patch_embeddings.bias",
+    )
+
+    # Encoder.
+    for i, layer in enumerate(backbone.encoder.layers):
+        prefix = f"layer.{i}"
+        port_ln(layer.norm1, f"{prefix}.norm1")
+        port_dense(layer.attention.query_dense, f"{prefix}.attention.q_proj")
+        port_dense(layer.attention.key_dense, f"{prefix}.attention.k_proj")
+        port_dense(layer.attention.value_dense, f"{prefix}.attention.v_proj")
+        port_dense(layer.attention.output_dense, f"{prefix}.attention.o_proj")
+
+        loader.port_weight(
+            keras_variable=layer.layer_scale1.lambda1,
+            hf_weight_key=f"{prefix}.layer_scale1.lambda1",
+        )
+        port_ln(layer.norm2, f"{prefix}.norm2")
+        if backbone.use_gated_mlp:
+            port_dense(layer.mlp.gate_proj, f"{prefix}.mlp.gate_proj")
+            port_dense(layer.mlp.up_proj, f"{prefix}.mlp.up_proj")
+            port_dense(layer.mlp.down_proj, f"{prefix}.mlp.down_proj")
+        else:
+            port_dense(layer.mlp.up_proj, f"{prefix}.mlp.up_proj")
+            port_dense(layer.mlp.down_proj, f"{prefix}.mlp.down_proj")
+        loader.port_weight(
+            keras_variable=layer.layer_scale2.lambda1,
+            hf_weight_key=f"{prefix}.layer_scale2.lambda1",
+        )
+
+    port_ln(backbone.layernorm, "norm")
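The hook_fn arguments encode the layout mismatch between PyTorch and Keras weights: torch.nn.Linear stores (out_features, in_features) while Dense kernels are (in_features, out_features), and Conv2d stores (out_channels, in_channels, kH, kW) while Conv2D expects (kH, kW, in_channels, out_channels). A self-contained illustration (shapes are examples only):

    import numpy as np

    hf_linear_weight = np.zeros((3072, 768))   # HF Linear: (out, in)
    keras_dense_kernel = hf_linear_weight.T    # Keras Dense: (in, out)

    hf_conv_weight = np.zeros((768, 3, 16, 16))  # (out, in, kH, kW)
    keras_conv_kernel = np.transpose(hf_conv_weight, (2, 3, 1, 0))
    print(keras_conv_kernel.shape)             # (16, 16, 3, 768)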
keras_hub/src/utils/transformers/preset_loader.py CHANGED
@@ -8,6 +8,7 @@ from keras_hub.src.utils.transformers import convert_bart
 from keras_hub.src.utils.transformers import convert_bert
 from keras_hub.src.utils.transformers import convert_deit
 from keras_hub.src.utils.transformers import convert_dinov2
+from keras_hub.src.utils.transformers import convert_dinov3
 from keras_hub.src.utils.transformers import convert_distilbert
 from keras_hub.src.utils.transformers import convert_esm
 from keras_hub.src.utils.transformers import convert_gemma
@@ -42,6 +43,8 @@ class TransformersPresetLoader(PresetLoader):
             self.converter = convert_distilbert
         elif model_type in ("dinov2", "dinov2_with_registers"):
             self.converter = convert_dinov2
+        elif model_type == "dinov3_vit":
+            self.converter = convert_dinov3
         elif model_type == "esm":
             self.converter = convert_esm
         elif model_type in ("gemma", "gemma2"):
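With the converter registered, any Hugging Face checkpoint whose config reports model_type "dinov3_vit" can be loaded directly. A sketch using KerasHub's existing hf:// scheme; the repo handle below is a hypothetical example, not taken from this diff:

    import keras_hub

    # Routes through TransformersPresetLoader -> convert_dinov3.
    backbone = keras_hub.models.DINOV3Backbone.from_preset(
        "hf://facebook/dinov3-vits16-pretrain-lvd1689m"  # hypothetical handle
    )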
keras_hub/src/version.py CHANGED
@@ -1,7 +1,7 @@
 from keras_hub.src.api_export import keras_hub_export
 
 # Unique source of truth for the version number.
-__version__ = "0.23.0.dev0"
+__version__ = "0.24.0"
 
 
 @keras_hub_export("keras_hub.version")
keras_hub-0.23.0.dev0.dist-info/METADATA → keras_hub-0.24.0.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: keras-hub
-Version: 0.23.0.dev0
+Version: 0.24.0
 Summary: Pretrained models for Keras.
 Author-email: Keras team <keras-users@googlegroups.com>
 License-Expression: Apache-2.0
keras_hub-0.23.0.dev0.dist-info/RECORD → keras_hub-0.24.0.dist-info/RECORD CHANGED
@@ -1,11 +1,11 @@
 keras_hub/__init__.py,sha256=bJbUZkqwhZvTb1Tqx1fbkq6mzBYiEyq-Hin3oQIkhdE,558
-keras_hub/layers/__init__.py,sha256=ufJKHxMTFhwp--E3ixfGCZqq89pZOUOxCQYgI5pEUA8,5944
+keras_hub/layers/__init__.py,sha256=hY5hZX5oOxRTFxfPe2hGhrHWJwF1kB7QiwITSS4Xp2A,6061
 keras_hub/metrics/__init__.py,sha256=KYalsMPBnfwim9BdGHFfJ5WxUKFXOQ1QoKIMT_0lwlM,439
-keras_hub/models/__init__.py,sha256=yazrEg57HafE6Fgr-bfDXR3iFrArx6RytOEnV1CJJS8,32068
+keras_hub/models/__init__.py,sha256=XGYkwfBVZiPw5ZjSV5S_n3FnkPf06yYNzxZjXMhiX70,32166
 keras_hub/samplers/__init__.py,sha256=aFQIkiqbZpi8vjrPp2MVII4QUfE-eQjra5fMeHsoy7k,886
 keras_hub/src/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 keras_hub/src/api_export.py,sha256=9pQZK27JObxWZ96QPLBp1OBsjWigh1iuV6RglPGMRk0,1499
-keras_hub/src/version.py,sha256=l9QcZIXQ9-XS5Yfzx1lCf85XEjfplfaVqk1ucDblEIA,211
+keras_hub/src/version.py,sha256=tqzz1cg-MhQ4dsd1iV-5-JnQZ5BXSCm_vABSWNuFY9k,206
 keras_hub/src/layers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 keras_hub/src/layers/modeling/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 keras_hub/src/layers/modeling/alibi_bias.py,sha256=1XBTHI52L_iJDhN_w5ydu_iMhCuTgQAxEPwcLA6BPuk,4411
@@ -44,7 +44,7 @@ keras_hub/src/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hS
 keras_hub/src/models/audio_to_text.py,sha256=XoOjXtKBX6K1fz-zOXcdVo3FpjuxCMnJZh2LQcYXb_0,2726
 keras_hub/src/models/audio_to_text_preprocessor.py,sha256=GS-WWyJ6aSsPRxi_0bxvxA00h2mT2FEwSdAoQXAUYVI,3249
 keras_hub/src/models/backbone.py,sha256=BdqPsne7lIITIxn6jY6AN4vZ-Rc9VnpqTxvVNR3CS7M,12210
-keras_hub/src/models/causal_lm.py,sha256=x86PTAzoBpAdJyenPRNNBAkazUjcRLr4wb2hMs5SrQ0,18344
+keras_hub/src/models/causal_lm.py,sha256=3WVFo9WAd4ZDu-62X98JMoUw1bqfTlVR0lQPb6vmr1g,18989
 keras_hub/src/models/causal_lm_preprocessor.py,sha256=nxl-sfmCfkfl6JmVRASa878QbaZUgWSA6Jdu48x4-dY,7155
 keras_hub/src/models/depth_estimator.py,sha256=JR7wtunOPrfEoDkLspoZnL2ItWhZFDeAxxw2vue5QLs,8992
 keras_hub/src/models/depth_estimator_preprocessor.py,sha256=2iE8NAUyiD2AvjZwNoXKUaOUogcE1fRzTNXLQ75GZpQ,2822
@@ -55,7 +55,7 @@ keras_hub/src/models/image_segmenter.py,sha256=C1bzIO59pG58iist5GLn_qnlotDpcAVxP
 keras_hub/src/models/image_segmenter_preprocessor.py,sha256=d7I2Hk0SKWyKpjRS6WYccmh_CYQBpWoj0JF5RRrU6rw,3748
 keras_hub/src/models/image_to_image.py,sha256=nblRd-16n5_JxKIH6IJU7bHTFRGxyCpKUilg6VjWuek,16933
 keras_hub/src/models/inpaint.py,sha256=oqdj0Q9dNG54g6sNQ5foto8saPd5Sx8kYZuHCZPBqrY,20995
-keras_hub/src/models/masked_lm.py,sha256=uXO_dE_hILlOC9jNr6oK6IHi9IGUqLyNGvr6nMt8Rk0,3576
+keras_hub/src/models/masked_lm.py,sha256=EPDHfMxyO_pciKuLooR6T5XYRcfvsqgfDXumXkA41No,4221
 keras_hub/src/models/masked_lm_preprocessor.py,sha256=g8vrnyYwqdnSw5xppROM1Gzo_jmMWKYZoQCsKdfrFKk,5656
 keras_hub/src/models/object_detector.py,sha256=oAK42fFBKuN0G_WM-DhygFkgQ0KsEwU_ZiU4umHywqc,3757
 keras_hub/src/models/object_detector_preprocessor.py,sha256=kOSVRNFAg-UjtrCEVBdHXUFyJy7kQtlVuGnZ1aLEfOk,2664
@@ -174,8 +174,13 @@ keras_hub/src/models/depth_anything/interpolate.py,sha256=qwrPGP6wA4jZ-XcSeulhky
 keras_hub/src/models/dinov2/__init__.py,sha256=qacZi82EfAloVND4gDLZjqgR5_yVdz_dc4mMKyCsjOA,257
 keras_hub/src/models/dinov2/dinov2_backbone.py,sha256=QH3lzE1EnxTcOSii9KS1Qx3lq0XcZMsvElB7AL_ejZY,10672
 keras_hub/src/models/dinov2/dinov2_image_converter.py,sha256=gfFROdYV5rOzo3kJFlRvRHYjek8z9YirKfrFwlVJO3g,342
-keras_hub/src/models/dinov2/dinov2_layers.py,sha256=UCcia2kWA1O37SMmUbyhUcSXmUpLfNjk1E6mPTPDrF0,33647
+keras_hub/src/models/dinov2/dinov2_layers.py,sha256=wo80Re043Gjly-XE-sT01QAYq3h793zhmU-Nb6SFN4g,33702
 keras_hub/src/models/dinov2/dinov2_presets.py,sha256=ho493GPH98K4LH1E54UV2qZZ4h7Un9ylbBmMQjNoKh4,2937
+keras_hub/src/models/dinov3/__init__.py,sha256=AI7vTZJBG6Ygb48o6pXtHzxKk0Rek3p7-HffD-Y48cc,257
+keras_hub/src/models/dinov3/dinov3_backbone.py,sha256=WDHipJSG10seRzYG_hARifF52wqhj9enkhuZ6mgJmjw,10511
+keras_hub/src/models/dinov3/dinov3_image_converter.py,sha256=_oHDcI2CoxjbSLxLfkK1zEPcf4Goy0S66igmrXt58cQ,342
+keras_hub/src/models/dinov3/dinov3_layers.py,sha256=w5K2btblrgrULqzPQdbvtkyR5Px2UZkqcZQ7jq2K3Uk,37169
+keras_hub/src/models/dinov3/dinov3_presets.py,sha256=oAAhMFbBMPmhtoDj3DMZz9zAG1DVSrf-xw0czoPwOEc,3148
 keras_hub/src/models/distil_bert/__init__.py,sha256=3Z0w-Mt3aOR0u9RGzjHQ7B3J3qBF2pGjupDGQ9yyzoc,303
 keras_hub/src/models/distil_bert/distil_bert_backbone.py,sha256=rnAf_GokB3wAeJwVZtgUKQO_bKJIa8RavhL_ykTJpNw,6440
 keras_hub/src/models/distil_bert/distil_bert_masked_lm.py,sha256=axeZd5UcxFr3_Q8H4yG10CINh93wbcyjlPLauqe5N9E,4289
@@ -233,7 +238,7 @@ keras_hub/src/models/flux/flux_text_to_image_preprocessor.py,sha256=2kI2vSZvTia5
 keras_hub/src/models/gemma/__init__.py,sha256=rVzOJMJ39bgVlT8UdC0t8PlN2c237GKTBmfHIsbPuOQ,251
 keras_hub/src/models/gemma/gemma_attention.py,sha256=wmU5FgQu1Ajg-KHKVXTLHWH7pXqN4_zVJTCp_FXMcAs,10095
 keras_hub/src/models/gemma/gemma_backbone.py,sha256=pAAVaVKB6nlA0PncVnFXvNgJV7SeZy_ko2AxoIs0jF0,13364
-keras_hub/src/models/gemma/gemma_causal_lm.py,sha256=3OXaIXlrKqMIuUnBk-bUz-0SYFL-XkkQTWm8qRY2YII,16770
+keras_hub/src/models/gemma/gemma_causal_lm.py,sha256=5sOXph25bfwlz-o4MYuWU1BN8yTcj5l0R-x6y0sFams,17315
 keras_hub/src/models/gemma/gemma_causal_lm_preprocessor.py,sha256=bpKkEurWIfa6Kp9s4pz84-sBDSA6ZFNHP8nXG1fFQrg,2912
 keras_hub/src/models/gemma/gemma_decoder_block.py,sha256=f5UsRO-VNsKJfm_WHVJWK4UahhzYm3sKprJ8jjr-zm4,7628
 keras_hub/src/models/gemma/gemma_presets.py,sha256=wAH7mjz9tbQqqdwajU2dilGytnWK1qc-aTIVLtjpTWg,8263
@@ -247,13 +252,13 @@ keras_hub/src/models/gemma3/gemma3_causal_lm_preprocessor.py,sha256=vjt4N-zr0Eb5
 keras_hub/src/models/gemma3/gemma3_decoder_block.py,sha256=CYwYazqwakLNfhOLBl_8Q2TVZcMcOxMtiZtuVlk_hoo,11470
 keras_hub/src/models/gemma3/gemma3_image_converter.py,sha256=czi5JrTyKiK0nFzvonviBIX8jjvLHqvGNA9RyheB31k,536
 keras_hub/src/models/gemma3/gemma3_interleave_embeddings.py,sha256=CfYdudk5En9iU6vEnrcrEWIztloD1r8VzF2extqAhAM,4616
-keras_hub/src/models/gemma3/gemma3_presets.py,sha256=FGAHYE4HTLuiceuiCKBJtc1aNd7OgMB59KD0s6Ba_Fg,6105
+keras_hub/src/models/gemma3/gemma3_presets.py,sha256=1GZSwsGRA19RllhZPR-kFjH5y9A6308V3TYfqHAnXUw,7744
 keras_hub/src/models/gemma3/gemma3_tokenizer.py,sha256=ZaBclFIwzJkSXDuZMBQLHUKV8RWEdZ_dsJMvMcc3qXw,3215
 keras_hub/src/models/gemma3/gemma3_vision_encoder.py,sha256=7XI0oBjIfJItV5w90t5bWb3C2KzjhvDnIC7wjIq4Cns,20850
 keras_hub/src/models/gemma3/rms_normalization.py,sha256=fku-JEo2sNy-ytX7ySD1sRzdhRAPmYex_z8oFk1NiG8,833
 keras_hub/src/models/gpt2/__init__.py,sha256=_hqeljpBkW8DLABy4nKBzJxXUh29WIEW27obmDCiH5Q,245
 keras_hub/src/models/gpt2/gpt2_backbone.py,sha256=H1LgDd-bavrWtdCavdI519qlaruE2Jj5H3-SMc-5d14,6961
-keras_hub/src/models/gpt2/gpt2_causal_lm.py,sha256=ynAcvh0-WUmwMN7vgflau4LH4YRFLf986OYRZ3M2Znk,16765
+keras_hub/src/models/gpt2/gpt2_causal_lm.py,sha256=i2veiYijPTsr6RqH5pR93uDw7rU9otQttdFBuVUxA80,17380
 keras_hub/src/models/gpt2/gpt2_causal_lm_preprocessor.py,sha256=3AD1LBFJ-u6bDdrwKa1LbINlEblZkhwB2sMJx-XEUZk,2992
 keras_hub/src/models/gpt2/gpt2_preprocessor.py,sha256=eYMIXw8Oebsr14GhqBh1CEhbLbIK3WnLUxaXj25fFpQ,3179
 keras_hub/src/models/gpt2/gpt2_presets.py,sha256=1mflR1dVuEwFfNe3Fkra6vt7DrjmkAckjyP-LclNLFc,1897
@@ -392,7 +397,7 @@ keras_hub/src/models/qwen3/qwen3_causal_lm.py,sha256=cn_4WFVxhlOArtIGAaqkNzIz9Rx
 keras_hub/src/models/qwen3/qwen3_causal_lm_preprocessor.py,sha256=H4g-bgvuhAUnDwjJovydK16Kes38ZFZWPvflrgHqZis,458
 keras_hub/src/models/qwen3/qwen3_decoder.py,sha256=68s9jQj53zFmXE4-SGXKYHu546fXOyi9LUbnKk-HGYY,11595
 keras_hub/src/models/qwen3/qwen3_layernorm.py,sha256=EJxjf7Pr6ufPQnNeuYQxkExzPjPk4PQxqMsoBeSEkDo,1073
-keras_hub/src/models/qwen3/qwen3_presets.py,sha256=eAqRbjLyRTSXcN-jnGHqoCHejKm2gmt8_zL4EPoE-JA,2518
+keras_hub/src/models/qwen3/qwen3_presets.py,sha256=3ml8rh9dmHCt_TP85xSmkoOxEd5cx5UJ1ts1yB5tTLo,3922
 keras_hub/src/models/qwen3/qwen3_tokenizer.py,sha256=LmPtg0vprMchDvYfTj8m5PraXI2QS3-YgdIIpIm5iAs,1448
 keras_hub/src/models/qwen3_moe/__init__.py,sha256=0jp5BHZ8O8cCrp4g6VWWDUwB5_fSDXvCVCSf6Q0UB6o,273
 keras_hub/src/models/qwen3_moe/qwen3_moe_attention.py,sha256=rZnzWA-cAhuWSuHSJfrNqf5_Cu0PNEe7PKbPNbhJdeM,13355
@@ -467,14 +472,16 @@ keras_hub/src/models/siglip/siglip_image_converter.py,sha256=yjYc0XOyL37WLlr-X6V
 keras_hub/src/models/siglip/siglip_layers.py,sha256=c20n6v3cFsI-Im9GBVTknhj_IpX79I4a-fajBKRMzQA,19893
 keras_hub/src/models/siglip/siglip_loss.py,sha256=n6zmOeL0o7Nwb5iaoEZfrxiAsQoqZ9yLIlaCJsAfTg4,1442
 keras_hub/src/models/siglip/siglip_preprocessor.py,sha256=r1Ej7hVwr5BudFYTHkjW5yc3lk4OYZD1s3t32lKkuec,5660
-keras_hub/src/models/siglip/siglip_presets.py,sha256=gOzSVhLskAthfzq8jWOtQWv14euaqS2ywcZlNfivDOI,13164
+keras_hub/src/models/siglip/siglip_presets.py,sha256=jtIQrNcq14othG1QgwBEfozEmoHdXXW270qylQEmA8E,13864
 keras_hub/src/models/siglip/siglip_text_encoder.py,sha256=xOVvzyQHLX9ne30y4ussar99gNMXPXHYKlkbCX_On2Y,5380
 keras_hub/src/models/siglip/siglip_tokenizer.py,sha256=j_67JbIHJDRk-CbiemG2dgAO6lp3_0_JdnfroZ90G18,2579
 keras_hub/src/models/siglip/siglip_vision_encoder.py,sha256=CaNaFq5thBC3TUXXOf2qknk5vWsauM20ZoaDPYRnXcs,5927
+keras_hub/src/models/smollm3/__init__.py,sha256=_svUTRF_mnFqe2cPP03CASKbDes6FvI76y_3TEHaxes,263
 keras_hub/src/models/smollm3/smollm3_backbone.py,sha256=9e8ydwy7X0stVEWgIJYt69vt6JYSCiYpM73w6oLxyoQ,7949
 keras_hub/src/models/smollm3/smollm3_causal_lm.py,sha256=PWn2zPu0YS3uRvmjksmXKXpxehl8lvEHAXaO0u7nweM,12641
 keras_hub/src/models/smollm3/smollm3_causal_lm_preprocessor.py,sha256=gbj7IhDbgA30AM80uG6BeI1yZmGd5yQ2VEaPWWyS9M4,3121
 keras_hub/src/models/smollm3/smollm3_layers.py,sha256=lR33IynX-1G42L3hNzOBnnIx-INOzJguSQDAwIPaSIQ,26632
+keras_hub/src/models/smollm3/smollm3_presets.py,sha256=-DRT6uUEa3b7KFpgUmAToh-kXKfyGiNrxAMz-0R8R6E,499
 keras_hub/src/models/smollm3/smollm3_tokenizer.py,sha256=evOVM8pgZUkWLoXAwWiYRSNNFZ7KBv1WtFdLqpHdCQU,1877
 keras_hub/src/models/smollm3/smollm3_utils.py,sha256=zAqtZTv1G--k-Dbjvk886OcmsuIxyYicRiUQXcpyud0,1904
 keras_hub/src/models/stable_diffusion_3/__init__.py,sha256=ZKYQuaRObyhKq8GVAHmoRvlXp6FpU8ChvutVCHyXKuc,343
@@ -578,7 +585,7 @@ keras_hub/src/utils/openvino_utils.py,sha256=P1ZvedLv91LZD-UAgAo2dy6WC5305elh1qv
 keras_hub/src/utils/pipeline_model.py,sha256=jgzB6NQPSl0KOu08N-TazfOnXnUJbZjH2EXXhx25Ftg,9084
 keras_hub/src/utils/preset_utils.py,sha256=lyCg_PRcYH1Jy8lGKaO8sgpIbMrP-Ik66EbjGD4gizc,37677
 keras_hub/src/utils/python_utils.py,sha256=N8nWeO3san4YnGkffRXG3Ix7VEIMTKSN21FX5TuL7G8,202
-keras_hub/src/utils/tensor_utils.py,sha256=bGM0pK-x0R4640emul49GfSZ3p4OSvOaVzZZPlm6eiM,16957
+keras_hub/src/utils/tensor_utils.py,sha256=tULr53SZLCczN_BD7XvbAq9c9bFVZTn7aYcLbqmbfx8,16982
 keras_hub/src/utils/coco/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 keras_hub/src/utils/coco/coco_utils.py,sha256=x_QnUUvZ92zoFzMJugiInHORc4NrMdWVBkpp8BAYF6s,2586
 keras_hub/src/utils/imagenet/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -598,6 +605,7 @@ keras_hub/src/utils/transformers/convert_bart.py,sha256=Tk4h9Md9rwN5wjQbGIVrC7qz
 keras_hub/src/utils/transformers/convert_bert.py,sha256=4gQqXCJzC9QWdLPDUAq741K8t_kjPIET050YjUnLeDA,5977
 keras_hub/src/utils/transformers/convert_deit.py,sha256=ubcqYzMlhWTCE2S_TsXICCMmqjN9RsQPaw_70vArnjo,5306
 keras_hub/src/utils/transformers/convert_dinov2.py,sha256=Zmxz33hKJCcykQOcW8XhG_Yy1l8XqIYam1cjzM69-Mk,6986
+keras_hub/src/utils/transformers/convert_dinov3.py,sha256=rZqowTASKSAQQ1HrwlD9_tY7VAQHY_C4_61ky5wUbvE,4448
 keras_hub/src/utils/transformers/convert_distilbert.py,sha256=SlfIRhSRk5c1ir2HGiDPiXa5XdOId_DbcnZO9lbwyZ8,6498
 keras_hub/src/utils/transformers/convert_esm.py,sha256=rOgGnNY37ZbYnoVC3L-Y-yGGAxTRmYtQV0nJoandH2Y,6214
 keras_hub/src/utils/transformers/convert_gemma.py,sha256=ElCgwBpSN5Q7rV5PJawTsoytPzs5ZjuwoY60YAe8y_A,6533
@@ -613,13 +621,13 @@ keras_hub/src/utils/transformers/convert_qwen_moe.py,sha256=a7R28aln-PdAcNuKAXdr
 keras_hub/src/utils/transformers/convert_smollm3.py,sha256=V2vWES85YSNXNx39I8OwAcOvSpb9KxUscrDr7ra-LPA,5281
 keras_hub/src/utils/transformers/convert_t5gemma.py,sha256=DPOwd61UhjspKuCsk3_EaNvSADGP_f8KLcZARHYVk5Y,9490
 keras_hub/src/utils/transformers/convert_vit.py,sha256=YAmXh519ecSgEO5B4g-aEQg1Bb_6ifFafLMqDTfLn_c,5259
-keras_hub/src/utils/transformers/preset_loader.py,sha256=PmB4wcPaMlqMhrhk2bYt74TvRHRsZgpodfKlzixfr-Q,5219
+keras_hub/src/utils/transformers/preset_loader.py,sha256=alzuIEhDI6gLpEw05wPJVbOJ2LhwmLB_s7JhDqkb4ec,5364
 keras_hub/src/utils/transformers/safetensor_utils.py,sha256=CYUHyA4y-B61r7NDnCsFb4t_UmSwZ1k9L-8gzEd6KRg,3339
 keras_hub/src/utils/transformers/export/gemma.py,sha256=xX_vfQwvFZ_-lQX4kgMNOGKL7fL_1yk7QyGYV2Qyly4,4699
 keras_hub/src/utils/transformers/export/hf_exporter.py,sha256=Qk52c6LIA2eMHUNY9Vy4STJSpnhLMdJ_t-3ljqhSr4k,5081
 keras_hub/tokenizers/__init__.py,sha256=XFOxDmM1Mz9TxiE8ICZK_-yTTyRFywUUiVwRIzz2QZ8,4770
 keras_hub/utils/__init__.py,sha256=jXPqVGBpJr_PpYmqD8aDG-fRMlxH-ulqCR2SZMn288Y,646
-keras_hub-0.23.0.dev0.dist-info/METADATA,sha256=pcenvkHtaEmmEZfHgBiloyOMR3yiJd2rMw4s-Gak0G8,7376
-keras_hub-0.23.0.dev0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-keras_hub-0.23.0.dev0.dist-info/top_level.txt,sha256=N4J6piIWBKa38A4uV-CnIopnOEf8mHAbkNXafXm_CuA,10
-keras_hub-0.23.0.dev0.dist-info/RECORD,,
+keras_hub-0.24.0.dist-info/METADATA,sha256=ATsBvUOnq3XQufDS9C6QhAoNdujNzuHukNQdsxb3dVE,7371
+keras_hub-0.24.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+keras_hub-0.24.0.dist-info/top_level.txt,sha256=N4J6piIWBKa38A4uV-CnIopnOEf8mHAbkNXafXm_CuA,10
+keras_hub-0.24.0.dist-info/RECORD,,