keras-hub 0.21.1.dev0__py3-none-any.whl → 0.22.0.dev0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- keras_hub/layers/__init__.py +9 -0
- keras_hub/models/__init__.py +47 -0
- keras_hub/src/layers/modeling/transformer_encoder.py +6 -3
- keras_hub/src/layers/preprocessing/multi_segment_packer.py +17 -3
- keras_hub/src/layers/preprocessing/start_end_packer.py +24 -6
- keras_hub/src/models/backbone.py +13 -10
- keras_hub/src/models/clip/clip_backbone.py +3 -102
- keras_hub/src/models/clip/clip_layers.py +295 -0
- keras_hub/src/models/clip/clip_preprocessor.py +57 -48
- keras_hub/src/models/clip/clip_text_encoder.py +2 -2
- keras_hub/src/models/clip/clip_vision_encoder.py +3 -3
- keras_hub/src/models/deit/__init__.py +5 -0
- keras_hub/src/models/deit/deit_backbone.py +154 -0
- keras_hub/src/models/deit/deit_image_classifier.py +171 -0
- keras_hub/src/models/deit/deit_image_classifier_preprocessor.py +12 -0
- keras_hub/src/models/deit/deit_image_converter.py +8 -0
- keras_hub/src/models/deit/deit_layers.py +519 -0
- keras_hub/src/models/deit/deit_presets.py +49 -0
- keras_hub/src/models/dinov2/__init__.py +5 -0
- keras_hub/src/models/dinov2/dinov2_backbone.py +228 -0
- keras_hub/src/models/dinov2/dinov2_image_converter.py +8 -0
- keras_hub/src/models/dinov2/dinov2_layers.py +886 -0
- keras_hub/src/models/dinov2/dinov2_presets.py +89 -0
- keras_hub/src/models/esm/__init__.py +5 -0
- keras_hub/src/models/esm/esm_attention.py +95 -0
- keras_hub/src/models/esm/esm_backbone.py +229 -0
- keras_hub/src/models/esm/esm_classifier.py +184 -0
- keras_hub/src/models/esm/esm_classifier_preprocessor.py +135 -0
- keras_hub/src/models/esm/esm_encoder.py +134 -0
- keras_hub/src/models/esm/esm_masked_plm.py +117 -0
- keras_hub/src/models/esm/esm_masked_plm_preprocessor.py +143 -0
- keras_hub/src/models/esm/esm_presets.py +53 -0
- keras_hub/src/models/esm/esm_tokenizer.py +82 -0
- keras_hub/src/models/flux/flux_text_to_image_preprocessor.py +6 -2
- keras_hub/src/models/gemma/gemma_attention.py +1 -1
- keras_hub/src/models/gemma3/gemma3_backbone.py +2 -2
- keras_hub/src/models/gemma3/gemma3_interleave_embeddings.py +1 -1
- keras_hub/src/models/hgnetv2/__init__.py +5 -0
- keras_hub/src/models/hgnetv2/hgnetv2_backbone.py +193 -0
- keras_hub/src/models/hgnetv2/hgnetv2_encoder.py +148 -0
- keras_hub/src/models/hgnetv2/hgnetv2_image_classifier.py +216 -0
- keras_hub/src/models/hgnetv2/hgnetv2_image_classifier_preprocessor.py +14 -0
- keras_hub/src/models/hgnetv2/hgnetv2_image_converter.py +8 -0
- keras_hub/src/models/hgnetv2/hgnetv2_layers.py +918 -0
- keras_hub/src/models/hgnetv2/hgnetv2_presets.py +58 -0
- keras_hub/src/models/llama3/llama3_presets.py +3 -3
- keras_hub/src/models/mistral/mistral_presets.py +17 -1
- keras_hub/src/models/mixtral/mixtral_presets.py +2 -2
- keras_hub/src/models/mobilenet/mobilenet_presets.py +4 -4
- keras_hub/src/models/pali_gemma/pali_gemma_backbone.py +2 -2
- keras_hub/src/models/pali_gemma/pali_gemma_causal_lm.py +2 -2
- keras_hub/src/models/pali_gemma/pali_gemma_presets.py +17 -17
- keras_hub/src/models/qwen3/__init__.py +5 -0
- keras_hub/src/models/qwen3/qwen3_attention.py +369 -0
- keras_hub/src/models/qwen3/qwen3_backbone.py +191 -0
- keras_hub/src/models/qwen3/qwen3_causal_lm.py +390 -0
- keras_hub/src/models/qwen3/qwen3_causal_lm_preprocessor.py +10 -0
- keras_hub/src/models/qwen3/qwen3_decoder.py +309 -0
- keras_hub/src/models/qwen3/qwen3_layernorm.py +38 -0
- keras_hub/src/models/qwen3/qwen3_presets.py +73 -0
- keras_hub/src/models/qwen3/qwen3_tokenizer.py +48 -0
- keras_hub/src/models/qwen_moe/qwen_moe_attention.py +1 -0
- keras_hub/src/models/qwen_moe/qwen_moe_presets.py +2 -2
- keras_hub/src/models/roformer_v2/roformer_v2_attention.py +0 -2
- keras_hub/src/models/stable_diffusion_3/flow_match_euler_discrete_scheduler.py +16 -7
- keras_hub/src/models/stable_diffusion_3/mmdit.py +61 -4
- keras_hub/src/models/stable_diffusion_3/stable_diffusion_3_backbone.py +31 -32
- keras_hub/src/models/stable_diffusion_3/stable_diffusion_3_image_to_image.py +1 -0
- keras_hub/src/models/stable_diffusion_3/stable_diffusion_3_inpaint.py +1 -0
- keras_hub/src/models/stable_diffusion_3/stable_diffusion_3_text_to_image.py +1 -0
- keras_hub/src/models/stable_diffusion_3/stable_diffusion_3_text_to_image_preprocessor.py +6 -2
- keras_hub/src/models/vit/vit_backbone.py +31 -11
- keras_hub/src/models/vit/vit_image_converter.py +0 -70
- keras_hub/src/models/vit/vit_layers.py +33 -18
- keras_hub/src/models/vit/vit_presets.py +11 -11
- keras_hub/src/utils/keras_utils.py +17 -0
- keras_hub/src/utils/preset_utils.py +19 -4
- keras_hub/src/utils/tensor_utils.py +14 -0
- keras_hub/src/utils/transformers/convert_deit.py +155 -0
- keras_hub/src/utils/transformers/convert_dinov2.py +180 -0
- keras_hub/src/utils/transformers/convert_esm.py +159 -0
- keras_hub/src/utils/transformers/convert_llama3.py +6 -0
- keras_hub/src/utils/transformers/convert_qwen3.py +145 -0
- keras_hub/src/utils/transformers/export/gemma.py +89 -0
- keras_hub/src/utils/transformers/export/hf_exporter.py +98 -0
- keras_hub/src/utils/transformers/preset_loader.py +14 -2
- keras_hub/src/version.py +1 -1
- keras_hub/tokenizers/__init__.py +1 -0
- {keras_hub-0.21.1.dev0.dist-info → keras_hub-0.22.0.dev0.dist-info}/METADATA +4 -4
- {keras_hub-0.21.1.dev0.dist-info → keras_hub-0.22.0.dev0.dist-info}/RECORD +92 -48
- keras_hub/src/models/clip/clip_encoder_block.py +0 -111
- keras_hub/src/models/clip/clip_vision_embedding.py +0 -101
- {keras_hub-0.21.1.dev0.dist-info → keras_hub-0.22.0.dev0.dist-info}/WHEEL +0 -0
- {keras_hub-0.21.1.dev0.dist-info → keras_hub-0.22.0.dev0.dist-info}/top_level.txt +0 -0
keras_hub/src/utils/transformers/export/gemma.py ADDED

@@ -0,0 +1,89 @@
+import keras.ops as ops
+
+
+def get_gemma_config(backbone):
+    hf_config = {
+        "vocab_size": backbone.vocabulary_size,
+        "num_hidden_layers": backbone.num_layers,
+        "num_attention_heads": backbone.num_query_heads,
+        "num_key_value_heads": backbone.num_key_value_heads,
+        "hidden_size": backbone.hidden_dim,
+        "intermediate_size": backbone.intermediate_dim // 2,
+        "head_dim": backbone.head_dim,
+        "max_position_embeddings": 8192,
+    }
+    return hf_config
+
+
+def get_gemma_weights_map(backbone):
+    weights_dict = {}
+
+    # Map token embedding
+    token_embedding_layer = backbone.get_layer("token_embedding")
+    weights_dict["model.embed_tokens.weight"] = token_embedding_layer.weights[0]
+
+    for i in range(backbone.num_layers):
+        decoder_layer = backbone.get_layer(f"decoder_block_{i}")
+
+        # Pre-attention normalization
+        weights_dict[f"model.layers.{i}.input_layernorm.weight"] = (
+            decoder_layer.pre_attention_norm.weights[0]
+        )
+
+        # Attention query projection
+        query_kernel = decoder_layer.attention.query_dense.weights[0]
+        query_kernel = ops.transpose(query_kernel, axes=(1, 0, 2))
+        query_kernel = ops.reshape(query_kernel, (-1, backbone.hidden_dim))
+        query_kernel = ops.transpose(query_kernel)
+        weights_dict[f"model.layers.{i}.self_attn.q_proj.weight"] = query_kernel
+
+        # Attention key projection
+        key_kernel = decoder_layer.attention.key_dense.weights[0][0]
+        weights_dict[f"model.layers.{i}.self_attn.k_proj.weight"] = (
+            ops.transpose(key_kernel)
+        )
+
+        # Attention value projection
+        value_kernel = decoder_layer.attention.value_dense.weights[0][0]
+        weights_dict[f"model.layers.{i}.self_attn.v_proj.weight"] = (
+            ops.transpose(value_kernel)
+        )
+
+        # Attention output projection
+        out_kernel = decoder_layer.attention.output_dense.weights[0]
+        out_kernel = ops.transpose(out_kernel, axes=(2, 0, 1))
+        out_kernel = ops.reshape(out_kernel, (backbone.hidden_dim, -1))
+        weights_dict[f"model.layers.{i}.self_attn.o_proj.weight"] = out_kernel
+
+        # Post-attention normalization
+        weights_dict[f"model.layers.{i}.post_attention_layernorm.weight"] = (
+            decoder_layer.pre_ffw_norm.weights[0]
+        )
+
+        # MLP gate projection
+        gate_kernel = decoder_layer.gating_ffw.weights[0]
+        weights_dict[f"model.layers.{i}.mlp.gate_proj.weight"] = ops.transpose(
+            gate_kernel
+        )
+
+        # MLP up projection
+        up_kernel = decoder_layer.gating_ffw_2.weights[0]
+        weights_dict[f"model.layers.{i}.mlp.up_proj.weight"] = ops.transpose(
+            up_kernel
+        )
+
+        # MLP down projection
+        down_kernel = decoder_layer.ffw_linear.weights[0]
+        weights_dict[f"model.layers.{i}.mlp.down_proj.weight"] = ops.transpose(
+            down_kernel
+        )
+
+    # Map final normalization
+    weights_dict["model.norm.weight"] = backbone.get_layer(
+        "final_normalization"
+    ).weights[0]
+
+    # Tie weights, but clone to avoid sharing memory issues
+    weights_dict["lm_head.weight"] = ops.copy(token_embedding_layer.weights[0])
+
+    return weights_dict
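
The map above keys Keras variables by their Hugging Face parameter names ("model.layers.{i}.self_attn.q_proj.weight", ...) and copies the tied token embedding into "lm_head.weight". Below is a minimal sketch of exercising these helpers on a small, randomly initialized backbone; the GemmaBackbone constructor arguments are assumptions that mirror the attributes read by get_gemma_config and are not part of this diff.

# Sketch only, not part of the diff: build a tiny GemmaBackbone and
# inspect the exported config and weight map.
import keras_hub
from keras_hub.src.utils.transformers.export.gemma import (
    get_gemma_config,
    get_gemma_weights_map,
)

backbone = keras_hub.models.GemmaBackbone(
    vocabulary_size=1000,
    num_layers=2,
    num_query_heads=4,
    num_key_value_heads=1,
    hidden_dim=64,
    intermediate_dim=256,  # get_gemma_config halves this for the HF config.
    head_dim=16,
)
print(get_gemma_config(backbone))
# Keys follow the Hugging Face naming scheme, e.g.
# "model.layers.0.self_attn.q_proj.weight".
print(sorted(get_gemma_weights_map(backbone))[:4])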
keras_hub/src/utils/transformers/export/hf_exporter.py ADDED

@@ -0,0 +1,98 @@
+import json
+import os
+import shutil
+import warnings
+
+import keras
+
+from keras_hub.src.utils.transformers.export.gemma import get_gemma_config
+from keras_hub.src.utils.transformers.export.gemma import get_gemma_weights_map
+
+MODEL_CONFIGS = {
+    "GemmaBackbone": get_gemma_config,
+    # Add future models here, e.g., "LlamaBackbone": get_llama_config,
+}
+
+MODEL_EXPORTERS = {
+    "GemmaBackbone": get_gemma_weights_map,
+    # Add future models here, e.g., "LlamaBackbone": get_llama_weights_map,
+}
+
+
+def export_to_safetensors(keras_model, path):
+    """Converts a Keras model to Hugging Face safetensor format.
+
+    It does the following:
+    - Extracts and maps weights from the Keras backbone to safetensors.
+    - Saves the configuration as 'config.json'.
+    - Saves weights in 'model.safetensors'.
+    - Saves tokenizer assets.
+
+    Args:
+        keras_model: The Keras model to convert.
+        path: str. Path of the directory to which the safetensors file,
+            config and tokenizer will be saved.
+    """
+    backend = keras.config.backend()
+    backbone = keras_model.backbone
+    model_type = backbone.__class__.__name__
+
+    if model_type not in MODEL_CONFIGS:
+        raise ValueError(f"Config not implemented for {model_type}")
+
+    if model_type not in MODEL_EXPORTERS:
+        raise ValueError(f"Exporter not implemented for {model_type}")
+
+    get_config_fn = MODEL_CONFIGS[model_type]
+    hf_config = get_config_fn(backbone)
+
+    get_weights_fn = MODEL_EXPORTERS[model_type]
+    weights_dict = get_weights_fn(backbone)
+
+    if not weights_dict:
+        raise ValueError("No weights to save.")
+
+    # Save config
+    os.makedirs(path, exist_ok=True)
+    config_path = os.path.join(path, "config.json")
+    with open(config_path, "w") as f:
+        json.dump(hf_config, f)
+
+    # Save weights based on backend
+    weights_path = os.path.join(path, "model.safetensors")
+    if backend == "torch":
+        from safetensors.torch import save_file
+
+        weights_dict_contiguous = {
+            k: v.value.contiguous() if hasattr(v, "value") else v.contiguous()
+            for k, v in weights_dict.items()
+        }
+        save_file(
+            weights_dict_contiguous, weights_path, metadata={"format": "pt"}
+        )
+    elif backend == "tensorflow":
+        from safetensors.tensorflow import save_file
+
+        save_file(weights_dict, weights_path, metadata={"format": "pt"})
+    elif backend == "jax":
+        from safetensors.flax import save_file
+
+        save_file(weights_dict, weights_path, metadata={"format": "pt"})
+    else:
+        raise ValueError(f"Unsupported backend: {backend}")
+
+    # Save tokenizer assets
+    keras_model.preprocessor.tokenizer.save_assets(path)
+
+    # Rename vocabulary file
+    vocab_spm_path = os.path.join(path, "vocabulary.spm")
+    tokenizer_model_path = os.path.join(path, "tokenizer.model")
+    if os.path.exists(vocab_spm_path):
+        shutil.move(vocab_spm_path, tokenizer_model_path)
+    else:
+        warnings.warn(
+            f"{vocab_spm_path} not found. Tokenizer may not load "
+            "correctly. Ensure that the tokenizer configuration "
+            "is correct and that the vocabulary file is present "
+            "in the original model."
+        )
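
export_to_safetensors dispatches on the backbone class name, writes config.json and model.safetensors into the target directory, then saves the tokenizer assets and renames vocabulary.spm to tokenizer.model. A minimal usage sketch under stated assumptions follows; the "gemma_2b_en" preset handle and the downstream transformers call are illustrative, not asserted by this diff.

# Sketch only: export a KerasHub Gemma causal LM to a Hugging Face style
# directory, then (optionally) reload it with the transformers library.
import keras_hub
from keras_hub.src.utils.transformers.export.hf_exporter import (
    export_to_safetensors,
)

causal_lm = keras_hub.models.GemmaCausalLM.from_preset("gemma_2b_en")
export_to_safetensors(causal_lm, "./gemma_2b_en_hf")
# The directory now holds config.json, model.safetensors and tokenizer.model:
# from transformers import AutoModelForCausalLM
# hf_model = AutoModelForCausalLM.from_pretrained("./gemma_2b_en_hf")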
keras_hub/src/utils/transformers/preset_loader.py CHANGED

@@ -6,7 +6,10 @@ from keras_hub.src.utils.preset_utils import jax_memory_cleanup
 from keras_hub.src.utils.transformers import convert_albert
 from keras_hub.src.utils.transformers import convert_bart
 from keras_hub.src.utils.transformers import convert_bert
+from keras_hub.src.utils.transformers import convert_deit
+from keras_hub.src.utils.transformers import convert_dinov2
 from keras_hub.src.utils.transformers import convert_distilbert
+from keras_hub.src.utils.transformers import convert_esm
 from keras_hub.src.utils.transformers import convert_gemma
 from keras_hub.src.utils.transformers import convert_gpt2
 from keras_hub.src.utils.transformers import convert_llama3
@@ -14,6 +17,7 @@ from keras_hub.src.utils.transformers import convert_mistral
 from keras_hub.src.utils.transformers import convert_mixtral
 from keras_hub.src.utils.transformers import convert_pali_gemma
 from keras_hub.src.utils.transformers import convert_qwen
+from keras_hub.src.utils.transformers import convert_qwen3
 from keras_hub.src.utils.transformers import convert_qwen_moe
 from keras_hub.src.utils.transformers import convert_vit
 from keras_hub.src.utils.transformers.safetensor_utils import SafetensorLoader
@@ -29,9 +33,15 @@ class TransformersPresetLoader(PresetLoader):
             self.converter = convert_bart
         elif model_type == "bert":
             self.converter = convert_bert
+        elif model_type == "deit":
+            self.converter = convert_deit
         elif model_type == "distilbert":
             self.converter = convert_distilbert
-        elif model_type
+        elif model_type in ("dinov2", "dinov2_with_registers"):
+            self.converter = convert_dinov2
+        elif model_type == "esm":
+            self.converter = convert_esm
+        elif model_type in ("gemma", "gemma2"):
             self.converter = convert_gemma
         elif model_type == "gpt2":
             self.converter = convert_gpt2
@@ -50,6 +60,8 @@ class TransformersPresetLoader(PresetLoader):
             self.converter = convert_mixtral
         elif model_type == "qwen2_moe":
             self.converter = convert_qwen_moe
+        elif model_type == "qwen3":
+            self.converter = convert_qwen3
         else:
             raise ValueError(
                 "KerasHub has no converter for huggingface/transformers models "
@@ -79,7 +91,7 @@ class TransformersPresetLoader(PresetLoader):
             cls, load_weights, load_task_weights, **kwargs
         )
         # Support loading the classification head for classifier models.
-        if
+        if "ForImageClassification" in architecture:
             kwargs["num_classes"] = len(self.config["id2label"])
         task = super().load_task(cls, load_weights, load_task_weights, **kwargs)
         if load_task_weights:
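
The loader keys off the "model_type" field of a checkpoint's config.json, so Hugging Face checkpoints reporting "deit", "dinov2"/"dinov2_with_registers", "esm", or "qwen3" are now routed to the matching converter, and image-classifier tasks pick up num_classes from id2label. A minimal sketch of loading such checkpoints through the existing from_preset API follows; the specific Hugging Face repo names are illustrative assumptions, not taken from this diff.

# Sketch only: the "hf://" handles below are examples of checkpoints whose
# config.json model_type now resolves to the new converters.
import keras_hub

qwen3_lm = keras_hub.models.CausalLM.from_preset("hf://Qwen/Qwen3-0.6B")
dinov2 = keras_hub.models.Backbone.from_preset("hf://facebook/dinov2-base")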
keras_hub/src/version.py CHANGED

keras_hub/tokenizers/__init__.py CHANGED

@@ -28,6 +28,7 @@ from keras_hub.src.models.distil_bert.distil_bert_tokenizer import (
 from keras_hub.src.models.electra.electra_tokenizer import (
     ElectraTokenizer as ElectraTokenizer,
 )
+from keras_hub.src.models.esm.esm_tokenizer import ESMTokenizer as ESMTokenizer
 from keras_hub.src.models.f_net.f_net_tokenizer import (
     FNetTokenizer as FNetTokenizer,
 )
{keras_hub-0.21.1.dev0.dist-info → keras_hub-0.22.0.dev0.dist-info}/METADATA CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: keras-hub
-Version: 0.
+Version: 0.22.0.dev0
 Summary: Pretrained models for Keras.
 Author-email: Keras team <keras-users@googlegroups.com>
 License-Expression: Apache-2.0
@@ -18,9 +18,9 @@ Classifier: Operating System :: MacOS
 Classifier: Intended Audience :: Science/Research
 Classifier: Topic :: Scientific/Engineering
 Classifier: Topic :: Software Development
-Requires-Python: >=3.
+Requires-Python: >=3.10
 Description-Content-Type: text/markdown
-Requires-Dist: keras>=3.
+Requires-Dist: keras>=3.8
 Requires-Dist: absl-py
 Requires-Dist: numpy
 Requires-Dist: packaging
@@ -31,7 +31,7 @@ Requires-Dist: tensorflow-text; platform_system != "Windows"
 
 # KerasHub: Multi-framework Pretrained Models
 [](https://github.com/keras-team/keras-hub/actions?query=workflow%3ATests+branch%3Amaster)
-![
 [](https://github.com/keras-team/keras-hub/issues)
 
 > [!IMPORTANT]
{keras_hub-0.21.1.dev0.dist-info → keras_hub-0.22.0.dev0.dist-info}/RECORD CHANGED

@@ -1,11 +1,11 @@
 keras_hub/__init__.py,sha256=bJbUZkqwhZvTb1Tqx1fbkq6mzBYiEyq-Hin3oQIkhdE,558
-keras_hub/layers/__init__.py,sha256=
+keras_hub/layers/__init__.py,sha256=SMkchjCbNydCBULOFC1pzZRaD-KWZ2CaH6CEVf1MRWE,5428
 keras_hub/metrics/__init__.py,sha256=KYalsMPBnfwim9BdGHFfJ5WxUKFXOQ1QoKIMT_0lwlM,439
-keras_hub/models/__init__.py,sha256=
+keras_hub/models/__init__.py,sha256=UXMwKVZ7bg-AOrq2xsl8M0idUAS89pkdCvQKhzL-D3I,28439
 keras_hub/samplers/__init__.py,sha256=aFQIkiqbZpi8vjrPp2MVII4QUfE-eQjra5fMeHsoy7k,886
 keras_hub/src/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 keras_hub/src/api_export.py,sha256=9pQZK27JObxWZ96QPLBp1OBsjWigh1iuV6RglPGMRk0,1499
-keras_hub/src/version.py,sha256=
+keras_hub/src/version.py,sha256=dvS7_pZSOD1VISZuD4cqYlEHVG0T9alhzm5Og63Eg2g,211
 keras_hub/src/layers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 keras_hub/src/layers/modeling/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 keras_hub/src/layers/modeling/alibi_bias.py,sha256=1XBTHI52L_iJDhN_w5ydu_iMhCuTgQAxEPwcLA6BPuk,4411
@@ -22,17 +22,17 @@ keras_hub/src/layers/modeling/rotary_embedding.py,sha256=BuMD2dCyZi73Eokddx8Q9cF
 keras_hub/src/layers/modeling/sine_position_encoding.py,sha256=NAPW9HaVTMNZgUJNzA3l1B3C_FNvaY7IW-5tQgFgnNg,3453
 keras_hub/src/layers/modeling/token_and_position_embedding.py,sha256=Q-MhVHZSd_W2eWjDCj-s7wo3z8UHmgZ-7j7hElkaXBQ,5263
 keras_hub/src/layers/modeling/transformer_decoder.py,sha256=50KLxaZwaQglWIcFotx3BFh6RwCMXRvGZNXHQBrJ5KM,21172
-keras_hub/src/layers/modeling/transformer_encoder.py,sha256=
+keras_hub/src/layers/modeling/transformer_encoder.py,sha256=kKPGfjpdhqGJs4MmRyx7fk9xU_2TAS-gLGhq9FZdU0w,10828
 keras_hub/src/layers/modeling/transformer_layer_utils.py,sha256=FuznrW33iG50B-VDN8R1RjuA5JG72yNMJ1TBgWLxR0E,3487
 keras_hub/src/layers/preprocessing/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 keras_hub/src/layers/preprocessing/audio_converter.py,sha256=YGh_kQw65a1Z6S5zzSNVP-ChyLYHq3-eOYpOS53xIN8,4156
 keras_hub/src/layers/preprocessing/image_converter.py,sha256=p2CoSV_zfHIVZqLo1hQk2BdOL_RtBlr5wUtgpAmtwwY,15926
 keras_hub/src/layers/preprocessing/masked_lm_mask_generator.py,sha256=itxWq3FHYlR0I7jKarQlSKbSmRLl9ut_UTSP3ZDwP0A,8162
-keras_hub/src/layers/preprocessing/multi_segment_packer.py,sha256=
+keras_hub/src/layers/preprocessing/multi_segment_packer.py,sha256=APP62tF9Tw4zah7oL5maSYRXMwcR4RwicZMhQq2wRxY,12509
 keras_hub/src/layers/preprocessing/preprocessing_layer.py,sha256=WyX41b9Ev_YJ5uVQVOAqD0PQasMOPDoyDjl_PkzkAkE,687
 keras_hub/src/layers/preprocessing/random_deletion.py,sha256=_EmBt4d8TTPLF3OQhA8HoBmej-BX_BocbjeW6jzi6Wo,9768
 keras_hub/src/layers/preprocessing/random_swap.py,sha256=cV7HqMwu_JHTbhe9UMVAsZdOTLsukyZDteEBYp0idiM,9509
-keras_hub/src/layers/preprocessing/start_end_packer.py,sha256=
+keras_hub/src/layers/preprocessing/start_end_packer.py,sha256=F_yCyI6yyxAfunb37C0AzFX3lKjaZg08HMjUXOpjgwc,8642
 keras_hub/src/metrics/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 keras_hub/src/metrics/bleu.py,sha256=pnid5azpAxO6vKEfUtAby3nH29OGbwYKgVGOGeoaA3I,13694
 keras_hub/src/metrics/edit_distance.py,sha256=kjhe8uNjvv8aN49RyrKAbNi7a8_OlB8fMza0J_CfNQg,6353
@@ -43,7 +43,7 @@ keras_hub/src/metrics/rouge_n.py,sha256=JoFtmgjF4Ic263ny6bfD6vMHKreH9le3HnOOxemu
 keras_hub/src/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 keras_hub/src/models/audio_to_text.py,sha256=XoOjXtKBX6K1fz-zOXcdVo3FpjuxCMnJZh2LQcYXb_0,2726
 keras_hub/src/models/audio_to_text_preprocessor.py,sha256=GS-WWyJ6aSsPRxi_0bxvxA00h2mT2FEwSdAoQXAUYVI,3249
-keras_hub/src/models/backbone.py,sha256=
+keras_hub/src/models/backbone.py,sha256=utZP09_u5FpMGiq8jl3W98TCW8CysndwLw2VCs3BHz8,11780
 keras_hub/src/models/causal_lm.py,sha256=ReaF-i3SHsCkHh4c28jM72QjMQ8x7yiCwG39FRb-7KE,16786
 keras_hub/src/models/causal_lm_preprocessor.py,sha256=YY7VJZicdmnjDSWi9g4_pEpd5bdJK166GlWcapvokF0,6663
 keras_hub/src/models/feature_pyramid_backbone.py,sha256=clEW-TTQSVJ_5qFNdDF0iABkin1p_xlBUFjJrC7T0IA,2247
@@ -102,15 +102,14 @@ keras_hub/src/models/bloom/bloom_decoder.py,sha256=fda8iX4wzx2M8AoLX7fDHkyoir89K
 keras_hub/src/models/bloom/bloom_presets.py,sha256=7RptuZi__oJyiX6X4xE5ToANcEwsmLDqhuEKwFyKIPU,3215
 keras_hub/src/models/bloom/bloom_tokenizer.py,sha256=6Konh7B_L9BqgjkA0z8-APFpr9sQmQPuAJFZSsCIClU,2574
 keras_hub/src/models/clip/__init__.py,sha256=NcjBkTNWxLY4Ss9wV-NW9iS8k6AwMiS2ARMcxr6KEps,245
-keras_hub/src/models/clip/clip_backbone.py,sha256=
-keras_hub/src/models/clip/clip_encoder_block.py,sha256=4Jxqb0Pq3Joh-lHDq-Y2c8v-gcMm1sDjPID4eRGK0DE,3823
+keras_hub/src/models/clip/clip_backbone.py,sha256=DRAXEJFVPcgf1-AeVDDmuoxplwTCl4Xt7-D4whM4w04,6619
 keras_hub/src/models/clip/clip_image_converter.py,sha256=XyHEDB4RbYiveMN1hLQxHgGADb_goyWyE0TceAd2owM,330
-keras_hub/src/models/clip/
+keras_hub/src/models/clip/clip_layers.py,sha256=ns3Zzm5UzMpm-ynyU3aJu2d4i3HmzNiZKdAea624ako,10184
+keras_hub/src/models/clip/clip_preprocessor.py,sha256=xj-FzK7gLIUyvTo2iM1zHh9f2Ff25tZCYFxsPE3dwFU,4771
 keras_hub/src/models/clip/clip_presets.py,sha256=b9Azial1dUtuNV96Q0Ahz-bcBRmlIjnZPUzMvAMb8OY,3348
-keras_hub/src/models/clip/clip_text_encoder.py,sha256=
+keras_hub/src/models/clip/clip_text_encoder.py,sha256=lZa9Ditvn4DH9As3NEML_Wl6g2qeYer_LzRHGu1hqCM,5449
 keras_hub/src/models/clip/clip_tokenizer.py,sha256=6gIm_LWRbCeBQUI9M2gA8-OXb4tXGygixkbcL6joV1c,7444
-keras_hub/src/models/clip/
-keras_hub/src/models/clip/clip_vision_encoder.py,sha256=q62MXySZN38uCsjqq8cttfBxD7P5abaKQV2i8_u4N6E,6385
+keras_hub/src/models/clip/clip_vision_encoder.py,sha256=C5grKgIgFF8ls-kkGdYorpw5tbfgbmBQe6VJg_3yWII,6368
 keras_hub/src/models/cspnet/__init__.py,sha256=TOpvk2cfOVv1bPA1BOGZj0mhmhc6E98zZmW9e0PIvhk,257
 keras_hub/src/models/cspnet/cspnet_backbone.py,sha256=meHzxubG_9vHQHSelDfrROaQERkDiWkjTtk_gKaWsDc,42457
 keras_hub/src/models/cspnet/cspnet_image_classifier.py,sha256=JqfBHIBTFxaLOyAWx6TdXs0aAOMbcCx1oo47RoQnytc,510
@@ -135,12 +134,24 @@ keras_hub/src/models/deeplab_v3/deeplab_v3_image_segmeter_preprocessor.py,sha256
 keras_hub/src/models/deeplab_v3/deeplab_v3_layers.py,sha256=mz9nG55gdXSTDE96AXgeTCwUFB95DIpTuqrvWIt5Lco,7840
 keras_hub/src/models/deeplab_v3/deeplab_v3_presets.py,sha256=ZKYY8A7mV2QvwXwjDUd9xAbVHo58-Hgj_IqNUbuyCIU,625
 keras_hub/src/models/deeplab_v3/deeplab_v3_segmenter.py,sha256=pubi30sPJKLOpz9fRQff2FZt_53KBvwf2uyaJ5YL7J8,3726
+keras_hub/src/models/deit/__init__.py,sha256=5XUICYa-poqErbmMLArBKCrSxC7wsIiQwUpuCnvGt_E,245
+keras_hub/src/models/deit/deit_backbone.py,sha256=R5pBOqe8vcvD8VaRnsy_zIRIz6BLnUbkTeKUOoGNHPA,5942
+keras_hub/src/models/deit/deit_image_classifier.py,sha256=pUS2638yBAxEBxcJoHyLABsgjCWv_Y0Mj_8u0YgDPdI,5758
+keras_hub/src/models/deit/deit_image_classifier_preprocessor.py,sha256=s5pTcsUjlt1oIXFWIu-9gf2-sBesAyrjJIYmFOB96Xs,514
+keras_hub/src/models/deit/deit_image_converter.py,sha256=wEGCLHS_i4wF9WA4m7uUXcHNbwf6TYgvPoM6C_t0rpM,330
+keras_hub/src/models/deit/deit_layers.py,sha256=A80-UTHEUV8g5rEG-fr8OQpGe3HeoYlYwpoDCtq71ZU,17278
+keras_hub/src/models/deit/deit_presets.py,sha256=5VwMAEg16RLWOjcdZ-BCYVlUlEzBfHz-6wCSOIhWGVQ,1786
 keras_hub/src/models/densenet/__init__.py,sha256=r7StyamnWeeZxOk9r4ZYNbS_YVhu9YGPyXhNxljvdPg,269
 keras_hub/src/models/densenet/densenet_backbone.py,sha256=f2nfsXyXQert2aYHq-L-JZtp8inq1fs1K47rzZQ9nTI,6744
 keras_hub/src/models/densenet/densenet_image_classifier.py,sha256=ye-Ix3oU42pfsDoh-h1PG4di1kzldO0ZO7Nj304p_X4,544
 keras_hub/src/models/densenet/densenet_image_classifier_preprocessor.py,sha256=xDZbTw_h6pjLDzf8QmbDyMnMsFzgh-dPX1ldg9kddhg,563
 keras_hub/src/models/densenet/densenet_image_converter.py,sha256=DoxYlJVZ9uaabFhVjWOmzvhONoc8KNcQj2vQ6Z1AUpU,354
 keras_hub/src/models/densenet/densenet_presets.py,sha256=d2GEB9cWYrzP8Qj1w8CWiRW976MibQBuk_YQYvgCzr4,1222
+keras_hub/src/models/dinov2/__init__.py,sha256=qacZi82EfAloVND4gDLZjqgR5_yVdz_dc4mMKyCsjOA,257
+keras_hub/src/models/dinov2/dinov2_backbone.py,sha256=kwzd5eqftMS0m5v1HB_4y7JiHxp13ECgG9dNsDoknWo,9491
+keras_hub/src/models/dinov2/dinov2_image_converter.py,sha256=gfFROdYV5rOzo3kJFlRvRHYjek8z9YirKfrFwlVJO3g,342
+keras_hub/src/models/dinov2/dinov2_layers.py,sha256=-G3elRWDy09_VPJDJa0qYS5P8vkBGgxPooMZhy2ifu0,33140
+keras_hub/src/models/dinov2/dinov2_presets.py,sha256=ho493GPH98K4LH1E54UV2qZZ4h7Un9ylbBmMQjNoKh4,2937
 keras_hub/src/models/distil_bert/__init__.py,sha256=3Z0w-Mt3aOR0u9RGzjHQ7B3J3qBF2pGjupDGQ9yyzoc,303
 keras_hub/src/models/distil_bert/distil_bert_backbone.py,sha256=rnAf_GokB3wAeJwVZtgUKQO_bKJIa8RavhL_ykTJpNw,6440
 keras_hub/src/models/distil_bert/distil_bert_masked_lm.py,sha256=axeZd5UcxFr3_Q8H4yG10CINh93wbcyjlPLauqe5N9E,4289
@@ -162,6 +173,16 @@ keras_hub/src/models/electra/__init__.py,sha256=vaXl_uQx_oLeKZWxmc1NRgCJfHpYJ35J
 keras_hub/src/models/electra/electra_backbone.py,sha256=h-QuFxACBvbMktkyGV2pIgn6dQ-kudJB1i14ekwEaL4,9004
 keras_hub/src/models/electra/electra_presets.py,sha256=6f0WAYtDx5To4gvi6btN8I8y7yfc9ANchTHRKgCyIkg,2697
 keras_hub/src/models/electra/electra_tokenizer.py,sha256=Ll_EW-14i-OZr6appQEt5ceMUCeEadF4yPJHMwaRfVs,2729
+keras_hub/src/models/esm/__init__.py,sha256=_IlazeBwHkpetmLIZz3fFzC8CFcSnBRVQvw9nes4TN8,239
+keras_hub/src/models/esm/esm_attention.py,sha256=T21MVs9QDUe_8a53mcW3dJvJfaNZOg5lkMdxGhQdmFQ,3327
+keras_hub/src/models/esm/esm_backbone.py,sha256=ADIpeiYz16fw1PNvx2tX-51HsZ_AjR2wGLkXZHErWBg,8696
+keras_hub/src/models/esm/esm_classifier.py,sha256=35-_3U725JhzspQAO_4ZkTJ0Tuy0XKMVkSrpmFz2CaE,6049
+keras_hub/src/models/esm/esm_classifier_preprocessor.py,sha256=TXjGH8ttElEsfBLOMLrxP24uPCYVS78iCrnpsGwurII,5532
+keras_hub/src/models/esm/esm_encoder.py,sha256=FxqfM_amKnmzNJoTq-LKouKaf_huklbjLiQ37ip85Tc,4499
+keras_hub/src/models/esm/esm_masked_plm.py,sha256=FTNHrr0nRiuuO0Yqf5NSM48PehXWKMZvUVLBGET8X-8,3874
+keras_hub/src/models/esm/esm_masked_plm_preprocessor.py,sha256=jfpehbd1KN_s48KCPSUpzQf1YYeriuR6a81wmXSG8bE,6272
+keras_hub/src/models/esm/esm_presets.py,sha256=f3O0qhHoHGx-xXS4DuSw8fqxVEKmDaj45jput7OMz9M,1792
+keras_hub/src/models/esm/esm_tokenizer.py,sha256=6hKDWanN4Hfl6eSNXHiHJUcwSMDRL4gHEWxenaMI3Os,3079
 keras_hub/src/models/f_net/__init__.py,sha256=a3OAwgEVy3Rv88ZlBE9RYLrPCNteImhGkW-lSAq5hyI,249
 keras_hub/src/models/f_net/f_net_backbone.py,sha256=6vZEq2UgoJxU2-aEesdXZnyRbACxpMZQ1akyVbGH8wg,8290
 keras_hub/src/models/f_net/f_net_masked_lm.py,sha256=GDRtPdF4K2tPtnM6NqmMeZs6PCRwtBN5Bo1qIMeqwCU,3978
@@ -184,9 +205,9 @@ keras_hub/src/models/flux/flux_maths.py,sha256=2pnHW8HW7V2JZ8HIrUwE-UU4klpFQaOko
 keras_hub/src/models/flux/flux_model.py,sha256=K92PyeFHIp8SwXuxhv__XCEaQ2wqSW1jOb97I4S24Rw,8991
 keras_hub/src/models/flux/flux_presets.py,sha256=z7C_FbI1_F5YETXuWpc7Yh_0w-5N0eBQy6Oks_X9W88,54
 keras_hub/src/models/flux/flux_text_to_image.py,sha256=Rf5dD2EhG0bE8Gyg9sqaA8YEexS1kdraofIkxiZDjvc,4166
-keras_hub/src/models/flux/flux_text_to_image_preprocessor.py,sha256=
+keras_hub/src/models/flux/flux_text_to_image_preprocessor.py,sha256=2kI2vSZvTia5ISb4BVPgC_e1l5rkirLSjhm13P-UR_k,2362
 keras_hub/src/models/gemma/__init__.py,sha256=rVzOJMJ39bgVlT8UdC0t8PlN2c237GKTBmfHIsbPuOQ,251
-keras_hub/src/models/gemma/gemma_attention.py,sha256=
+keras_hub/src/models/gemma/gemma_attention.py,sha256=wmU5FgQu1Ajg-KHKVXTLHWH7pXqN4_zVJTCp_FXMcAs,10095
 keras_hub/src/models/gemma/gemma_backbone.py,sha256=GzAUSArw_pN9dtWQzTVhWDbW-XyWt4GyMcFLn9hwmh0,13391
 keras_hub/src/models/gemma/gemma_causal_lm.py,sha256=3OXaIXlrKqMIuUnBk-bUz-0SYFL-XkkQTWm8qRY2YII,16770
 keras_hub/src/models/gemma/gemma_causal_lm_preprocessor.py,sha256=bpKkEurWIfa6Kp9s4pz84-sBDSA6ZFNHP8nXG1fFQrg,2912
@@ -196,12 +217,12 @@ keras_hub/src/models/gemma/gemma_tokenizer.py,sha256=FhcyNL4lo63MqOhTQPFr07-u3Bd
 keras_hub/src/models/gemma/rms_normalization.py,sha256=fku-JEo2sNy-ytX7ySD1sRzdhRAPmYex_z8oFk1NiG8,833
 keras_hub/src/models/gemma3/__init__.py,sha256=oPFadkdK5DRLD6sYx83iTetY5daWuSzmJilLjokHcbU,257
 keras_hub/src/models/gemma3/gemma3_attention.py,sha256=VstFCTVsplcDNSgnyBcSpLgKn-pktJ39D5Ri-Bb7BQA,13628
-keras_hub/src/models/gemma3/gemma3_backbone.py,sha256=
+keras_hub/src/models/gemma3/gemma3_backbone.py,sha256=CaVUQAKrBd1b_7gF7dyTWLjJebzzMd24_3oUipVu5gE,16445
 keras_hub/src/models/gemma3/gemma3_causal_lm.py,sha256=U3C9TWlIz8VefAxQ0wJ6bDz18wqHBie8B26Ub_nFZs4,13843
 keras_hub/src/models/gemma3/gemma3_causal_lm_preprocessor.py,sha256=vjt4N-zr0Eb5kvkOR-WUgskDTNe64L_6tYnhyNb6xaE,29601
 keras_hub/src/models/gemma3/gemma3_decoder_block.py,sha256=6PLlpDxxF67stDv74fw9nNgUHBWmTLx6qGygJwyu5FY,10819
 keras_hub/src/models/gemma3/gemma3_image_converter.py,sha256=czi5JrTyKiK0nFzvonviBIX8jjvLHqvGNA9RyheB31k,536
-keras_hub/src/models/gemma3/gemma3_interleave_embeddings.py,sha256=
+keras_hub/src/models/gemma3/gemma3_interleave_embeddings.py,sha256=CfYdudk5En9iU6vEnrcrEWIztloD1r8VzF2extqAhAM,4616
 keras_hub/src/models/gemma3/gemma3_presets.py,sha256=tVxug3rX3w_lqZlFfyqUlVdOrfBjN0GJY5ooBx1Fe0M,5124
 keras_hub/src/models/gemma3/gemma3_tokenizer.py,sha256=ZaBclFIwzJkSXDuZMBQLHUKV8RWEdZ_dsJMvMcc3qXw,3215
 keras_hub/src/models/gemma3/gemma3_vision_encoder.py,sha256=7XI0oBjIfJItV5w90t5bWb3C2KzjhvDnIC7wjIq4Cns,20850
@@ -220,6 +241,14 @@ keras_hub/src/models/gpt_neo_x/gpt_neo_x_causal_lm.py,sha256=HriMXNVjGlFTjCIgfLR
 keras_hub/src/models/gpt_neo_x/gpt_neo_x_causal_lm_preprocessor.py,sha256=YiVz9qBHjQlwKgtUVrgBTFitHcX5pbmhhfHwaulyRxY,1957
 keras_hub/src/models/gpt_neo_x/gpt_neo_x_decoder.py,sha256=hmB81V0SuI6bEsxEuFkYgq58wbcrv1YLvmXGin5T3E0,9732
 keras_hub/src/models/gpt_neo_x/gpt_neo_x_tokenizer.py,sha256=aKso-8yGrynn3tZ5xm2egcXIBQo3__sWZDBtjmS3ZgU,1991
+keras_hub/src/models/hgnetv2/__init__.py,sha256=hGilfTnRPpVFS3YpRhJWEyK8CaPIzkRh6zUC1_5imaY,263
+keras_hub/src/models/hgnetv2/hgnetv2_backbone.py,sha256=eqVrbU2EyB2ToxK1g2QRW90zd5GyvJ8I7PKVBgqRpfY,7966
+keras_hub/src/models/hgnetv2/hgnetv2_encoder.py,sha256=VL6XCqyXieUPkqXS7fhsAT-EV6jzyN_i31EjsAizgVU,6464
+keras_hub/src/models/hgnetv2/hgnetv2_image_classifier.py,sha256=62Xual9pRBkU6G_RUdCblx68Z827SCA_5q9utCXxwa0,7897
+keras_hub/src/models/hgnetv2/hgnetv2_image_classifier_preprocessor.py,sha256=df7OKvJmz2UqOXrqECvI9QdVMVkVMWhK0go9sltajnI,553
+keras_hub/src/models/hgnetv2/hgnetv2_image_converter.py,sha256=qaGRtDeQwmC0PR69KWC7GzYNdWZ5cHu_exhNzdYyYzM,348
+keras_hub/src/models/hgnetv2/hgnetv2_layers.py,sha256=OMUKW5VWL0xkEQl7RJYGAbTTB7qeqH3FHtMMuiQ0QmI,36418
+keras_hub/src/models/hgnetv2/hgnetv2_presets.py,sha256=kbwxp8Nh4jdDN6egSmSxxwpY7CP5AklINXlWI0K3ZYA,2078
 keras_hub/src/models/llama/__init__.py,sha256=svVZjGi71R3lVbq0AdbqlXj909mr3Rp9EPXdiO0w0G0,251
 keras_hub/src/models/llama/llama_attention.py,sha256=UFHOWr69vTkOxLdgSUckGaSuUUyqlJ_xYoswWHVnTOU,8977
 keras_hub/src/models/llama/llama_backbone.py,sha256=AT8kUPHEn6DT-aGY838_sZkBhByIdh82DWW8y-Sp3mE,13614
@@ -234,7 +263,7 @@ keras_hub/src/models/llama3/__init__.py,sha256=Vqvr2E10cnANkrRQGNBJtVLNAu-Bg9Lx6
 keras_hub/src/models/llama3/llama3_backbone.py,sha256=TEocD8X7GihQFGJAz3jPwLCqDb86nyeZ1DqBF7RgQLE,3366
 keras_hub/src/models/llama3/llama3_causal_lm.py,sha256=qk_onuf7S6d7rxAntilq2Q2orggMbPEJbNHJNVe2G0U,1541
 keras_hub/src/models/llama3/llama3_causal_lm_preprocessor.py,sha256=twbXel9hsQgGxDAoQhEQuVm2udnEybI4fAQTJzXAuBs,3064
-keras_hub/src/models/llama3/llama3_presets.py,sha256=
+keras_hub/src/models/llama3/llama3_presets.py,sha256=n_FFfYycZd_BiealnY6EL16haMtyGwPCQ7CAT-_Ctbg,4302
 keras_hub/src/models/llama3/llama3_tokenizer.py,sha256=J-KxRc08vGs4olFw_4mtJs0W_dTeUyj_XxMycazBmxI,1934
 keras_hub/src/models/mistral/__init__.py,sha256=vjBlzcrIsFSwJKnfwfTNMKstIEKGFTE3kVcdAdfwlnE,263
 keras_hub/src/models/mistral/mistral_attention.py,sha256=nGDlD4NcIwIGlfbt3ArxdT5QAvamY7yiNEGDlTgWirU,8609
@@ -242,7 +271,7 @@ keras_hub/src/models/mistral/mistral_backbone.py,sha256=oatoqSX0z-xjKfXeSveL4P0D
 keras_hub/src/models/mistral/mistral_causal_lm.py,sha256=ujCKfsbuYzr8VusqPYcnTH6rTb0MRfzsinEraVhQksc,13234
 keras_hub/src/models/mistral/mistral_causal_lm_preprocessor.py,sha256=_4qq-uKktfIg_i081ZWjZGEIYZpedBwtBGpchQQ-qEk,3079
 keras_hub/src/models/mistral/mistral_layer_norm.py,sha256=nimMZ5CTPK8v9eflfrGuzqmv-2vd2rGlPvcHOMwYZyg,1063
-keras_hub/src/models/mistral/mistral_presets.py,sha256=
+keras_hub/src/models/mistral/mistral_presets.py,sha256=AmLzczVpsz12nCQ0BCY5zNnCb9KOu0LuoaOJxD3OMHg,1507
 keras_hub/src/models/mistral/mistral_tokenizer.py,sha256=wyzR_Y2XwrDiBV3jIeBChSPiaOkVVaxFuLxMH2F6EYA,2005
 keras_hub/src/models/mistral/mistral_transformer_decoder.py,sha256=z5FCh9TEaznvhW3JOSKmFTotRbiuQhzJTZClW2m9sEw,9556
 keras_hub/src/models/mit/__init__.py,sha256=F70_0PR_nPzPdMI8XOpXDRR_nxclGjcHv3iWSWUX3w8,316
@@ -259,14 +288,14 @@ keras_hub/src/models/mixtral/mixtral_causal_lm.py,sha256=JA1t6xTeaYX_fNo9ftRyvzd
 keras_hub/src/models/mixtral/mixtral_causal_lm_preprocessor.py,sha256=q2qXa9QAUWBvOWv9DeNvwsBNXSORJAbQFoQsWQ7e8V8,3079
 keras_hub/src/models/mixtral/mixtral_decoder.py,sha256=CvOjhTxPnGQ_HNknZXRI6Cx1kpuHG99_TiOh-mNcsDw,18190
 keras_hub/src/models/mixtral/mixtral_layer_norm.py,sha256=zfbDKZEb45FTwP0zQd7WPPp8tuiGoSNfS-DRYWkZyWw,1031
-keras_hub/src/models/mixtral/mixtral_presets.py,sha256=
+keras_hub/src/models/mixtral/mixtral_presets.py,sha256=JzEbR0j3iK82GDWCzH58e6lwMc7IOnj-EnibsynfGCU,852
 keras_hub/src/models/mixtral/mixtral_tokenizer.py,sha256=Kc233k879QMyX164X_CzWbqpnqEkKWNqa648guTGkBk,661
 keras_hub/src/models/mobilenet/__init__.py,sha256=hxkNGGj_iAMu62iooUDEPA818sNOIgjG7pXMLEMOsAE,275
 keras_hub/src/models/mobilenet/mobilenet_backbone.py,sha256=aZBSFeLUObYYoi3od9DI1KfgPCqh5GHTcAI8Y2ZHShA,29536
 keras_hub/src/models/mobilenet/mobilenet_image_classifier.py,sha256=rgPVJeSRqyp3-Fgf5ERbg_97c4cSawRmAtoJpdBN8WA,2437
 keras_hub/src/models/mobilenet/mobilenet_image_classifier_preprocessor.py,sha256=yqM4wQ3ae7wXTBO0aMuvJx6XqllA7Psqzjvpm2NABXM,573
 keras_hub/src/models/mobilenet/mobilenet_image_converter.py,sha256=a3Ka0UYYK5wHSOjf2oMHSgofRazTAeUfttklVefq14w,360
-keras_hub/src/models/mobilenet/mobilenet_presets.py,sha256
+keras_hub/src/models/mobilenet/mobilenet_presets.py,sha256=hR_3xxI_PigE8UprXW4lAuKRa3LFGdidBaN8LklxwRQ,1895
 keras_hub/src/models/mobilenet/util.py,sha256=S7j4UacmVIJ3fU8cymyAoK49eHcpWIKTOyUQiEjcbzQ,721
 keras_hub/src/models/moonshine/__init__.py,sha256=WK_9Cy1dp5KplNAaTsaJbd-2DGLsiHQsIL5ZnXuCbDQ,275
 keras_hub/src/models/moonshine/moonshine_audio_converter.py,sha256=FnvR7SP44uVOsA3g9azUhQjsVg809eJ5nqoJZQ-DAq0,11854
@@ -286,12 +315,12 @@ keras_hub/src/models/opt/opt_causal_lm_preprocessor.py,sha256=xHfslVMOZlAIj2V2jI
 keras_hub/src/models/opt/opt_presets.py,sha256=LrjgI5gbq4Cvfl_pmeCnKn4hS_V_0GYTeJaDc9tbeZM,1745
 keras_hub/src/models/opt/opt_tokenizer.py,sha256=oDHeed4xf07tm14hj_C78BkzMuuRwRP2cRHmqYnObrs,2557
 keras_hub/src/models/pali_gemma/__init__.py,sha256=uODWTlttOOchcTLpiYHCEWMXnDxIz8ZVIeYFQN2bd8o,288
-keras_hub/src/models/pali_gemma/pali_gemma_backbone.py,sha256=
-keras_hub/src/models/pali_gemma/pali_gemma_causal_lm.py,sha256=
+keras_hub/src/models/pali_gemma/pali_gemma_backbone.py,sha256=e1KAg4bmK1PrmYW-Ewx3vD7S2DlX9K8LmbRwv30VEkA,13643
+keras_hub/src/models/pali_gemma/pali_gemma_causal_lm.py,sha256=aT075qRyFmuo5JwphKkjLt7iJ8BK8NGt-5mqfgIXYqs,11351
 keras_hub/src/models/pali_gemma/pali_gemma_causal_lm_preprocessor.py,sha256=F57y0fZ0wYYxfGIjfrJc1W9uQpViYFx5bvFjj5CqUbI,4814
 keras_hub/src/models/pali_gemma/pali_gemma_decoder_block.py,sha256=24ABQ1vGlppV-KfWh0YqJjzM_Lu2GIwvyJ4X2XXie_A,5616
 keras_hub/src/models/pali_gemma/pali_gemma_image_converter.py,sha256=5yM_jUtrFsWIieiwfFBoP7mtPmQAwywkeLKbd7fhmzk,371
-keras_hub/src/models/pali_gemma/pali_gemma_presets.py,sha256=
+keras_hub/src/models/pali_gemma/pali_gemma_presets.py,sha256=Q_zfHEjGTtXEiCwjoJc2g6HjmoNoLgSDRNfRvIsf0dA,12989
 keras_hub/src/models/pali_gemma/pali_gemma_tokenizer.py,sha256=ljTiADHo0Ok88q-jVzwJIle2C8xcxnudLTsBLzIySaM,2415
 keras_hub/src/models/pali_gemma/pali_gemma_vit.py,sha256=SbWanwCoONSwgiWQsc6lFdvhqKZ-zDW42XzQt8CNMtU,18311
 keras_hub/src/models/phi3/__init__.py,sha256=zIbf1MU-ks91mEkjTRJAsk51N3BBnXDF2JM1vO-13PQ,245
@@ -313,14 +342,23 @@ keras_hub/src/models/qwen/qwen_decoder.py,sha256=utmAvZlU7_nP-6pjGPDinK4JaMzsQSw
 keras_hub/src/models/qwen/qwen_layernorm.py,sha256=DS35r3qd6g5ocL7Nhf_vNzLLMo1aI9VCSmL64dgNOYI,924
 keras_hub/src/models/qwen/qwen_presets.py,sha256=1FkKV6M3yqJz4EP1xa7bEvfIQ721xXT-_ikjWX0xvww,1992
 keras_hub/src/models/qwen/qwen_tokenizer.py,sha256=LCv3IyiDDHqVnM9N3lf5-BE3iwicIh0nKS1hjoPw9lE,1532
+keras_hub/src/models/qwen3/__init__.py,sha256=fdndQouGmfNhB_Rj76A8my5FvpxOvRJ24DoUha-wlgw,251
+keras_hub/src/models/qwen3/qwen3_attention.py,sha256=9zjuzGZa6TzaFgO4ShNCEHMPVb3r6mFZW7vzutbwUGg,13050
+keras_hub/src/models/qwen3/qwen3_backbone.py,sha256=Ylpk_rRWWRxy8irlAPjJU-YrxYGpo8c9lSEO1zZl4gU,7456
+keras_hub/src/models/qwen3/qwen3_causal_lm.py,sha256=cn_4WFVxhlOArtIGAaqkNzIz9Rx8IEWwCVMRFKKk26k,15531
+keras_hub/src/models/qwen3/qwen3_causal_lm_preprocessor.py,sha256=H4g-bgvuhAUnDwjJovydK16Kes38ZFZWPvflrgHqZis,458
+keras_hub/src/models/qwen3/qwen3_decoder.py,sha256=68s9jQj53zFmXE4-SGXKYHu546fXOyi9LUbnKk-HGYY,11595
+keras_hub/src/models/qwen3/qwen3_layernorm.py,sha256=EJxjf7Pr6ufPQnNeuYQxkExzPjPk4PQxqMsoBeSEkDo,1073
+keras_hub/src/models/qwen3/qwen3_presets.py,sha256=eAqRbjLyRTSXcN-jnGHqoCHejKm2gmt8_zL4EPoE-JA,2518
+keras_hub/src/models/qwen3/qwen3_tokenizer.py,sha256=LmPtg0vprMchDvYfTj8m5PraXI2QS3-YgdIIpIm5iAs,1448
 keras_hub/src/models/qwen_moe/__init__.py,sha256=5D8GUmVDsJs0J4sVZHcXOLkZf12U96l-WtwyVee4lu8,267
-keras_hub/src/models/qwen_moe/qwen_moe_attention.py,sha256=
+keras_hub/src/models/qwen_moe/qwen_moe_attention.py,sha256=o0mcVTDMtElMYq3NSYRCfuYVdF-W8YDSU5ogensrVJg,13277
 keras_hub/src/models/qwen_moe/qwen_moe_backbone.py,sha256=nrfELvIvRLmrgKrUNXci2CrecmeI6bWzJj7HH-RcWJA,15341
 keras_hub/src/models/qwen_moe/qwen_moe_causal_lm.py,sha256=MeP60v7GcN_SmH5_ULRpqgmFVgaYAosSecZiSQVlJvU,13256
 keras_hub/src/models/qwen_moe/qwen_moe_causal_lm_preprocessor.py,sha256=9P6TT7W_fqf4HsXcmlHF-DW_anR-XoDrRN2ZFGA7Ai4,3168
 keras_hub/src/models/qwen_moe/qwen_moe_decoder.py,sha256=kmUjLpYTbJQ3J_31qWhLOd0Dg2_9cl_JX_zM8ZMH1Qo,23130
 keras_hub/src/models/qwen_moe/qwen_moe_layernorm.py,sha256=DbkWJo7U0-cwdZwHPeAnFznYwtao6o0fjpoDJ9UWnpc,927
-keras_hub/src/models/qwen_moe/qwen_moe_presets.py,sha256=
+keras_hub/src/models/qwen_moe/qwen_moe_presets.py,sha256=nGQ0azaOJAjBorR_6_Qtb1yCSXPdFJdRp0_ULYT4_04,451
 keras_hub/src/models/qwen_moe/qwen_moe_tokenizer.py,sha256=2c3X8jNGO0q0UL5NtUqSgHWLqhyJGi2ohNcTeOGhd84,1407
 keras_hub/src/models/resnet/__init__.py,sha256=C5UqlQ6apm8WSp1bnrxB6Bi3BGaknxRQs-r3b2wpaGA,257
 keras_hub/src/models/resnet/resnet_backbone.py,sha256=Q7nlqcTXZzjqd0e-DsjHC4ok58yOX7qxseotym3uZpM,31276
@@ -346,7 +384,7 @@ keras_hub/src/models/roberta/roberta_text_classifier.py,sha256=x36hU84P-ROReZniU
 keras_hub/src/models/roberta/roberta_text_classifier_preprocessor.py,sha256=gAJa8JdPUmT1N7nxBqtaIbnfXV-xlNjTtkEevQhfjNU,5993
 keras_hub/src/models/roberta/roberta_tokenizer.py,sha256=VKPrgXVT9aMKP7et2DIWKlTN8g4tIzjya0MHqNz9BwQ,2712
 keras_hub/src/models/roformer_v2/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-keras_hub/src/models/roformer_v2/roformer_v2_attention.py,sha256=
+keras_hub/src/models/roformer_v2/roformer_v2_attention.py,sha256=C8wwCw0FpmOWQq8H1IeTn25-6_EzDRD-8UXAN77-5gk,7060
 keras_hub/src/models/roformer_v2/roformer_v2_backbone.py,sha256=a5gG47Gvo-dFoToMe6Q3oOYJz8HypPZWIhY-cGwS9_c,7187
 keras_hub/src/models/roformer_v2/roformer_v2_encoder.py,sha256=o_M3dDtebBtXRAxwhiRmdWA59tu1_MNKLINf4GQYfeA,4218
 keras_hub/src/models/roformer_v2/roformer_v2_masked_lm.py,sha256=4uQ6DKFDdBOu0bHaL45bqtpL-CMZw59inXirD9zWFlI,5950
@@ -382,14 +420,14 @@ keras_hub/src/models/siglip/siglip_text_encoder.py,sha256=xOVvzyQHLX9ne30y4ussar
 keras_hub/src/models/siglip/siglip_tokenizer.py,sha256=j_67JbIHJDRk-CbiemG2dgAO6lp3_0_JdnfroZ90G18,2579
 keras_hub/src/models/siglip/siglip_vision_encoder.py,sha256=CaNaFq5thBC3TUXXOf2qknk5vWsauM20ZoaDPYRnXcs,5927
 keras_hub/src/models/stable_diffusion_3/__init__.py,sha256=ZKYQuaRObyhKq8GVAHmoRvlXp6FpU8ChvutVCHyXKuc,343
-keras_hub/src/models/stable_diffusion_3/flow_match_euler_discrete_scheduler.py,sha256=
-keras_hub/src/models/stable_diffusion_3/mmdit.py,sha256=
-keras_hub/src/models/stable_diffusion_3/stable_diffusion_3_backbone.py,sha256=
-keras_hub/src/models/stable_diffusion_3/stable_diffusion_3_image_to_image.py,sha256=
-keras_hub/src/models/stable_diffusion_3/stable_diffusion_3_inpaint.py,sha256=
+keras_hub/src/models/stable_diffusion_3/flow_match_euler_discrete_scheduler.py,sha256=1K_B3d3fNn50eY84OgxVHyIHHZhmlJY03b71pMSmE9s,3246
+keras_hub/src/models/stable_diffusion_3/mmdit.py,sha256=emyDmtpJiFU_9crSDBC5CaXoZnM1Eti8uAQtwv2v8B0,42794
+keras_hub/src/models/stable_diffusion_3/stable_diffusion_3_backbone.py,sha256=BEtMwYaxrJxHpNT_E1wK-SPCBCp4hgbnX-UjgqGrQ7g,24362
+keras_hub/src/models/stable_diffusion_3/stable_diffusion_3_image_to_image.py,sha256=uNsNSQ4EFceGfIMzgjYWFMuL0XdfM58rubTcrCVPrts,5532
+keras_hub/src/models/stable_diffusion_3/stable_diffusion_3_inpaint.py,sha256=2UIRz11DRbHJ7IVbkjpBjtbkZGC3-eYhMtVUWTmWMH8,6437
 keras_hub/src/models/stable_diffusion_3/stable_diffusion_3_presets.py,sha256=x7Ez4L955MJE4ABtBy-63YpU9XpR0Ro8QWPzYYJs1yE,2167
-keras_hub/src/models/stable_diffusion_3/stable_diffusion_3_text_to_image.py,sha256
-keras_hub/src/models/stable_diffusion_3/stable_diffusion_3_text_to_image_preprocessor.py,sha256=
+keras_hub/src/models/stable_diffusion_3/stable_diffusion_3_text_to_image.py,sha256=-xmmCaoPc1ixJvyIBwVTW1yKBA-rP4nWReovcs7OLKE,4620
+keras_hub/src/models/stable_diffusion_3/stable_diffusion_3_text_to_image_preprocessor.py,sha256=crUT82moaPx8RVKrLtUHx1zry602f8DWItek9aFkojg,2903
 keras_hub/src/models/stable_diffusion_3/t5_encoder.py,sha256=oV7P1uwCKdGiD93zXq7kmqX0elMZQU4UvBa8wg6P1hs,5113
 keras_hub/src/models/t5/__init__.py,sha256=OWyoUeDY3v4DnO8Ry02DWV1bNSVGcC89PF9oCftyi1s,233
 keras_hub/src/models/t5/t5_backbone.py,sha256=MUmabugPx5_BkAHkuJXr2-8z_yZfKD19SO0KJtlcHhA,10331
@@ -409,12 +447,12 @@ keras_hub/src/models/vgg/vgg_image_classifier_preprocessor.py,sha256=M7hBbDPws5Z
 keras_hub/src/models/vgg/vgg_image_converter.py,sha256=FKVrSNNBxIkiKvApzf4TZxidBb1z917Xs9nooHCcRLM,324
 keras_hub/src/models/vgg/vgg_presets.py,sha256=UL7a8hdZ22duMADXwVypGnc20ME-ywI4QjtXu15usEI,1491
 keras_hub/src/models/vit/__init__.py,sha256=GH7x3VjEXZLm-4F-c9-55QZE0lP2OLVICH0Hr5YCp9A,239
-keras_hub/src/models/vit/vit_backbone.py,sha256=
+keras_hub/src/models/vit/vit_backbone.py,sha256=VnypiTAf0ORaBTVzdDOXsnKnQxKbrIlX9z9qOumZH50,6699
 keras_hub/src/models/vit/vit_image_classifier.py,sha256=lMVxiD1_6drx7XQ7P7YzlqnFP7kT1zlMe84f-T3SDQI,6332
 keras_hub/src/models/vit/vit_image_classifier_preprocessor.py,sha256=wu6YcBlXMWB9sKCPvmNdGBZKTLQt_HyHWS6P9nyDwsk,504
-keras_hub/src/models/vit/vit_image_converter.py,sha256=
-keras_hub/src/models/vit/vit_layers.py,sha256=
-keras_hub/src/models/vit/vit_presets.py,sha256=
+keras_hub/src/models/vit/vit_image_converter.py,sha256=JhdXcbfKu9pKSJZiaKk7FKf_CjSXztSa2rsBFQvlgAo,324
+keras_hub/src/models/vit/vit_layers.py,sha256=c0ApxF7cMqeEEa0LcWrBhc6zIolwOFVb2HjzLV-q98k,13940
+keras_hub/src/models/vit/vit_presets.py,sha256=mlLBJxxonru14fBiMnMF4ud-JgbJHclpVV3FsoIubrk,4479
 keras_hub/src/models/vit_det/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 keras_hub/src/models/vit_det/vit_det_backbone.py,sha256=DOZ5J7c1t5PAZ6y0pMmBoQTMOUup7UoUrYVfCs69ltY,7697
 keras_hub/src/models/vit_det/vit_layers.py,sha256=mnwu56chMc6zxmfp_hsLdR7TXYy1_YsWy1KwGX9M5Ic,19840
@@ -467,11 +505,11 @@ keras_hub/src/tokenizers/unicode_codepoint_tokenizer.py,sha256=hRv_XxoPIPDpHfO0Z
 keras_hub/src/tokenizers/word_piece_tokenizer.py,sha256=vP6AZgbzsRiuPCt3W_n94nsF7XiERnagWcH_rqJHtVU,19943
 keras_hub/src/tokenizers/word_piece_tokenizer_trainer.py,sha256=cylrs02ZrYQ1TuZr9oyS3NrVbDwGctA3VXbIh1pFJMQ,6743
 keras_hub/src/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-keras_hub/src/utils/keras_utils.py,sha256=
+keras_hub/src/utils/keras_utils.py,sha256=IWsbg-p-XVLuOkba8PAYNf9zDo4G2RkINLr58p12MhA,5291
 keras_hub/src/utils/pipeline_model.py,sha256=jgzB6NQPSl0KOu08N-TazfOnXnUJbZjH2EXXhx25Ftg,9084
-keras_hub/src/utils/preset_utils.py,sha256=
+keras_hub/src/utils/preset_utils.py,sha256=dEOAGjkjnu69nhWuS1wnHVyMmkYnlzUQAUPzbLexLhY,35142
 keras_hub/src/utils/python_utils.py,sha256=N8nWeO3san4YnGkffRXG3Ix7VEIMTKSN21FX5TuL7G8,202
-keras_hub/src/utils/tensor_utils.py,sha256=
+keras_hub/src/utils/tensor_utils.py,sha256=WrohV6-hvxtLE6rRRhtN4hy8GkHikV-NrRnVEYUwJQo,16133
 keras_hub/src/utils/coco/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 keras_hub/src/utils/coco/coco_utils.py,sha256=x_QnUUvZ92zoFzMJugiInHORc4NrMdWVBkpp8BAYF6s,2586
 keras_hub/src/utils/imagenet/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -488,21 +526,27 @@ keras_hub/src/utils/transformers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRk
 keras_hub/src/utils/transformers/convert_albert.py,sha256=VdKclZpCxtDWq3UbUUQZf4fR9DJK_JYZ73B4O_G9skg,7695
 keras_hub/src/utils/transformers/convert_bart.py,sha256=Tk4h9Md9rwN5wjQbGIVrC7qzDpF8kI8qm-FKL8HlUok,14411
 keras_hub/src/utils/transformers/convert_bert.py,sha256=4gQqXCJzC9QWdLPDUAq741K8t_kjPIET050YjUnLeDA,5977
+keras_hub/src/utils/transformers/convert_deit.py,sha256=ubcqYzMlhWTCE2S_TsXICCMmqjN9RsQPaw_70vArnjo,5306
+keras_hub/src/utils/transformers/convert_dinov2.py,sha256=FvmB3ggEgowVFPSO5WOzC2hKkG2JvjSb-DeVffu78iU,6908
 keras_hub/src/utils/transformers/convert_distilbert.py,sha256=SlfIRhSRk5c1ir2HGiDPiXa5XdOId_DbcnZO9lbwyZ8,6498
+keras_hub/src/utils/transformers/convert_esm.py,sha256=rOgGnNY37ZbYnoVC3L-Y-yGGAxTRmYtQV0nJoandH2Y,6214
 keras_hub/src/utils/transformers/convert_gemma.py,sha256=ElCgwBpSN5Q7rV5PJawTsoytPzs5ZjuwoY60YAe8y_A,6533
 keras_hub/src/utils/transformers/convert_gpt2.py,sha256=HCeHN_-GiQJRxLCM9OCJJ1watPVpIBF8ujS8pGbBOWc,5703
-keras_hub/src/utils/transformers/convert_llama3.py,sha256=
+keras_hub/src/utils/transformers/convert_llama3.py,sha256=DjVUyQbl4AV-h8VqSIzmxiCd7cYOKIJTYoLM__NtyY0,6413
 keras_hub/src/utils/transformers/convert_mistral.py,sha256=kVhN9h1ZFVhwkNW8p3wnS7eANJUXIsNy1RxWXy20Gqw,4760
 keras_hub/src/utils/transformers/convert_mixtral.py,sha256=PxeCY8Xe7U_caICugwOCEjuSZ51ZUtmef6rUxh-Wt54,5508
 keras_hub/src/utils/transformers/convert_pali_gemma.py,sha256=B1leeDw96Yvu81hYumf66hIid07k5NLqoeWAJgPnaLs,10649
 keras_hub/src/utils/transformers/convert_qwen.py,sha256=WUxMAEFVqRs7TRw7QU5TH3_ev4yf02R1xFVliMvTQqg,5886
+keras_hub/src/utils/transformers/convert_qwen3.py,sha256=LIormvCMWPq6X9Wo2eNbADjtFZ0nI7tFGZFBxmo4GKw,5700
 keras_hub/src/utils/transformers/convert_qwen_moe.py,sha256=a7R28aln-PdAcNuKAXdrtzvslho2Co6GypChxLMKPpc,10618
 keras_hub/src/utils/transformers/convert_vit.py,sha256=9SUZ9utNJhW_5cj3acMn9cRy47u2eIcDsrhmzj77o9k,5187
-keras_hub/src/utils/transformers/preset_loader.py,sha256=
+keras_hub/src/utils/transformers/preset_loader.py,sha256=YZVpeNhFITHdauY3MWESrZLNUIJt9ilHJ1jUhvITNT8,4781
 keras_hub/src/utils/transformers/safetensor_utils.py,sha256=CYUHyA4y-B61r7NDnCsFb4t_UmSwZ1k9L-8gzEd6KRg,3339
-keras_hub/
+keras_hub/src/utils/transformers/export/gemma.py,sha256=NpTSgRUSWp3WXQil1CjYUVFVyyVhpO-4-3q2en2Wxwg,3264
+keras_hub/src/utils/transformers/export/hf_exporter.py,sha256=oTdRS8SalPCbi_cZPC55aZUBc-1_pdviUIp0XysA4cI,3234
+keras_hub/tokenizers/__init__.py,sha256=gQIESc4erRLuwxHyxtYy_Z0ePQXw_uhXAa4GVHMffYk,4244
 keras_hub/utils/__init__.py,sha256=jXPqVGBpJr_PpYmqD8aDG-fRMlxH-ulqCR2SZMn288Y,646
-keras_hub-0.
-keras_hub-0.
-keras_hub-0.
-keras_hub-0.
+keras_hub-0.22.0.dev0.dist-info/METADATA,sha256=2rdYtZelBHCDm7AIZA2CkPMxiHO4H-WyLNH2uEtibjs,7376
+keras_hub-0.22.0.dev0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+keras_hub-0.22.0.dev0.dist-info/top_level.txt,sha256=N4J6piIWBKa38A4uV-CnIopnOEf8mHAbkNXafXm_CuA,10
+keras_hub-0.22.0.dev0.dist-info/RECORD,,