keras-hub-nightly 0.22.0.dev202508170419__py3-none-any.whl → 0.24.0.dev202511090424__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of keras-hub-nightly might be problematic.
- keras_hub/layers/__init__.py +15 -0
- keras_hub/models/__init__.py +93 -0
- keras_hub/src/layers/modeling/position_embedding.py +21 -6
- keras_hub/src/layers/modeling/reversible_embedding.py +8 -1
- keras_hub/src/layers/modeling/rotary_embedding.py +16 -6
- keras_hub/src/layers/modeling/sine_position_encoding.py +21 -8
- keras_hub/src/layers/modeling/token_and_position_embedding.py +2 -1
- keras_hub/src/models/backbone.py +28 -16
- keras_hub/src/models/causal_lm.py +37 -0
- keras_hub/src/models/causal_lm_preprocessor.py +14 -0
- keras_hub/src/models/clip/clip_presets.py +8 -8
- keras_hub/src/models/d_fine/__init__.py +5 -0
- keras_hub/src/models/d_fine/d_fine_attention.py +461 -0
- keras_hub/src/models/d_fine/d_fine_backbone.py +891 -0
- keras_hub/src/models/d_fine/d_fine_decoder.py +944 -0
- keras_hub/src/models/d_fine/d_fine_encoder.py +365 -0
- keras_hub/src/models/d_fine/d_fine_hybrid_encoder.py +642 -0
- keras_hub/src/models/d_fine/d_fine_image_converter.py +8 -0
- keras_hub/src/models/d_fine/d_fine_layers.py +1828 -0
- keras_hub/src/models/d_fine/d_fine_loss.py +938 -0
- keras_hub/src/models/d_fine/d_fine_object_detector.py +875 -0
- keras_hub/src/models/d_fine/d_fine_object_detector_preprocessor.py +14 -0
- keras_hub/src/models/d_fine/d_fine_presets.py +155 -0
- keras_hub/src/models/d_fine/d_fine_utils.py +827 -0
- keras_hub/src/models/deberta_v3/disentangled_self_attention.py +7 -2
- keras_hub/src/models/depth_anything/__init__.py +9 -0
- keras_hub/src/models/depth_anything/depth_anything_backbone.py +232 -0
- keras_hub/src/models/depth_anything/depth_anything_depth_estimator.py +70 -0
- keras_hub/src/models/depth_anything/depth_anything_depth_estimator_preprocessor.py +16 -0
- keras_hub/src/models/depth_anything/depth_anything_image_converter.py +10 -0
- keras_hub/src/models/depth_anything/depth_anything_layers.py +725 -0
- keras_hub/src/models/depth_anything/depth_anything_loss.py +89 -0
- keras_hub/src/models/depth_anything/depth_anything_presets.py +41 -0
- keras_hub/src/models/depth_anything/interpolate.py +62 -0
- keras_hub/src/models/depth_estimator.py +239 -0
- keras_hub/src/models/depth_estimator_preprocessor.py +78 -0
- keras_hub/src/models/dinov2/dinov2_backbone.py +29 -3
- keras_hub/src/models/dinov2/dinov2_layers.py +16 -4
- keras_hub/src/models/dinov3/__init__.py +5 -0
- keras_hub/src/models/dinov3/dinov3_backbone.py +263 -0
- keras_hub/src/models/dinov3/dinov3_image_converter.py +8 -0
- keras_hub/src/models/dinov3/dinov3_layers.py +1013 -0
- keras_hub/src/models/dinov3/dinov3_presets.py +4 -0
- keras_hub/src/models/gemma/gemma_backbone.py +0 -1
- keras_hub/src/models/gemma/gemma_presets.py +30 -0
- keras_hub/src/models/gemma3/gemma3_attention.py +48 -0
- keras_hub/src/models/gemma3/gemma3_backbone.py +4 -1
- keras_hub/src/models/gemma3/gemma3_decoder_block.py +12 -0
- keras_hub/src/models/gemma3/gemma3_presets.py +39 -0
- keras_hub/src/models/hgnetv2/hgnetv2_backbone.py +4 -1
- keras_hub/src/models/hgnetv2/hgnetv2_encoder.py +3 -2
- keras_hub/src/models/hgnetv2/hgnetv2_layers.py +27 -11
- keras_hub/src/models/image_to_image.py +5 -0
- keras_hub/src/models/inpaint.py +5 -0
- keras_hub/src/models/mobilenetv5/__init__.py +9 -0
- keras_hub/src/models/mobilenetv5/mobilenetv5_attention.py +699 -0
- keras_hub/src/models/mobilenetv5/mobilenetv5_backbone.py +396 -0
- keras_hub/src/models/mobilenetv5/mobilenetv5_blocks.py +890 -0
- keras_hub/src/models/mobilenetv5/mobilenetv5_builder.py +436 -0
- keras_hub/src/models/mobilenetv5/mobilenetv5_image_classifier.py +157 -0
- keras_hub/src/models/mobilenetv5/mobilenetv5_image_classifier_preprocessor.py +16 -0
- keras_hub/src/models/mobilenetv5/mobilenetv5_image_converter.py +10 -0
- keras_hub/src/models/mobilenetv5/mobilenetv5_layers.py +462 -0
- keras_hub/src/models/mobilenetv5/mobilenetv5_presets.py +15 -0
- keras_hub/src/models/mobilenetv5/mobilenetv5_utils.py +146 -0
- keras_hub/src/models/parseq/__init__.py +5 -0
- keras_hub/src/models/parseq/parseq_backbone.py +134 -0
- keras_hub/src/models/parseq/parseq_causal_lm.py +466 -0
- keras_hub/src/models/parseq/parseq_causal_lm_preprocessor.py +168 -0
- keras_hub/src/models/parseq/parseq_decoder.py +418 -0
- keras_hub/src/models/parseq/parseq_image_converter.py +8 -0
- keras_hub/src/models/parseq/parseq_presets.py +15 -0
- keras_hub/src/models/parseq/parseq_tokenizer.py +221 -0
- keras_hub/src/models/qwen3_moe/__init__.py +5 -0
- keras_hub/src/models/qwen3_moe/qwen3_moe_attention.py +371 -0
- keras_hub/src/models/qwen3_moe/qwen3_moe_backbone.py +365 -0
- keras_hub/src/models/qwen3_moe/qwen3_moe_causal_lm.py +357 -0
- keras_hub/src/models/qwen3_moe/qwen3_moe_causal_lm_preprocessor.py +12 -0
- keras_hub/src/models/qwen3_moe/qwen3_moe_decoder.py +672 -0
- keras_hub/src/models/qwen3_moe/qwen3_moe_layernorm.py +45 -0
- keras_hub/src/models/qwen3_moe/qwen3_moe_presets.py +30 -0
- keras_hub/src/models/qwen3_moe/qwen3_moe_tokenizer.py +48 -0
- keras_hub/src/models/sam/sam_prompt_encoder.py +3 -1
- keras_hub/src/models/siglip/siglip_presets.py +15 -0
- keras_hub/src/models/smollm3/smollm3_backbone.py +211 -0
- keras_hub/src/models/smollm3/smollm3_causal_lm.py +310 -0
- keras_hub/src/models/smollm3/smollm3_causal_lm_preprocessor.py +84 -0
- keras_hub/src/models/smollm3/smollm3_layers.py +757 -0
- keras_hub/src/models/smollm3/smollm3_tokenizer.py +60 -0
- keras_hub/src/models/smollm3/smollm3_utils.py +56 -0
- keras_hub/src/models/stable_diffusion_3/stable_diffusion_3_presets.py +3 -3
- keras_hub/src/models/t5gemma/__init__.py +5 -0
- keras_hub/src/models/t5gemma/t5gemma_attention.py +370 -0
- keras_hub/src/models/t5gemma/t5gemma_backbone.py +366 -0
- keras_hub/src/models/t5gemma/t5gemma_decoder.py +355 -0
- keras_hub/src/models/t5gemma/t5gemma_encoder.py +214 -0
- keras_hub/src/models/t5gemma/t5gemma_layers.py +118 -0
- keras_hub/src/models/t5gemma/t5gemma_presets.py +374 -0
- keras_hub/src/models/t5gemma/t5gemma_seq_2_seq_lm.py +442 -0
- keras_hub/src/models/t5gemma/t5gemma_seq_2_seq_lm_preprocessor.py +216 -0
- keras_hub/src/models/t5gemma/t5gemma_tokenizer.py +84 -0
- keras_hub/src/models/text_to_image.py +5 -0
- keras_hub/src/samplers/beam_sampler.py +6 -6
- keras_hub/src/samplers/sampler.py +8 -6
- keras_hub/src/tests/test_case.py +40 -3
- keras_hub/src/tokenizers/tokenizer.py +15 -0
- keras_hub/src/utils/openvino_utils.py +141 -0
- keras_hub/src/utils/preset_utils.py +58 -2
- keras_hub/src/utils/tensor_utils.py +26 -2
- keras_hub/src/utils/timm/convert_mobilenetv5.py +321 -0
- keras_hub/src/utils/timm/preset_loader.py +8 -4
- keras_hub/src/utils/transformers/convert_dinov2.py +1 -0
- keras_hub/src/utils/transformers/convert_dinov3.py +106 -0
- keras_hub/src/utils/transformers/convert_qwen3_moe.py +216 -0
- keras_hub/src/utils/transformers/convert_smollm3.py +139 -0
- keras_hub/src/utils/transformers/convert_t5gemma.py +229 -0
- keras_hub/src/utils/transformers/convert_vit.py +4 -1
- keras_hub/src/utils/transformers/export/gemma.py +49 -4
- keras_hub/src/utils/transformers/export/hf_exporter.py +71 -25
- keras_hub/src/utils/transformers/preset_loader.py +12 -0
- keras_hub/src/version.py +1 -1
- keras_hub/tokenizers/__init__.py +15 -0
- {keras_hub_nightly-0.22.0.dev202508170419.dist-info → keras_hub_nightly-0.24.0.dev202511090424.dist-info}/METADATA +1 -1
- {keras_hub_nightly-0.22.0.dev202508170419.dist-info → keras_hub_nightly-0.24.0.dev202511090424.dist-info}/RECORD +126 -47
- {keras_hub_nightly-0.22.0.dev202508170419.dist-info → keras_hub_nightly-0.24.0.dev202511090424.dist-info}/WHEEL +0 -0
- {keras_hub_nightly-0.22.0.dev202508170419.dist-info → keras_hub_nightly-0.24.0.dev202511090424.dist-info}/top_level.txt +0 -0
keras_hub/src/models/d_fine/d_fine_object_detector_preprocessor.py (new file):

```diff
@@ -0,0 +1,14 @@
+from keras_hub.src.api_export import keras_hub_export
+from keras_hub.src.models.d_fine.d_fine_backbone import DFineBackbone
+from keras_hub.src.models.d_fine.d_fine_image_converter import (
+    DFineImageConverter,
+)
+from keras_hub.src.models.object_detector_preprocessor import (
+    ObjectDetectorPreprocessor,
+)
+
+
+@keras_hub_export("keras_hub.models.DFineObjectDetectorPreprocessor")
+class DFineObjectDetectorPreprocessor(ObjectDetectorPreprocessor):
+    backbone_cls = DFineBackbone
+    image_converter_cls = DFineImageConverter
```
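The preprocessor wires the D-FINE backbone to its image converter through the class-level `backbone_cls` and `image_converter_cls` attributes, which is how keras-hub's `from_preset` machinery routes a preset to the matching preprocessing pipeline. A minimal usage sketch, assuming the nightly wheel above is installed (the preset name comes from `d_fine_presets.py`, shown next):

```python
import keras_hub

# Resolve the D-FINE preprocessor from a registered preset name; from_preset
# uses the backbone_cls/image_converter_cls declarations above to construct
# the image converter that matches the pretrained backbone.
preprocessor = keras_hub.models.DFineObjectDetectorPreprocessor.from_preset(
    "dfine_nano_coco"
)
```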
keras_hub/src/models/d_fine/d_fine_presets.py (new file):

```diff
@@ -0,0 +1,155 @@
+# Metadata for loading pretrained model weights.
+backbone_presets = {
+    "dfine_nano_coco": {
+        "metadata": {
+            "description": (
+                "D-FINE Nano model, the smallest variant in the family, "
+                "pretrained on the COCO dataset. Ideal for applications "
+                "where computational resources are limited."
+            ),
+            "params": 3788625,
+            "path": "d_fine",
+        },
+        "kaggle_handle": "kaggle://keras/d-fine/keras/dfine_nano_coco/1",
+    },
+    "dfine_small_coco": {
+        "metadata": {
+            "description": (
+                "D-FINE Small model pretrained on the COCO dataset. Offers a "
+                "balance between performance and computational efficiency."
+            ),
+            "params": 10329321,
+            "path": "d_fine",
+        },
+        "kaggle_handle": "kaggle://keras/d-fine/keras/dfine_small_coco/1",
+    },
+    "dfine_medium_coco": {
+        "metadata": {
+            "description": (
+                "D-FINE Medium model pretrained on the COCO dataset. A solid "
+                "baseline with strong performance for general-purpose "
+                "object detection."
+            ),
+            "params": 19621160,
+            "path": "d_fine",
+        },
+        "kaggle_handle": "kaggle://keras/d-fine/keras/dfine_medium_coco/1",
+    },
+    "dfine_large_coco": {
+        "metadata": {
+            "description": (
+                "D-FINE Large model pretrained on the COCO dataset. Provides "
+                "high accuracy and is suitable for more demanding tasks."
+            ),
+            "params": 31344064,
+            "path": "d_fine",
+        },
+        "kaggle_handle": "kaggle://keras/d-fine/keras/dfine_large_coco/1",
+    },
+    "dfine_xlarge_coco": {
+        "metadata": {
+            "description": (
+                "D-FINE X-Large model, the largest COCO-pretrained variant, "
+                "designed for state-of-the-art performance where accuracy "
+                "is the top priority."
+            ),
+            "params": 62834048,
+            "path": "d_fine",
+        },
+        "kaggle_handle": "kaggle://keras/d-fine/keras/dfine_xlarge_coco/1",
+    },
+    "dfine_small_obj365": {
+        "metadata": {
+            "description": (
+                "D-FINE Small model pretrained on the large-scale Objects365 "
+                "dataset, enhancing its ability to recognize a wider "
+                "variety of objects."
+            ),
+            "params": 10623329,
+            "path": "d_fine",
+        },
+        "kaggle_handle": "kaggle://keras/d-fine/keras/dfine_small_obj365/1",
+    },
+    "dfine_medium_obj365": {
+        "metadata": {
+            "description": (
+                "D-FINE Medium model pretrained on the Objects365 dataset. "
+                "Benefits from a larger and more diverse pretraining corpus."
+            ),
+            "params": 19988670,
+            "path": "d_fine",
+        },
+        "kaggle_handle": "kaggle://keras/d-fine/keras/dfine_medium_obj365/1",
+    },
+    "dfine_large_obj365": {
+        "metadata": {
+            "description": (
+                "D-FINE Large model pretrained on the Objects365 dataset for "
+                "improved generalization and performance on diverse object "
+                "categories."
+            ),
+            "params": 31858578,
+            "path": "d_fine",
+        },
+        "kaggle_handle": "kaggle://keras/d-fine/keras/dfine_large_obj365/1",
+    },
+    "dfine_xlarge_obj365": {
+        "metadata": {
+            "description": (
+                "D-FINE X-Large model pretrained on the Objects365 dataset, "
+                "offering maximum performance by leveraging a vast number "
+                "of object categories during pretraining."
+            ),
+            "params": 63348562,
+            "path": "d_fine",
+        },
+        "kaggle_handle": "kaggle://keras/d-fine/keras/dfine_xlarge_obj365/1",
+    },
+    "dfine_small_obj2coco": {
+        "metadata": {
+            "description": (
+                "D-FINE Small model first pretrained on Objects365 and then "
+                "fine-tuned on COCO, combining broad feature learning with "
+                "benchmark-specific adaptation."
+            ),
+            "params": 10329321,
+            "path": "d_fine",
+        },
+        "kaggle_handle": "kaggle://keras/d-fine/keras/dfine_small_obj2coco/1",
+    },
+    "dfine_medium_obj2coco": {
+        "metadata": {
+            "description": (
+                "D-FINE Medium model using a two-stage training process: "
+                "pretraining on Objects365 followed by fine-tuning on COCO."
+            ),
+            "params": 19621160,
+            "path": "d_fine",
+        },
+        "kaggle_handle": "kaggle://keras/d-fine/keras/dfine_medium_obj2coco/1",
+    },
+    "dfine_large_obj2coco_e25": {
+        "metadata": {
+            "description": (
+                "D-FINE Large model pretrained on Objects365 and then "
+                "fine-tuned on COCO for 25 epochs. A high-performance model "
+                "with specialized tuning."
+            ),
+            "params": 31344064,
+            "path": "d_fine",
+        },
+        "kaggle_handle": "kaggle://keras/d-fine/keras/dfine_large_obj2coco_e25/1",
+    },
+    "dfine_xlarge_obj2coco": {
+        "metadata": {
+            "description": (
+                "D-FINE X-Large model, pretrained on Objects365 and fine-tuned "
+                "on COCO, representing the most powerful model in this "
+                "series for COCO-style tasks."
+            ),
+            "params": 62834048,
+            "path": "d_fine",
+        },
+        "kaggle_handle": "kaggle://keras/d-fine/keras/dfine_xlarge_obj2coco/1",
+    },
+}
```
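Preset names registered here resolve directly in `from_preset` calls, with each entry's `kaggle_handle` pointing at the hosted weights. A hedged end-to-end sketch: the task class comes from `d_fine_object_detector.py` in the manifest above, and its export name `keras_hub.models.DFineObjectDetector` is assumed from keras-hub's usual naming convention rather than confirmed by this diff.

```python
import keras_hub
import numpy as np

# Load a COCO-pretrained D-FINE detector by preset name; the weights are
# fetched from the kaggle_handle registered in backbone_presets above.
detector = keras_hub.models.DFineObjectDetector.from_preset("dfine_small_coco")

# Hypothetical single-image inference; the batched uint8 image tensor follows
# the usual keras-hub image-task input convention, and 640x640 is a typical
# D-FINE input resolution (an assumption, not stated in this diff).
image = np.random.randint(0, 256, size=(1, 640, 640, 3), dtype="uint8")
predictions = detector.predict(image)
```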