keras-hub-nightly 0.15.0.dev20240823171555__py3-none-any.whl → 0.16.0.dev2024092017__py3-none-any.whl
This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective public registries.
- keras_hub/__init__.py +0 -6
- keras_hub/api/__init__.py +2 -0
- keras_hub/api/bounding_box/__init__.py +36 -0
- keras_hub/api/layers/__init__.py +14 -0
- keras_hub/api/models/__init__.py +97 -48
- keras_hub/api/tokenizers/__init__.py +30 -0
- keras_hub/api/utils/__init__.py +22 -0
- keras_hub/src/api_export.py +15 -9
- keras_hub/src/bounding_box/__init__.py +13 -0
- keras_hub/src/bounding_box/converters.py +529 -0
- keras_hub/src/bounding_box/formats.py +162 -0
- keras_hub/src/bounding_box/iou.py +263 -0
- keras_hub/src/bounding_box/to_dense.py +95 -0
- keras_hub/src/bounding_box/to_ragged.py +99 -0
- keras_hub/src/bounding_box/utils.py +194 -0
- keras_hub/src/bounding_box/validate_format.py +99 -0
- keras_hub/src/layers/preprocessing/audio_converter.py +121 -0
- keras_hub/src/layers/preprocessing/image_converter.py +130 -0
- keras_hub/src/layers/preprocessing/masked_lm_mask_generator.py +2 -0
- keras_hub/src/layers/preprocessing/multi_segment_packer.py +9 -8
- keras_hub/src/layers/preprocessing/preprocessing_layer.py +2 -29
- keras_hub/src/layers/preprocessing/random_deletion.py +33 -31
- keras_hub/src/layers/preprocessing/random_swap.py +33 -31
- keras_hub/src/layers/preprocessing/resizing_image_converter.py +101 -0
- keras_hub/src/layers/preprocessing/start_end_packer.py +3 -2
- keras_hub/src/models/albert/__init__.py +1 -2
- keras_hub/src/models/albert/albert_masked_lm_preprocessor.py +6 -86
- keras_hub/src/models/albert/{albert_classifier.py → albert_text_classifier.py} +34 -10
- keras_hub/src/models/albert/{albert_preprocessor.py → albert_text_classifier_preprocessor.py} +14 -70
- keras_hub/src/models/albert/albert_tokenizer.py +17 -36
- keras_hub/src/models/backbone.py +12 -34
- keras_hub/src/models/bart/__init__.py +1 -2
- keras_hub/src/models/bart/bart_seq_2_seq_lm_preprocessor.py +21 -148
- keras_hub/src/models/bart/bart_tokenizer.py +12 -39
- keras_hub/src/models/bert/__init__.py +1 -5
- keras_hub/src/models/bert/bert_masked_lm_preprocessor.py +6 -87
- keras_hub/src/models/bert/bert_presets.py +1 -4
- keras_hub/src/models/bert/{bert_classifier.py → bert_text_classifier.py} +19 -12
- keras_hub/src/models/bert/{bert_preprocessor.py → bert_text_classifier_preprocessor.py} +14 -70
- keras_hub/src/models/bert/bert_tokenizer.py +17 -35
- keras_hub/src/models/bloom/__init__.py +1 -2
- keras_hub/src/models/bloom/bloom_causal_lm_preprocessor.py +6 -91
- keras_hub/src/models/bloom/bloom_tokenizer.py +12 -41
- keras_hub/src/models/causal_lm.py +10 -29
- keras_hub/src/models/causal_lm_preprocessor.py +195 -0
- keras_hub/src/models/csp_darknet/csp_darknet_backbone.py +54 -15
- keras_hub/src/models/deberta_v3/__init__.py +1 -4
- keras_hub/src/models/deberta_v3/deberta_v3_masked_lm_preprocessor.py +14 -77
- keras_hub/src/models/deberta_v3/{deberta_v3_classifier.py → deberta_v3_text_classifier.py} +16 -11
- keras_hub/src/models/deberta_v3/{deberta_v3_preprocessor.py → deberta_v3_text_classifier_preprocessor.py} +23 -64
- keras_hub/src/models/deberta_v3/deberta_v3_tokenizer.py +30 -25
- keras_hub/src/models/densenet/densenet_backbone.py +46 -22
- keras_hub/src/models/distil_bert/__init__.py +1 -4
- keras_hub/src/models/distil_bert/distil_bert_masked_lm_preprocessor.py +14 -76
- keras_hub/src/models/distil_bert/{distil_bert_classifier.py → distil_bert_text_classifier.py} +17 -12
- keras_hub/src/models/distil_bert/{distil_bert_preprocessor.py → distil_bert_text_classifier_preprocessor.py} +23 -63
- keras_hub/src/models/distil_bert/distil_bert_tokenizer.py +19 -35
- keras_hub/src/models/efficientnet/__init__.py +13 -0
- keras_hub/src/models/efficientnet/efficientnet_backbone.py +569 -0
- keras_hub/src/models/efficientnet/fusedmbconv.py +229 -0
- keras_hub/src/models/efficientnet/mbconv.py +238 -0
- keras_hub/src/models/electra/__init__.py +1 -2
- keras_hub/src/models/electra/electra_tokenizer.py +17 -32
- keras_hub/src/models/f_net/__init__.py +1 -2
- keras_hub/src/models/f_net/f_net_masked_lm_preprocessor.py +12 -78
- keras_hub/src/models/f_net/{f_net_classifier.py → f_net_text_classifier.py} +17 -10
- keras_hub/src/models/f_net/{f_net_preprocessor.py → f_net_text_classifier_preprocessor.py} +19 -63
- keras_hub/src/models/f_net/f_net_tokenizer.py +17 -35
- keras_hub/src/models/falcon/__init__.py +1 -2
- keras_hub/src/models/falcon/falcon_causal_lm_preprocessor.py +6 -89
- keras_hub/src/models/falcon/falcon_tokenizer.py +12 -35
- keras_hub/src/models/gemma/__init__.py +1 -2
- keras_hub/src/models/gemma/gemma_causal_lm_preprocessor.py +6 -90
- keras_hub/src/models/gemma/gemma_decoder_block.py +1 -1
- keras_hub/src/models/gemma/gemma_tokenizer.py +12 -23
- keras_hub/src/models/gpt2/__init__.py +1 -2
- keras_hub/src/models/gpt2/gpt2_causal_lm_preprocessor.py +6 -89
- keras_hub/src/models/gpt2/gpt2_preprocessor.py +12 -90
- keras_hub/src/models/gpt2/gpt2_tokenizer.py +12 -34
- keras_hub/src/models/gpt_neo_x/gpt_neo_x_causal_lm_preprocessor.py +6 -91
- keras_hub/src/models/gpt_neo_x/gpt_neo_x_tokenizer.py +12 -34
- keras_hub/src/models/image_classifier.py +0 -5
- keras_hub/src/models/image_classifier_preprocessor.py +83 -0
- keras_hub/src/models/llama/__init__.py +1 -2
- keras_hub/src/models/llama/llama_causal_lm_preprocessor.py +6 -85
- keras_hub/src/models/llama/llama_tokenizer.py +12 -25
- keras_hub/src/models/llama3/__init__.py +1 -2
- keras_hub/src/models/llama3/llama3_causal_lm_preprocessor.py +6 -89
- keras_hub/src/models/llama3/llama3_tokenizer.py +12 -33
- keras_hub/src/models/masked_lm.py +0 -2
- keras_hub/src/models/masked_lm_preprocessor.py +156 -0
- keras_hub/src/models/mistral/__init__.py +1 -2
- keras_hub/src/models/mistral/mistral_causal_lm_preprocessor.py +6 -91
- keras_hub/src/models/mistral/mistral_tokenizer.py +12 -23
- keras_hub/src/models/mix_transformer/mix_transformer_backbone.py +2 -2
- keras_hub/src/models/mobilenet/__init__.py +13 -0
- keras_hub/src/models/mobilenet/mobilenet_backbone.py +530 -0
- keras_hub/src/models/mobilenet/mobilenet_image_classifier.py +114 -0
- keras_hub/src/models/opt/__init__.py +1 -2
- keras_hub/src/models/opt/opt_causal_lm_preprocessor.py +6 -93
- keras_hub/src/models/opt/opt_tokenizer.py +12 -41
- keras_hub/src/models/pali_gemma/__init__.py +1 -4
- keras_hub/src/models/pali_gemma/pali_gemma_causal_lm_preprocessor.py +28 -28
- keras_hub/src/models/pali_gemma/pali_gemma_image_converter.py +25 -0
- keras_hub/src/models/pali_gemma/pali_gemma_presets.py +5 -5
- keras_hub/src/models/pali_gemma/pali_gemma_tokenizer.py +11 -3
- keras_hub/src/models/phi3/__init__.py +1 -2
- keras_hub/src/models/phi3/phi3_causal_lm.py +3 -9
- keras_hub/src/models/phi3/phi3_causal_lm_preprocessor.py +6 -89
- keras_hub/src/models/phi3/phi3_tokenizer.py +12 -36
- keras_hub/src/models/preprocessor.py +72 -83
- keras_hub/src/models/resnet/__init__.py +6 -0
- keras_hub/src/models/resnet/resnet_backbone.py +390 -42
- keras_hub/src/models/resnet/resnet_image_classifier.py +33 -6
- keras_hub/src/models/resnet/resnet_image_classifier_preprocessor.py +28 -0
- keras_hub/src/models/{llama3/llama3_preprocessor.py → resnet/resnet_image_converter.py} +7 -5
- keras_hub/src/models/resnet/resnet_presets.py +95 -0
- keras_hub/src/models/retinanet/__init__.py +13 -0
- keras_hub/src/models/retinanet/anchor_generator.py +175 -0
- keras_hub/src/models/retinanet/box_matcher.py +259 -0
- keras_hub/src/models/retinanet/non_max_supression.py +578 -0
- keras_hub/src/models/roberta/__init__.py +1 -2
- keras_hub/src/models/roberta/roberta_masked_lm_preprocessor.py +22 -74
- keras_hub/src/models/roberta/{roberta_classifier.py → roberta_text_classifier.py} +16 -11
- keras_hub/src/models/roberta/{roberta_preprocessor.py → roberta_text_classifier_preprocessor.py} +21 -53
- keras_hub/src/models/roberta/roberta_tokenizer.py +13 -52
- keras_hub/src/models/seq_2_seq_lm_preprocessor.py +269 -0
- keras_hub/src/models/stable_diffusion_v3/__init__.py +13 -0
- keras_hub/src/models/stable_diffusion_v3/clip_encoder_block.py +103 -0
- keras_hub/src/models/stable_diffusion_v3/clip_preprocessor.py +93 -0
- keras_hub/src/models/stable_diffusion_v3/clip_text_encoder.py +149 -0
- keras_hub/src/models/stable_diffusion_v3/clip_tokenizer.py +167 -0
- keras_hub/src/models/stable_diffusion_v3/mmdit.py +427 -0
- keras_hub/src/models/stable_diffusion_v3/mmdit_block.py +317 -0
- keras_hub/src/models/stable_diffusion_v3/t5_xxl_preprocessor.py +74 -0
- keras_hub/src/models/stable_diffusion_v3/t5_xxl_text_encoder.py +155 -0
- keras_hub/src/models/stable_diffusion_v3/vae_attention.py +126 -0
- keras_hub/src/models/stable_diffusion_v3/vae_image_decoder.py +186 -0
- keras_hub/src/models/t5/__init__.py +1 -2
- keras_hub/src/models/t5/t5_tokenizer.py +13 -23
- keras_hub/src/models/task.py +71 -116
- keras_hub/src/models/{classifier.py → text_classifier.py} +19 -13
- keras_hub/src/models/text_classifier_preprocessor.py +138 -0
- keras_hub/src/models/whisper/__init__.py +1 -2
- keras_hub/src/models/whisper/{whisper_audio_feature_extractor.py → whisper_audio_converter.py} +20 -18
- keras_hub/src/models/whisper/whisper_backbone.py +0 -3
- keras_hub/src/models/whisper/whisper_presets.py +10 -10
- keras_hub/src/models/whisper/whisper_tokenizer.py +20 -16
- keras_hub/src/models/xlm_roberta/__init__.py +1 -4
- keras_hub/src/models/xlm_roberta/xlm_roberta_masked_lm_preprocessor.py +26 -72
- keras_hub/src/models/xlm_roberta/{xlm_roberta_classifier.py → xlm_roberta_text_classifier.py} +16 -11
- keras_hub/src/models/xlm_roberta/{xlm_roberta_preprocessor.py → xlm_roberta_text_classifier_preprocessor.py} +26 -53
- keras_hub/src/models/xlm_roberta/xlm_roberta_tokenizer.py +25 -10
- keras_hub/src/tests/test_case.py +46 -0
- keras_hub/src/tokenizers/byte_pair_tokenizer.py +30 -17
- keras_hub/src/tokenizers/byte_tokenizer.py +14 -15
- keras_hub/src/tokenizers/sentence_piece_tokenizer.py +20 -7
- keras_hub/src/tokenizers/tokenizer.py +67 -32
- keras_hub/src/tokenizers/unicode_codepoint_tokenizer.py +14 -15
- keras_hub/src/tokenizers/word_piece_tokenizer.py +34 -47
- keras_hub/src/utils/imagenet/__init__.py +13 -0
- keras_hub/src/utils/imagenet/imagenet_utils.py +1067 -0
- keras_hub/src/utils/keras_utils.py +0 -50
- keras_hub/src/utils/preset_utils.py +230 -68
- keras_hub/src/utils/tensor_utils.py +187 -69
- keras_hub/src/utils/timm/convert_resnet.py +19 -16
- keras_hub/src/utils/timm/preset_loader.py +66 -0
- keras_hub/src/utils/transformers/convert_albert.py +193 -0
- keras_hub/src/utils/transformers/convert_bart.py +373 -0
- keras_hub/src/utils/transformers/convert_bert.py +7 -17
- keras_hub/src/utils/transformers/convert_distilbert.py +10 -20
- keras_hub/src/utils/transformers/convert_gemma.py +5 -19
- keras_hub/src/utils/transformers/convert_gpt2.py +5 -18
- keras_hub/src/utils/transformers/convert_llama3.py +7 -18
- keras_hub/src/utils/transformers/convert_mistral.py +129 -0
- keras_hub/src/utils/transformers/convert_pali_gemma.py +7 -29
- keras_hub/src/utils/transformers/preset_loader.py +77 -0
- keras_hub/src/utils/transformers/safetensor_utils.py +2 -2
- keras_hub/src/version_utils.py +1 -1
- keras_hub_nightly-0.16.0.dev2024092017.dist-info/METADATA +202 -0
- keras_hub_nightly-0.16.0.dev2024092017.dist-info/RECORD +334 -0
- {keras_hub_nightly-0.15.0.dev20240823171555.dist-info → keras_hub_nightly-0.16.0.dev2024092017.dist-info}/WHEEL +1 -1
- keras_hub/src/models/bart/bart_preprocessor.py +0 -276
- keras_hub/src/models/bloom/bloom_preprocessor.py +0 -185
- keras_hub/src/models/electra/electra_preprocessor.py +0 -154
- keras_hub/src/models/falcon/falcon_preprocessor.py +0 -187
- keras_hub/src/models/gemma/gemma_preprocessor.py +0 -191
- keras_hub/src/models/gpt_neo_x/gpt_neo_x_preprocessor.py +0 -145
- keras_hub/src/models/llama/llama_preprocessor.py +0 -189
- keras_hub/src/models/mistral/mistral_preprocessor.py +0 -190
- keras_hub/src/models/opt/opt_preprocessor.py +0 -188
- keras_hub/src/models/phi3/phi3_preprocessor.py +0 -190
- keras_hub/src/models/whisper/whisper_preprocessor.py +0 -326
- keras_hub/src/utils/timm/convert.py +0 -37
- keras_hub/src/utils/transformers/convert.py +0 -101
- keras_hub_nightly-0.15.0.dev20240823171555.dist-info/METADATA +0 -34
- keras_hub_nightly-0.15.0.dev20240823171555.dist-info/RECORD +0 -297
- {keras_hub_nightly-0.15.0.dev20240823171555.dist-info → keras_hub_nightly-0.16.0.dev2024092017.dist-info}/top_level.txt +0 -0
keras_hub/src/models/mobilenet/mobilenet_image_classifier.py
@@ -0,0 +1,114 @@
+# Copyright 2024 The KerasHub Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import keras
+
+from keras_hub.src.api_export import keras_hub_export
+from keras_hub.src.models.image_classifier import ImageClassifier
+from keras_hub.src.models.mobilenet.mobilenet_backbone import MobileNetBackbone
+
+
+@keras_hub_export("keras_hub.models.MobileNetImageClassifier")
+class MobileNetImageClassifier(ImageClassifier):
+    """MobileNetV3 image classifier task model.
+
+    To fine-tune with `fit()`, pass a dataset containing tuples of `(x, y)`
+    where `x` is a tensor and `y` is a integer from `[0, num_classes)`.
+    All `ImageClassifier` tasks include a `from_preset()` constructor which can
+    be used to load a pre-trained config and weights.
+
+    Args:
+        backbone: A `keras_hub.models.MobileNetBackbone` instance.
+        num_classes: int. The number of classes to predict.
+        activation: `None`, str or callable. The activation function to use on
+            the `Dense` layer. Set `activation=None` to return the output
+            logits. Defaults to `"softmax"`.
+
+    Examples:
+
+    Call `predict()` to run inference.
+    ```python
+    # Load preset and train
+    images = np.ones((2, 224, 224, 3), dtype="float32")
+    classifier = keras_hub.models.MobileNetImageClassifier.from_preset(
+        "mobilenet_v3_small_imagenet")
+    classifier.predict(images)
+    ```
+
+    Custom backbone.
+    ```python
+    images = np.ones((2, 224, 224, 3), dtype="float32")
+    labels = [0, 3]
+    model = MobileNetBackbone(
+        stackwise_expansion = [1, 4, 6],
+        stackwise_filters = [4, 8, 16],
+        stackwise_kernel_size = [3, 3, 5],
+        stackwise_stride = [2, 2, 1],
+        stackwise_se_ratio = [ 0.25, None, 0.25],
+        stackwise_activation = ["relu", "relu", "hard_swish"],
+        include_rescaling = False,
+        output_filter=1280,
+        activation="hard_swish",
+        inverted_res_block=True,
+    )
+    classifier = keras_hub.models.MobileNetImageClassifier(
+        backbone=backbone,
+        num_classes=4,
+    )
+    classifier.fit(x=images, y=labels, batch_size=2)
+    ```
+    """
+
+    backbone_cls = MobileNetBackbone
+
+    def __init__(
+        self,
+        backbone,
+        num_classes,
+        activation="softmax",
+        preprocessor=None,  # adding this dummy arg for saved model test
+        # TODO: once preprocessor flow is figured out, this needs to be updated
+        **kwargs,
+    ):
+        # === Layers ===
+        self.backbone = backbone
+        self.output_dense = keras.layers.Dense(
+            num_classes,
+            activation=activation,
+            name="predictions",
+        )
+
+        # === Functional Model ===
+        inputs = self.backbone.input
+        x = self.backbone(inputs)
+        outputs = self.output_dense(x)
+        super().__init__(
+            inputs=inputs,
+            outputs=outputs,
+            **kwargs,
+        )
+
+        # === Config ===
+        self.num_classes = num_classes
+        self.activation = activation
+
+    def get_config(self):
+        # Backbone serialized in `super`
+        config = super().get_config()
+        config.update(
+            {
+                "num_classes": self.num_classes,
+                "activation": self.activation,
+            }
+        )
+        return config
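For orientation, the constructor above is standard Keras functional wiring: reuse the backbone's input, run the backbone, and attach a `Dense` head named `"predictions"`. Below is a minimal sketch of that pattern using a toy stand-in backbone (plain Keras only; the convolutional layers and shapes are illustrative assumptions, not `MobileNetBackbone`):

```python
import keras
import numpy as np

# Stand-in "backbone": any functional model mapping images to a feature vector.
inputs = keras.Input(shape=(224, 224, 3))
x = keras.layers.Conv2D(8, 3, strides=2, activation="relu")(inputs)
x = keras.layers.GlobalAveragePooling2D()(x)
backbone = keras.Model(inputs, x)

# Same pattern as the classifier's constructor: reuse the backbone's input,
# run the backbone, then attach a Dense classification head.
output_dense = keras.layers.Dense(4, activation="softmax", name="predictions")
outputs = output_dense(backbone(backbone.input))
classifier = keras.Model(backbone.input, outputs)

classifier.compile(optimizer="adam", loss="sparse_categorical_crossentropy")
classifier.fit(
    np.ones((2, 224, 224, 3), dtype="float32"),
    np.array([0, 3]),
    batch_size=2,
    epochs=1,
)
```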
keras_hub/src/models/opt/__init__.py
@@ -14,7 +14,6 @@
 
 from keras_hub.src.models.opt.opt_backbone import OPTBackbone
 from keras_hub.src.models.opt.opt_presets import backbone_presets
-from keras_hub.src.models.opt.opt_tokenizer import OPTTokenizer
 from keras_hub.src.utils.preset_utils import register_presets
 
-register_presets(backbone_presets,
+register_presets(backbone_presets, OPTBackbone)
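Registering the presets against the backbone class is what lets `from_preset()` resolve short preset names. A hedged usage sketch (the `"opt_125m_en"` name comes from the keras-hub OPT collection; its availability in this nightly build is an assumption):

```python
import keras_hub

# Resolve a registered preset by its short name; weights are downloaded on
# first use and cached locally.
backbone = keras_hub.models.OPTBackbone.from_preset("opt_125m_en")
print(backbone.count_params())
```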
keras_hub/src/models/opt/opt_causal_lm_preprocessor.py
@@ -11,20 +11,14 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
-import keras
-from absl import logging
-
 from keras_hub.src.api_export import keras_hub_export
-from keras_hub.src.models.
-from keras_hub.src.
-
-)
-from keras_hub.src.utils.tensor_utils import strip_to_ragged
+from keras_hub.src.models.causal_lm_preprocessor import CausalLMPreprocessor
+from keras_hub.src.models.opt.opt_backbone import OPTBackbone
+from keras_hub.src.models.opt.opt_tokenizer import OPTTokenizer
 
 
 @keras_hub_export("keras_hub.models.OPTCausalLMPreprocessor")
-class OPTCausalLMPreprocessor(
+class OPTCausalLMPreprocessor(CausalLMPreprocessor):
     """OPT Causal LM preprocessor.
 
     This preprocessing layer is primarily meant to be used with
@@ -92,86 +86,5 @@ class OPTCausalLMPreprocessor(OPTPreprocessor):
     ```
     """
 
-
-
-        x,
-        y=None,
-        sample_weight=None,
-        sequence_length=None,
-    ):
-        if y is not None or sample_weight is not None:
-            logging.warning(
-                "`GPT2CausalLMPreprocessor` generates `y` and `sample_weight` "
-                "based on your input data, but your data already contains `y` "
-                "or `sample_weight`. Your `y` and `sample_weight` will be "
-                "ignored."
-            )
-        sequence_length = sequence_length or self.sequence_length
-
-        x = convert_inputs_to_list_of_tensor_segments(x)[0]
-        x = self.tokenizer(x)
-        # Pad with one extra token to account for the truncation below.
-        token_ids, padding_mask = self.packer(
-            x,
-            sequence_length=sequence_length + 1,
-            add_start_value=self.add_start_token,
-            add_end_value=self.add_end_token,
-        )
-        # The last token does not have a next token, so we truncate it out.
-        x = {
-            "token_ids": token_ids[..., :-1],
-            "padding_mask": padding_mask[..., :-1],
-        }
-        # Target `y` will be the next token.
-        y, sample_weight = token_ids[..., 1:], padding_mask[..., 1:]
-        return keras.utils.pack_x_y_sample_weight(x, y, sample_weight)
-
-    def generate_preprocess(
-        self,
-        x,
-        sequence_length=None,
-    ):
-        """Convert strings to integer token input for generation.
-
-        Similar to calling the layer for training, this method takes in strings
-        or tensor strings, tokenizes and packs the input, and computes a padding
-        mask masking all inputs not filled in with a padded value.
-
-        Unlike calling the layer for training, this method does not compute
-        labels and will never append a `tokenizer.end_token_id` to the end of
-        the sequence (as generation is expected to continue at the end of the
-        inputted prompt).
-        """
-        if not self.built:
-            self.build(None)
-
-        x = convert_inputs_to_list_of_tensor_segments(x)[0]
-        x = self.tokenizer(x)
-        token_ids, padding_mask = self.packer(
-            x, sequence_length=sequence_length, add_end_value=False
-        )
-        return {
-            "token_ids": token_ids,
-            "padding_mask": padding_mask,
-        }
-
-    def generate_postprocess(
-        self,
-        x,
-    ):
-        """Convert integer token output to strings for generation.
-
-        This method reverses `generate_preprocess()`, by first removing all
-        padding and start/end tokens, and then converting the integer sequence
-        back to a string.
-        """
-        if not self.built:
-            self.build(None)
-
-        token_ids, padding_mask = x["token_ids"], x["padding_mask"]
-        ids_to_strip = (
-            self.tokenizer.end_token_id,
-            self.tokenizer.pad_token_id,
-        )
-        token_ids = strip_to_ragged(token_ids, padding_mask, ids_to_strip)
-        return self.tokenizer.detokenize(token_ids)
+    backbone_cls = OPTBackbone
+    tokenizer_cls = OPTTokenizer
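The removed `call()` body above implements the standard next-token shift, which now lives once in the shared `CausalLMPreprocessor` base class; the subclass only declares `backbone_cls` and `tokenizer_cls`. A minimal NumPy sketch of that shift (toy token ids, not a real OPT vocabulary):

```python
import numpy as np

# A toy packed sequence: [start, "a", "b", "c", end, pad] plus its padding mask.
token_ids = np.array([[2, 10, 11, 12, 2, 1]])
padding_mask = np.array([[1, 1, 1, 1, 1, 0]])

# Features drop the final position; labels are the same ids shifted left by
# one, and the shifted padding mask becomes the sample weight.
x = {"token_ids": token_ids[..., :-1], "padding_mask": padding_mask[..., :-1]}
y = token_ids[..., 1:]
sample_weight = padding_mask[..., 1:]
print(x["token_ids"], y, sample_weight, sep="\n")
```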
keras_hub/src/models/opt/opt_tokenizer.py
@@ -14,10 +14,16 @@
 
 
 from keras_hub.src.api_export import keras_hub_export
+from keras_hub.src.models.opt.opt_backbone import OPTBackbone
 from keras_hub.src.tokenizers.byte_pair_tokenizer import BytePairTokenizer
 
 
-@keras_hub_export(
+@keras_hub_export(
+    [
+        "keras_hub.tokenizers.OPTTokenizer",
+        "keras_hub.models.OPTTokenizer",
+    ]
+)
 class OPTTokenizer(BytePairTokenizer):
     """An OPT tokenizer using Byte-Pair Encoding subword segmentation.
 
@@ -27,8 +33,6 @@ class OPTTokenizer(BytePairTokenizer):
     models and provides a `from_preset()` method to automatically download
     a matching vocabulary for a OPT preset.
 
-    This tokenizer does not provide truncation or padding of inputs.
-
     If input is a batch of strings (rank > 0), the layer will output a
     `tf.RaggedTensor` where the last dimension of the output is ragged.
     If input is a scalar string (rank == 0), the layer will output a dense
@@ -65,52 +69,19 @@ class OPTTokenizer(BytePairTokenizer):
     ```
     """
 
+    backbone_cls = OPTBackbone
+
     def __init__(
         self,
         vocabulary=None,
         merges=None,
         **kwargs,
     ):
-        self.
-        self.
-        self.
-
+        self._add_special_token("</s>", "end_token")
+        self._add_special_token("</s>", "start_token")
+        self._add_special_token("<pad>", "pad_token")
         super().__init__(
             vocabulary=vocabulary,
             merges=merges,
-            unsplittable_tokens=[
-                self.start_token,
-                self.pad_token,
-                self.end_token,
-            ],
             **kwargs,
         )
-
-    def set_vocabulary_and_merges(self, vocabulary, merges):
-        super().set_vocabulary_and_merges(vocabulary, merges)
-
-        if vocabulary is not None:
-            # Check for necessary special tokens.
-            for token in [self.start_token, self.pad_token, self.end_token]:
-                if token not in self.vocabulary:
-                    raise ValueError(
-                        f"Cannot find token `'{token}'` in the provided "
-                        f"`vocabulary`. Please provide `'{token}'` in your "
-                        "`vocabulary` or use a pretrained `vocabulary` name."
-                    )
-
-            self.start_token_id = self.token_to_id(self.start_token)
-            self.pad_token_id = self.token_to_id(self.pad_token)
-            self.end_token_id = self.token_to_id(self.end_token)
-        else:
-            self.start_token_id = None
-            self.pad_token_id = None
-            self.end_token_id = None
-
-    def get_config(self):
-        config = super().get_config()
-        # In the constructor, we pass the list of special tokens to the
-        # `unsplittable_tokens` arg of the superclass' constructor. Hence, we
-        # delete it from the config here.
-        del config["unsplittable_tokens"]
-        return config
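With the dual export above, the same tokenizer class is reachable from both `keras_hub.tokenizers` and `keras_hub.models`, and the special-token bookkeeping is handled by `_add_special_token` instead of a hand-rolled `set_vocabulary_and_merges`. A hedged usage sketch (the `"opt_125m_en"` preset name and its availability at this version are assumptions):

```python
import keras_hub

# Both export paths point at the same class.
assert keras_hub.tokenizers.OPTTokenizer is keras_hub.models.OPTTokenizer

tokenizer = keras_hub.tokenizers.OPTTokenizer.from_preset("opt_125m_en")
token_ids = tokenizer("The quick brown fox tripped.")
round_trip = tokenizer.detokenize(token_ids)
```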
keras_hub/src/models/pali_gemma/__init__.py
@@ -15,9 +15,6 @@ from keras_hub.src.models.pali_gemma.pali_gemma_backbone import (
     PaliGemmaBackbone,
 )
 from keras_hub.src.models.pali_gemma.pali_gemma_presets import backbone_presets
-from keras_hub.src.models.pali_gemma.pali_gemma_tokenizer import (
-    PaliGemmaTokenizer,
-)
 from keras_hub.src.utils.preset_utils import register_presets
 
-register_presets(backbone_presets,
+register_presets(backbone_presets, PaliGemmaBackbone)
keras_hub/src/models/pali_gemma/pali_gemma_causal_lm_preprocessor.py
@@ -12,39 +12,47 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import keras
-from absl import logging
-from keras import ops
 
 from keras_hub.src.api_export import keras_hub_export
 from keras_hub.src.layers.preprocessing.multi_segment_packer import (
     MultiSegmentPacker,
 )
-from keras_hub.src.models.
-
+from keras_hub.src.models.causal_lm_preprocessor import CausalLMPreprocessor
+from keras_hub.src.models.pali_gemma.pali_gemma_backbone import (
+    PaliGemmaBackbone,
+)
+from keras_hub.src.models.pali_gemma.pali_gemma_image_converter import (
+    PaliGemmaImageConverter,
 )
 from keras_hub.src.models.pali_gemma.pali_gemma_tokenizer import (
     PaliGemmaTokenizer,
 )
-from keras_hub.src.utils.
-    convert_inputs_to_list_of_tensor_segments,
-)
+from keras_hub.src.utils.tensor_utils import preprocessing_function
 
 
 @keras_hub_export("keras_hub.models.PaliGemmaCausalLMPreprocessor")
-class PaliGemmaCausalLMPreprocessor(
+class PaliGemmaCausalLMPreprocessor(CausalLMPreprocessor):
+    backbone_cls = PaliGemmaBackbone
     tokenizer_cls = PaliGemmaTokenizer
+    image_converter_cls = PaliGemmaImageConverter
 
     def __init__(
         self,
         tokenizer,
-
+        image_converter=None,
+        sequence_length=1024,
         add_start_token=True,
         add_end_token=True,
         **kwargs,
     ):
         super().__init__(
-            tokenizer,
+            tokenizer=tokenizer,
+            sequence_length=sequence_length,
+            add_start_token=add_start_token,
+            add_end_token=add_end_token,
+            **kwargs,
         )
+        self.image_converter = image_converter
 
     def build(self, input_shape):
         # Defer packer creation to `build()` so that we can be sure tokenizer
@@ -58,6 +66,7 @@ class PaliGemmaCausalLMPreprocessor(GemmaCausalLMPreprocessor):
         )
         self.built = True
 
+    @preprocessing_function
     def call(
         self,
         x,
@@ -65,23 +74,12 @@ class PaliGemmaCausalLMPreprocessor(GemmaCausalLMPreprocessor):
         sample_weight=None,
         sequence_length=None,
     ):
-        if y is not None or sample_weight is not None:
-            logging.warning(
-                "`PaliGemmaCausalLMPreprocessor` generates `y` and `sample_weight` "
-                "based on your input data, but your data already contains `y` "
-                "or `sample_weight`. Your `y` and `sample_weight` will be "
-                "ignored."
-            )
         sequence_length = sequence_length or self.sequence_length
-
         images, prompts, responses = x["images"], x["prompts"], x["responses"]
-        if keras.config.backend() == "tensorflow":
-            # Tensorflow backend needs uniform ouput types.
-            images = ops.convert_to_tensor(images)
-        prompts = convert_inputs_to_list_of_tensor_segments(prompts)[0]
         prompts = self.tokenizer(prompts)
-        responses = convert_inputs_to_list_of_tensor_segments(responses)[0]
         responses = self.tokenizer(responses)
+        if self.image_converter:
+            images = self.image_converter(images)
         # Pad with one extra token to account for the truncation below.
         token_ids, segment_ids = self.packer(
             (prompts, responses),
@@ -104,6 +102,7 @@ class PaliGemmaCausalLMPreprocessor(GemmaCausalLMPreprocessor):
         sample_weight = response_mask[..., 1:]
         return keras.utils.pack_x_y_sample_weight(x, y, sample_weight)
 
+    @preprocessing_function
     def generate_preprocess(
         self,
         x,
@@ -125,13 +124,14 @@ class PaliGemmaCausalLMPreprocessor(GemmaCausalLMPreprocessor):
         sequence_length = sequence_length or self.sequence_length
 
         images, prompts = x["images"], x["prompts"]
-        prompts = convert_inputs_to_list_of_tensor_segments(prompts)[0]
         prompts = self.tokenizer(prompts)
-
+        if self.image_converter:
+            images = self.image_converter(images)
         if "responses" in x:
-            responses = x["responses"]
-
-
+            responses = self.tokenizer(x["responses"])
+            segments = (prompts, responses)
+        else:
+            segments = (prompts,)
         token_ids, segment_ids = self.packer(
             segments,
             sequence_length=sequence_length,
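Taken together, the changes above mean the preprocessor now owns the image pipeline: when an `image_converter` is attached, raw images are converted inside `call()` and `generate_preprocess()`. A hedged end-to-end sketch (the preset name is one of the PaliGemma presets in this release; input shapes, prompt text, and the exact call signature behavior are illustrative assumptions):

```python
import numpy as np
import keras_hub

# Load tokenizer + image converter together from a preset and run the
# training-time path shown in the diff: (x, y, sample_weight) come back packed.
preprocessor = keras_hub.models.PaliGemmaCausalLMPreprocessor.from_preset(
    "pali_gemma_3b_mix_224"
)
batch = {
    "images": np.zeros((1, 224, 224, 3), dtype="float32"),
    "prompts": ["answer en where is the cow standing?"],
    "responses": ["beach"],
}
x, y, sample_weight = preprocessor(batch)
```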
keras_hub/src/models/pali_gemma/pali_gemma_image_converter.py
@@ -0,0 +1,25 @@
+# Copyright 2024 The KerasHub Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from keras_hub.src.api_export import keras_hub_export
+from keras_hub.src.layers.preprocessing.resizing_image_converter import (
+    ResizingImageConverter,
+)
+from keras_hub.src.models.pali_gemma.pali_gemma_backbone import (
+    PaliGemmaBackbone,
+)
+
+
+@keras_hub_export("keras_hub.layers.PaliGemmaImageConverter")
+class PaliGemmaImageConverter(ResizingImageConverter):
+    backbone_cls = PaliGemmaBackbone
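The converter itself is a thin `ResizingImageConverter` subclass tied to the PaliGemma backbone; conceptually it resizes (and rescales) images to what the backbone expects. A plain-Keras stand-in of that idea, not the library class, with assumed scale/offset values:

```python
import keras
import numpy as np

# Illustrative stand-in: resize to the model's input resolution, then rescale
# pixel values into the range the backbone was trained on.
resize = keras.layers.Resizing(224, 224)
rescale = keras.layers.Rescaling(scale=1.0 / 127.5, offset=-1.0)

images = np.random.uniform(0.0, 255.0, size=(2, 512, 512, 3)).astype("float32")
converted = rescale(resize(images))
print(converted.shape)  # (2, 224, 224, 3)
```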
keras_hub/src/models/pali_gemma/pali_gemma_presets.py
@@ -25,7 +25,7 @@ backbone_presets = {
             "path": "pali_gemma",
             "model_card": "https://www.kaggle.com/models/google/paligemma",
         },
-        "kaggle_handle": "kaggle://keras/paligemma/keras/pali_gemma_3b_mix_224/
+        "kaggle_handle": "kaggle://keras/paligemma/keras/pali_gemma_3b_mix_224/2",
     },
     "pali_gemma_3b_mix_448": {
         "metadata": {
@@ -37,7 +37,7 @@ backbone_presets = {
             "path": "pali_gemma",
             "model_card": "https://www.kaggle.com/models/google/paligemma",
         },
-        "kaggle_handle": "kaggle://keras/paligemma/keras/pali_gemma_3b_mix_448/
+        "kaggle_handle": "kaggle://keras/paligemma/keras/pali_gemma_3b_mix_448/2",
     },
     "pali_gemma_3b_224": {
         "metadata": {
@@ -49,7 +49,7 @@ backbone_presets = {
             "path": "pali_gemma",
             "model_card": "https://www.kaggle.com/models/google/paligemma",
         },
-        "kaggle_handle": "kaggle://keras/paligemma/keras/pali_gemma_3b_224/
+        "kaggle_handle": "kaggle://keras/paligemma/keras/pali_gemma_3b_224/2",
     },
     "pali_gemma_3b_448": {
         "metadata": {
@@ -61,7 +61,7 @@ backbone_presets = {
             "path": "pali_gemma",
             "model_card": "https://www.kaggle.com/models/google/paligemma",
         },
-        "kaggle_handle": "kaggle://keras/paligemma/keras/pali_gemma_3b_448/
+        "kaggle_handle": "kaggle://keras/paligemma/keras/pali_gemma_3b_448/2",
     },
     "pali_gemma_3b_896": {
         "metadata": {
@@ -73,6 +73,6 @@ backbone_presets = {
             "path": "pali_gemma",
             "model_card": "https://www.kaggle.com/models/google/paligemma",
         },
-        "kaggle_handle": "kaggle://keras/paligemma/keras/pali_gemma_3b_896/
+        "kaggle_handle": "kaggle://keras/paligemma/keras/pali_gemma_3b_896/2",
     },
 }
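Each `kaggle_handle` now pins version `/2` of the corresponding Kaggle upload; user code keeps referring to the short preset name, and an explicit handle can also be passed. A hedged sketch (network access and Kaggle credentials are assumed):

```python
import keras_hub

# Short name resolves through the registered presets table.
backbone = keras_hub.models.PaliGemmaBackbone.from_preset("pali_gemma_3b_mix_224")

# An explicit versioned handle (as written in the diff) also works.
backbone = keras_hub.models.PaliGemmaBackbone.from_preset(
    "kaggle://keras/paligemma/keras/pali_gemma_3b_mix_224/2"
)
```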
keras_hub/src/models/pali_gemma/pali_gemma_tokenizer.py
@@ -12,10 +12,18 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 from keras_hub.src.api_export import keras_hub_export
-from keras_hub.src.models.gemma.
+from keras_hub.src.models.gemma.gemma_tokenizer import GemmaTokenizer
+from keras_hub.src.models.pali_gemma.pali_gemma_backbone import (
+    PaliGemmaBackbone,
+)
 
 
-@keras_hub_export(
+@keras_hub_export(
+    [
+        "keras_hub.tokenizers.PaliGemmaTokenizer",
+        "keras_hub.models.PaliGemmaTokenizer",
+    ]
+)
 class PaliGemmaTokenizer(GemmaTokenizer):
     """PaliGemma tokenizer layer based on SentencePiece.
 
@@ -76,4 +84,4 @@ class PaliGemmaTokenizer(GemmaTokenizer):
     ```
     """
 
-
+    backbone_cls = PaliGemmaBackbone
keras_hub/src/models/phi3/__init__.py
@@ -14,7 +14,6 @@
 
 from keras_hub.src.models.phi3.phi3_backbone import Phi3Backbone
 from keras_hub.src.models.phi3.phi3_presets import backbone_presets
-from keras_hub.src.models.phi3.phi3_tokenizer import Phi3Tokenizer
 from keras_hub.src.utils.preset_utils import register_presets
 
-register_presets(backbone_presets,
+register_presets(backbone_presets, Phi3Backbone)
keras_hub/src/models/phi3/phi3_causal_lm.py
@@ -19,7 +19,6 @@ from keras_hub.src.models.phi3.phi3_backbone import Phi3Backbone
 from keras_hub.src.models.phi3.phi3_causal_lm_preprocessor import (
     Phi3CausalLMPreprocessor,
 )
-from keras_hub.src.utils.python_utils import classproperty
 from keras_hub.src.utils.tensor_utils import any_equal
 
 
@@ -46,6 +45,9 @@ class Phi3CausalLM(CausalLM):
             should be preprocessed before calling the model.
     """
 
+    backbone_cls = Phi3Backbone
+    preprocessor_cls = Phi3CausalLMPreprocessor
+
     def __init__(self, backbone, preprocessor=None, **kwargs):
         # === Layers ===
         self.backbone = backbone
@@ -61,14 +63,6 @@ class Phi3CausalLM(CausalLM):
             **kwargs,
         )
 
-    @classproperty
-    def backbone_cls(cls):
-        return Phi3Backbone
-
-    @classproperty
-    def preprocessor_cls(cls):
-        return Phi3CausalLMPreprocessor
-
     def call_with_cache(
         self,
         token_ids,