keras-hub-nightly 0.16.1.dev202409250340__py3-none-any.whl → 0.16.1.dev202409260340__py3-none-any.whl
- keras_hub/api/layers/__init__.py +3 -0
- keras_hub/api/models/__init__.py +16 -0
- keras_hub/api/tokenizers/__init__.py +1 -0
- keras_hub/src/models/{stable_diffusion_v3 → clip}/clip_encoder_block.py +8 -2
- keras_hub/src/models/clip/clip_preprocessor.py +147 -0
- keras_hub/src/models/{stable_diffusion_v3 → clip}/clip_text_encoder.py +60 -57
- keras_hub/src/models/{stable_diffusion_v3 → clip}/clip_tokenizer.py +69 -30
- keras_hub/src/models/densenet/__init__.py +6 -0
- keras_hub/src/models/densenet/densenet_backbone.py +11 -8
- keras_hub/src/models/densenet/densenet_image_classifier.py +27 -4
- keras_hub/src/models/densenet/densenet_image_classifier_preprocessor.py +27 -0
- keras_hub/src/models/densenet/densenet_image_converter.py +23 -0
- keras_hub/src/models/densenet/densenet_presets.py +56 -0
- keras_hub/src/models/stable_diffusion_3/__init__.py +13 -0
- keras_hub/src/models/stable_diffusion_3/flow_match_euler_discrete_scheduler.py +93 -0
- keras_hub/src/models/{stable_diffusion_v3 → stable_diffusion_3}/mmdit.py +351 -26
- keras_hub/src/models/stable_diffusion_3/stable_diffusion_3_backbone.py +630 -0
- keras_hub/src/models/stable_diffusion_3/stable_diffusion_3_text_to_image.py +151 -0
- keras_hub/src/models/stable_diffusion_3/stable_diffusion_3_text_to_image_preprocessor.py +77 -0
- keras_hub/src/models/{stable_diffusion_v3/t5_xxl_text_encoder.py → stable_diffusion_3/t5_encoder.py} +7 -7
- keras_hub/src/models/stable_diffusion_3/vae_image_decoder.py +333 -0
- keras_hub/src/models/{stable_diffusion_v3/t5_xxl_preprocessor.py → t5/t5_preprocessor.py} +12 -3
- keras_hub/src/models/text_to_image.py +295 -0
- keras_hub/src/utils/timm/convert_densenet.py +107 -0
- keras_hub/src/utils/timm/preset_loader.py +3 -0
- keras_hub/src/version_utils.py +1 -1
- {keras_hub_nightly-0.16.1.dev202409250340.dist-info → keras_hub_nightly-0.16.1.dev202409260340.dist-info}/METADATA +1 -1
- {keras_hub_nightly-0.16.1.dev202409250340.dist-info → keras_hub_nightly-0.16.1.dev202409260340.dist-info}/RECORD +31 -23
- keras_hub/src/models/stable_diffusion_v3/clip_preprocessor.py +0 -93
- keras_hub/src/models/stable_diffusion_v3/mmdit_block.py +0 -317
- keras_hub/src/models/stable_diffusion_v3/vae_attention.py +0 -126
- keras_hub/src/models/stable_diffusion_v3/vae_image_decoder.py +0 -186
- /keras_hub/src/models/{stable_diffusion_v3 → clip}/__init__.py +0 -0
- {keras_hub_nightly-0.16.1.dev202409250340.dist-info → keras_hub_nightly-0.16.1.dev202409260340.dist-info}/WHEEL +0 -0
- {keras_hub_nightly-0.16.1.dev202409250340.dist-info → keras_hub_nightly-0.16.1.dev202409260340.dist-info}/top_level.txt +0 -0
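The headline change in this nightly is the graduation of the Stable Diffusion 3 stack out of the experimental stable_diffusion_v3 directory, plus a new TextToImage task base class. A hedged sketch of the intended task-level API; the preset name below is hypothetical, since this diff adds the task class but registers no SD3 presets:

import keras_hub

# Hypothetical preset name; this diff ships the task class, not the presets.
text_to_image = keras_hub.models.StableDiffusion3TextToImage.from_preset(
    "stable_diffusion_3_medium"
)
# `generate()` is provided by the new `TextToImage` base task
# (keras_hub/src/models/text_to_image.py).
image = text_to_image.generate("a photograph of an astronaut riding a horse")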
keras_hub/api/layers/__init__.py
CHANGED
@@ -50,6 +50,9 @@ from keras_hub.src.layers.preprocessing.resizing_image_converter import (
     ResizingImageConverter,
 )
 from keras_hub.src.layers.preprocessing.start_end_packer import StartEndPacker
+from keras_hub.src.models.densenet.densenet_image_converter import (
+    DenseNetImageConverter,
+)
 from keras_hub.src.models.pali_gemma.pali_gemma_image_converter import (
     PaliGemmaImageConverter,
 )
keras_hub/api/models/__init__.py
CHANGED
@@ -66,6 +66,8 @@ from keras_hub.src.models.bloom.bloom_causal_lm_preprocessor import (
 from keras_hub.src.models.bloom.bloom_tokenizer import BloomTokenizer
 from keras_hub.src.models.causal_lm import CausalLM
 from keras_hub.src.models.causal_lm_preprocessor import CausalLMPreprocessor
+from keras_hub.src.models.clip.clip_preprocessor import CLIPPreprocessor
+from keras_hub.src.models.clip.clip_tokenizer import CLIPTokenizer
 from keras_hub.src.models.csp_darknet.csp_darknet_backbone import (
     CSPDarkNetBackbone,
 )
@@ -100,6 +102,9 @@ from keras_hub.src.models.densenet.densenet_backbone import DenseNetBackbone
 from keras_hub.src.models.densenet.densenet_image_classifier import (
     DenseNetImageClassifier,
 )
+from keras_hub.src.models.densenet.densenet_image_classifier_preprocessor import (
+    DenseNetImageClassifierPreprocessor,
+)
 from keras_hub.src.models.distil_bert.distil_bert_backbone import (
     DistilBertBackbone,
 )
@@ -260,7 +265,17 @@ from keras_hub.src.models.sam.sam_backbone import SAMBackbone
 from keras_hub.src.models.sam.sam_image_segmenter import SAMImageSegmenter
 from keras_hub.src.models.seq_2_seq_lm import Seq2SeqLM
 from keras_hub.src.models.seq_2_seq_lm_preprocessor import Seq2SeqLMPreprocessor
+from keras_hub.src.models.stable_diffusion_3.stable_diffusion_3_backbone import (
+    StableDiffusion3Backbone,
+)
+from keras_hub.src.models.stable_diffusion_3.stable_diffusion_3_text_to_image import (
+    StableDiffusion3TextToImage,
+)
+from keras_hub.src.models.stable_diffusion_3.stable_diffusion_3_text_to_image_preprocessor import (
+    StableDiffusion3TextToImagePreprocessor,
+)
 from keras_hub.src.models.t5.t5_backbone import T5Backbone
+from keras_hub.src.models.t5.t5_preprocessor import T5Preprocessor
 from keras_hub.src.models.t5.t5_tokenizer import T5Tokenizer
 from keras_hub.src.models.task import Task
 from keras_hub.src.models.text_classifier import TextClassifier
@@ -268,6 +283,7 @@ from keras_hub.src.models.text_classifier import TextClassifier as Classifier
 from keras_hub.src.models.text_classifier_preprocessor import (
     TextClassifierPreprocessor,
 )
+from keras_hub.src.models.text_to_image import TextToImage
 from keras_hub.src.models.vgg.vgg_backbone import VGGBackbone
 from keras_hub.src.models.vgg.vgg_image_classifier import VGGImageClassifier
 from keras_hub.src.models.vit_det.vit_det_backbone import ViTDetBackbone
keras_hub/api/tokenizers/__init__.py
CHANGED
@@ -21,6 +21,7 @@ from keras_hub.src.models.albert.albert_tokenizer import AlbertTokenizer
 from keras_hub.src.models.bart.bart_tokenizer import BartTokenizer
 from keras_hub.src.models.bert.bert_tokenizer import BertTokenizer
 from keras_hub.src.models.bloom.bloom_tokenizer import BloomTokenizer
+from keras_hub.src.models.clip.clip_tokenizer import CLIPTokenizer
 from keras_hub.src.models.deberta_v3.deberta_v3_tokenizer import (
     DebertaV3Tokenizer,
 )
keras_hub/src/models/{stable_diffusion_v3 → clip}/clip_encoder_block.py
RENAMED
@@ -11,6 +11,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+from keras import dtype_policies
 from keras import layers
 from keras import ops
 
@@ -43,7 +44,7 @@ class CLIPEncoderBlock(layers.Layer):
             intermediate_activation = quick_gelu
 
         self.layer_norm_1 = layers.LayerNormalization(
-            epsilon=
+            epsilon=1e-5, dtype="float32", name="layer_norm_1"
         )
         self.attention = layers.MultiHeadAttention(
             num_heads,
@@ -52,7 +53,7 @@ class CLIPEncoderBlock(layers.Layer):
             name="attention",
         )
         self.layer_norm_2 = layers.LayerNormalization(
-            epsilon=
+            epsilon=1e-5, dtype="float32", name="layer_norm_2"
         )
         self.dense_1 = layers.Dense(
             self.intermediate_dim, dtype=self.dtype_policy, name="dense_1"
@@ -67,6 +68,11 @@ class CLIPEncoderBlock(layers.Layer):
     def build(self, input_shape):
         self.layer_norm_1.build(input_shape)
         self.attention.build(input_shape, input_shape, input_shape)
+        # Before Keras 3.2, there was no setter for `dtype_policy`. Directly
+        # assign a `DTypePolicy` instead.
+        self.attention._softmax.dtype_policy = dtype_policies.DTypePolicy(
+            "float32"
+        )
         self.layer_norm_2.build(input_shape)
         self.dense_1.build(input_shape)
         input_shape = self.dense_1.compute_output_shape(input_shape)
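The build() change above pins the attention softmax to float32 even when the rest of the block runs at a lower precision. A minimal sketch of the same pattern, assuming Keras 3 with a mixed_float16 global policy (note that `_softmax` is a private attribute of MultiHeadAttention, which is also what the diff relies on):

import keras
from keras import dtype_policies
from keras import layers

keras.mixed_precision.set_global_policy("mixed_float16")

# Run attention under the (float16) layer policy, but force the numerically
# sensitive softmax to float32, mirroring the diff above.
attention = layers.MultiHeadAttention(num_heads=8, key_dim=64)
attention.build((None, 77, 512), (None, 77, 512), (None, 77, 512))
attention._softmax.dtype_policy = dtype_policies.DTypePolicy("float32")
print(attention._softmax.dtype_policy.name)  # "float32"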
keras_hub/src/models/clip/clip_preprocessor.py
ADDED
@@ -0,0 +1,147 @@
+# Copyright 2024 The KerasHub Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import keras
+
+from keras_hub.src.api_export import keras_hub_export
+from keras_hub.src.layers.preprocessing.start_end_packer import StartEndPacker
+from keras_hub.src.models.clip.clip_tokenizer import CLIPTokenizer
+from keras_hub.src.models.preprocessor import Preprocessor
+from keras_hub.src.utils.tensor_utils import preprocessing_function
+
+try:
+    import tensorflow as tf
+except ImportError:
+    tf = None
+
+
+@keras_hub_export("keras_hub.models.CLIPPreprocessor")
+class CLIPPreprocessor(Preprocessor):
+    """CLIP preprocessing layer which tokenizes and packs inputs.
+
+    This preprocessing layer will do 2 things:
+
+    - Tokenize the inputs using the `tokenizer`.
+    - Construct a dictionary with keys `"token_ids"`, `"padding_mask"`.
+
+    This layer can be used directly with `tf.data.Dataset.map` to preprocess
+    string data in the `(x, y, sample_weight)` format used by
+    `keras.Model.fit`.
+
+    The call method of this layer accepts three arguments, `x`, `y`, and
+    `sample_weight`. `x` can be a python string or tensor representing a single
+    segment, a list of python strings representing a batch of single segments,
+    or a list of tensors representing multiple segments to be packed together.
+    `y` and `sample_weight` are both optional, can have any format, and will be
+    passed through unaltered.
+
+    `CLIPPreprocessor` forces the input to have only one segment, as CLIP is
+    mainly used for generation tasks. For tasks having multi-segment inputs
+    like "glue/mnli", please use a model designed for classification purposes
+    such as BERT or RoBERTa.
+
+    Args:
+        tokenizer: A `keras_hub.models.CLIPTokenizer` instance.
+        sequence_length: The length of the packed inputs.
+        add_start_token: If `True`, the preprocessor will prepend the tokenizer
+            start token to each input sequence.
+        add_end_token: If `True`, the preprocessor will append the tokenizer
+            end token to each input sequence.
+        to_lower: bool. Whether to lower the inputs.
+
+    Call arguments:
+        x: A string, `tf.Tensor` or list of python strings.
+        y: Any label data. Will be passed through unaltered.
+        sample_weight: Any label weight data. Will be passed through unaltered.
+        sequence_length: Pass to override the configured `sequence_length` of
+            the layer.
+    """
+
+    # TODO: Add example once we have a CLIP model.
+
+    tokenizer_cls = CLIPTokenizer
+
+    def __init__(
+        self,
+        tokenizer,
+        sequence_length=77,
+        add_start_token=True,
+        add_end_token=True,
+        to_lower=True,
+        **kwargs,
+    ):
+        super().__init__(**kwargs)
+        self.tokenizer = tokenizer
+        self.packer = None
+        self.sequence_length = sequence_length
+        self.add_start_token = add_start_token
+        self.add_end_token = add_end_token
+        self.to_lower = to_lower
+
+    def build(self, input_shape):
+        # Defer packer creation to `build()` so that we can be sure tokenizer
+        # assets have loaded when restoring a saved model.
+        self.packer = StartEndPacker(
+            start_value=self.tokenizer.start_token_id,
+            end_value=self.tokenizer.end_token_id,
+            pad_value=self.tokenizer.end_token_id,
+            sequence_length=self.sequence_length,
+            return_padding_mask=True,
+        )
+        self.built = True
+
+    @preprocessing_function
+    def call(
+        self,
+        x,
+        y=None,
+        sample_weight=None,
+        sequence_length=None,
+    ):
+        sequence_length = sequence_length or self.sequence_length
+        if self.to_lower:
+            x = tf.strings.lower(x)
+        token_ids, padding_mask = self.packer(
+            self.tokenizer(x),
+            sequence_length=sequence_length,
+            add_start_value=self.add_start_token,
+            add_end_value=self.add_end_token,
+        )
+        x = {
+            "token_ids": token_ids,
+            "padding_mask": padding_mask,
+        }
+        return keras.utils.pack_x_y_sample_weight(x, y, sample_weight)
+
+    def get_config(self):
+        config = super().get_config()
+        config.update(
+            {
+                "sequence_length": self.sequence_length,
+                "add_start_token": self.add_start_token,
+                "add_end_token": self.add_end_token,
+                "to_lower": self.to_lower,
+            }
+        )
+        return config
+
+    @property
+    def sequence_length(self):
+        """The padded length of model input sequences."""
+        return self._sequence_length
+
+    @sequence_length.setter
+    def sequence_length(self, value):
+        self._sequence_length = value
+        if self.packer is not None:
+            self.packer.sequence_length = value
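The new preprocessor and tokenizer can be exercised together. A toy sketch; the vocabulary and merges below are made up for illustration and are far smaller than real CLIP assets, so the exact ids are not meaningful:

import keras_hub

# Toy assets; a real CLIP vocabulary has roughly 49k entries.
vocabulary = {
    "<|startoftext|>": 0,
    "<|endoftext|>": 1,
    "a</w>": 2,
    "c": 3,
    "ca": 4,
    "cat</w>": 5,
}
merges = ["c a", "ca t</w>"]  # hypothetical merge rules
tokenizer = keras_hub.models.CLIPTokenizer(vocabulary=vocabulary, merges=merges)
preprocessor = keras_hub.models.CLIPPreprocessor(tokenizer, sequence_length=8)
x = preprocessor("A cat")  # `to_lower=True` lowercases before tokenizing
print(x["token_ids"])      # start/end tokens added, padded with the end id
print(x["padding_mask"])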
keras_hub/src/models/{stable_diffusion_v3 → clip}/clip_text_encoder.py
RENAMED
@@ -11,21 +11,46 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-import keras
 from keras import layers
-from keras import ops
 
 from keras_hub.src.layers.modeling.token_and_position_embedding import (
     TokenAndPositionEmbedding,
 )
-from keras_hub.src.models.
-
-
+from keras_hub.src.models.backbone import Backbone
+from keras_hub.src.models.clip.clip_encoder_block import CLIPEncoderBlock
+
 
+class CLIPTextEncoder(Backbone):
+    """CLIP text core network with hyperparameters.
+
+    Args:
+        vocabulary_size: int. The size of the token vocabulary.
+        embedding_dim: int. The output dimension of the embedding layer.
+        hidden_dim: int. The size of the transformer hidden state at the end
+            of each transformer layer.
+        num_layers: int. The number of transformer layers.
+        num_heads: int. The number of attention heads for each transformer.
+        intermediate_dim: int. The output dimension of the first Dense layer in
+            a two-layer feedforward network for each transformer.
+        intermediate_dim: int. The output dimension of the first Dense layer in
+            a two-layer feedforward network for each transformer.
+        intermediate_activation: activation function. The activation that
+            is used for the first Dense layer in a two-layer feedforward network
+            for each transformer.
+        intermediate_output_index: optional int. The index of the intermediate
+            output. If specified, the output will become a dictionary with two
+            keys `"sequence_output"` and `"intermediate_output"`.
+        max_sequence_length: int. The maximum sequence length that this encoder
+            can consume.
+        dtype: string or `keras.mixed_precision.DTypePolicy`. The dtype to use
+            for the models computations and weights. Note that some
+            computations, such as softmax and layer normalization will always
+            be done a float32 precision regardless of dtype.
+    """
 
-class CLIPTextEncoder(keras.Model):
     def __init__(
         self,
+        vocabulary_size,
         embedding_dim,
         hidden_dim,
         num_layers,
@@ -33,9 +58,9 @@ class CLIPTextEncoder(keras.Model):
         intermediate_dim,
         intermediate_activation="quick_gelu",
         intermediate_output_index=None,
-
-        sequence_length=77,
+        max_sequence_length=77,
         dtype=None,
+        name=None,
         **kwargs,
     ):
         if (
@@ -44,13 +69,17 @@ class CLIPTextEncoder(keras.Model):
         ):
             intermediate_output_index += num_layers
 
+        # `prefix` is used to prevent duplicate name when utilizing multiple
+        # CLIP models within a single model, such as in StableDiffusion3.
+        prefix = str(name) + "_" if name is not None else ""
+
         # === Layers ===
         self.embedding = TokenAndPositionEmbedding(
             vocabulary_size=vocabulary_size,
-            sequence_length=
+            sequence_length=max_sequence_length,
             embedding_dim=embedding_dim,
             dtype=dtype,
-            name="embedding",
+            name=f"{prefix}embedding",
         )
         self.encoder_layers = [
             CLIPEncoderBlock(
@@ -59,59 +88,44 @@ class CLIPTextEncoder(keras.Model):
                 intermediate_dim,
                 intermediate_activation,
                 dtype=dtype,
+                name=f"{prefix}encoder_block_{i}",
             )
-            for
+            for i in range(num_layers)
         ]
         self.layer_norm = layers.LayerNormalization(
-            epsilon=
-        )
-        self.text_projection = layers.Dense(
-            hidden_dim,
-            use_bias=False,
-            dtype=dtype,
-            name="text_projection",
+            epsilon=1e-6, dtype="float32", name=f"{prefix}layer_norm"
         )
 
         # === Functional Model ===
-
-            shape=(
+        token_id_input = layers.Input(
+            shape=(None,), dtype="int32", name="token_ids"
         )
-        x = self.embedding(
-
-        # Encoder.
+        x = self.embedding(token_id_input)
+        intermediate_output = None
        for i, block in enumerate(self.encoder_layers):
             x = block(x)
             if i == intermediate_output_index:
-
+                intermediate_output = x
         x = self.layer_norm(x)
-
-        if encoder_intermediate_output is not None:
-            encoder_intermediate_output = self.layer_norm(
-                encoder_intermediate_output
-            )
-        # Projection.
-        indices = ops.expand_dims(
-            ops.cast(ops.argmax(encoder_token_ids, axis=-1), "int32"), axis=-1
-        )
-        pooled_output = ops.take_along_axis(x, indices[:, :, None], axis=1)
-        pooled_output = ops.squeeze(pooled_output, axis=1)
-        projection_output = self.text_projection(pooled_output)
+        sequence_output = x
 
-        outputs = {
-            "encoder_sequence_output": encoder_output,
-            "encoder_pooled_output": pooled_output,
-            "encoder_projection_output": projection_output,
-        }
         if intermediate_output_index is not None:
-            outputs
-
+            outputs = {
+                "sequence_output": sequence_output,
+                "intermediate_output": intermediate_output,
+            }
+        else:
+            outputs = sequence_output
         super().__init__(
-            inputs={"
+            inputs={"token_ids": token_id_input},
             outputs=outputs,
+            name=name,
             **kwargs,
         )
 
         # === Config ===
+        self.vocabulary_size = vocabulary_size
+        self.max_sequence_length = max_sequence_length
         self.embedding_dim = embedding_dim
         self.hidden_dim = hidden_dim
         self.num_layers = num_layers
@@ -119,22 +133,12 @@ class CLIPTextEncoder(keras.Model):
         self.intermediate_dim = intermediate_dim
         self.intermediate_activation = intermediate_activation
         self.intermediate_output_index = intermediate_output_index
-        self.vocabulary_size = vocabulary_size
-        self.sequence_length = sequence_length
-
-        if dtype is not None:
-            try:
-                self.dtype_policy = keras.dtype_policies.get(dtype)
-            # Before Keras 3.2, there is no `keras.dtype_policies.get`.
-            except AttributeError:
-                if isinstance(dtype, keras.DTypePolicy):
-                    dtype = dtype.name
-                self.dtype_policy = keras.DTypePolicy(dtype)
 
     def get_config(self):
         config = super().get_config()
         config.update(
             {
+                "vocabulary_size": self.vocabulary_size,
                 "embedding_dim": self.embedding_dim,
                 "hidden_dim": self.hidden_dim,
                 "num_layers": self.num_layers,
@@ -142,8 +146,7 @@ class CLIPTextEncoder(keras.Model):
                 "intermediate_dim": self.intermediate_dim,
                 "intermediate_activation": self.intermediate_activation,
                 "intermediate_output_index": self.intermediate_output_index,
-                "
-                "sequence_length": self.sequence_length,
+                "max_sequence_length": self.max_sequence_length,
             }
         )
         return config
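The encoder is now a functional Backbone keyed on "token_ids" alone; the old pooled/projection outputs are gone. A small instantiation sketch with made-up hyperparameters (the class is internal, so it is imported from the src path):

import numpy as np

from keras_hub.src.models.clip.clip_text_encoder import CLIPTextEncoder

# Tiny, made-up hyperparameters for illustration only.
encoder = CLIPTextEncoder(
    vocabulary_size=1000,
    embedding_dim=64,
    hidden_dim=64,
    num_layers=2,
    num_heads=2,
    intermediate_dim=128,
    intermediate_output_index=-2,  # also return the penultimate block output
    max_sequence_length=77,
    name="clip_l",  # prefixes internal layer names, e.g. "clip_l_embedding"
)
token_ids = np.random.randint(0, 1000, size=(1, 77))
outputs = encoder({"token_ids": token_ids})
print(outputs["sequence_output"].shape)      # (1, 77, 64)
print(outputs["intermediate_output"].shape)  # (1, 77, 64)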
keras_hub/src/models/{stable_diffusion_v3 → clip}/clip_tokenizer.py
RENAMED
@@ -11,9 +11,12 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+
+from keras_hub.src.api_export import keras_hub_export
 from keras_hub.src.tokenizers.byte_pair_tokenizer import BytePairTokenizer
 from keras_hub.src.tokenizers.byte_pair_tokenizer import convert_to_ragged_batch
 from keras_hub.src.tokenizers.byte_pair_tokenizer import split_strings_for_bpe
+from keras_hub.src.utils.tensor_utils import preprocessing_function
 
 try:
     import tensorflow as tf
@@ -21,10 +24,51 @@ except ImportError:
     tf = None
 
 
+@keras_hub_export(
+    [
+        "keras_hub.tokenizers.CLIPTokenizer",
+        "keras_hub.models.CLIPTokenizer",
+    ]
+)
 class CLIPTokenizer(BytePairTokenizer):
-
-
-
+    """A CLIP tokenizer using Byte-Pair Encoding subword segmentation.
+
+    This tokenizer class will tokenize raw strings into integer sequences and
+    is based on `keras_hub.tokenizers.BytePairTokenizer`. Unlike the
+    underlying tokenizer, it will check for all special tokens needed by CLIP
+    models and provides a `from_preset()` method to automatically download
+    a matching vocabulary for a CLIP preset.
+
+    If input is a batch of strings (rank > 0), the layer will output a
+    `tf.RaggedTensor` where the last dimension of the output is ragged.
+
+    If input is a scalar string (rank == 0), the layer will output a dense
+    `tf.Tensor` with static shape `[None]`.
+
+    Args:
+        vocabulary: string or dict, maps token to integer ids. If it is a
+            string, it should be the file path to a json file.
+        merges: string or list, contains the merge rule. If it is a string,
+            it should be the file path to merge rules. The merge rule file
+            should have one merge rule per line. Every merge rule contains
+            merge entities separated by a space.
+        pad_with_end_token: bool. Whether to pad the output with `end_token`.
+    """
+
+    # TODO: Add example and `backbone_cls` once we have a CLIP model.
+
+    backbone_cls = None
+
+    def __init__(
+        self,
+        vocabulary=None,
+        merges=None,
+        pad_with_end_token=False,
+        **kwargs,
+    ):
+        self._add_special_token("<|startoftext|>", "start_token")
+        self._add_special_token("<|endoftext|>", "end_token")
+        self.pad_token_id = 0
 
         super().__init__(
             vocabulary=vocabulary,
@@ -33,35 +77,21 @@ class CLIPTokenizer(BytePairTokenizer):
             **kwargs,
         )
 
-
-
-
-
-
-
-            raise ValueError(
-                f"Cannot find token `'{self.end_token}'` in the provided "
-                f"`vocabulary`. Please provide `'{self.end_token}'` in "
-                "your `vocabulary` or use a pretrained `vocabulary` name."
-            )
-
-            self.start_token_id = self.token_to_id(self.start_token)
-            self.end_token_id = self.token_to_id(self.end_token)
-            self.pad_token_id = 0
-        else:
-            self.end_token_id = None
-            self.start_token_id = None
-            self.pad_token_id = None
+        # When `pad_with_end_token` is True, we need to access the vocabulary,
+        # so the check is required.
+        if pad_with_end_token:
+            self._check_vocabulary()
+            self.pad_token_id = self.end_token_id
+        self.pad_with_end_token = pad_with_end_token
 
     def _bpe_merge_and_update_cache(self, tokens):
         """Process unseen tokens and add to cache."""
         words = self._transform_bytes(tokens)
 
-        # In
+        # In CLIP, we need to add `</w>` to the last word.
         words = tf.strings.reduce_join(words, axis=1, separator=" ")
         words = tf.strings.join([words, "</w>"])
         words = tf.strings.split(words, sep=" ")
-
         tokenized_words = self._bpe_merge(words)
 
         # For each word, join all its token by a whitespace,
@@ -71,17 +101,20 @@ class CLIPTokenizer(BytePairTokenizer):
         )
         self.cache.insert(tokens, tokenized_words)
 
+    @preprocessing_function
     def tokenize(self, inputs):
         self._check_vocabulary()
-        if not isinstance(inputs, (tf.Tensor, tf.RaggedTensor)):
-            inputs = tf.convert_to_tensor(inputs)
-
         if self.add_prefix_space:
             inputs = tf.strings.join([" ", inputs])
 
-
-        if
+        unbatched = inputs.shape.rank == 0
+        if unbatched:
             inputs = tf.expand_dims(inputs, 0)
+        if inputs.shape.rank > 1:
+            raise ValueError(
+                "`tokenize()` inputs should be a string, list of strings, or "
+                f"string tensor with rank < 2. Received: {inputs}"
+            )
 
         raw_tokens = split_strings_for_bpe(inputs, self.unsplittable_tokens)
 
@@ -131,12 +164,13 @@ class CLIPTokenizer(BytePairTokenizer):
             tokens = tokens.to_tensor(shape=output_shape)
 
         # Convert to a dense output if input in scalar
-        if
+        if unbatched:
             tokens = tf.squeeze(tokens, 0)
             tf.ensure_shape(tokens, shape=[self.sequence_length])
 
         return tokens
 
+    @preprocessing_function
     def detokenize(self, inputs):
         self._check_vocabulary()
         inputs, unbatched, _ = convert_to_ragged_batch(inputs)
@@ -160,6 +194,11 @@ class CLIPTokenizer(BytePairTokenizer):
 
     def get_config(self):
         config = super().get_config()
+        config.update(
+            {
+                "pad_with_end_token": self.pad_with_end_token,
+            }
+        )
         # In the constructor, we pass the list of special tokens to the
         # `unsplittable_tokens` arg of the superclass' constructor. Hence, we
         # delete it from the config here.
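The new pad_with_end_token flag eagerly resolves the pad id from the vocabulary, which is why the constructor now calls _check_vocabulary(). A short sketch with the same kind of toy assets as above (illustrative only):

import keras_hub

vocabulary = {"<|startoftext|>": 0, "<|endoftext|>": 1, "a</w>": 2}
merges = []  # no merge rules needed for this toy vocabulary
tokenizer = keras_hub.models.CLIPTokenizer(
    vocabulary=vocabulary, merges=merges, pad_with_end_token=True
)
print(tokenizer.pad_token_id == tokenizer.end_token_id)  # True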
keras_hub/src/models/densenet/__init__.py
CHANGED
@@ -11,3 +11,9 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+
+from keras_hub.src.models.densenet.densenet_backbone import DenseNetBackbone
+from keras_hub.src.models.densenet.densenet_presets import backbone_presets
+from keras_hub.src.utils.preset_utils import register_presets
+
+register_presets(backbone_presets, DenseNetBackbone)
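Registering the presets at import time wires DenseNetBackbone into the from_preset() machinery. A sketch; the preset name here is a guess, since the contents of the new densenet_presets.py are not shown in this diff:

import keras_hub

# Preset name assumed for illustration; see densenet_presets.py for the
# actual registered names.
backbone = keras_hub.models.DenseNetBackbone.from_preset(
    "densenet_121_imagenet"
)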
keras_hub/src/models/densenet/densenet_backbone.py
CHANGED
@@ -92,11 +92,14 @@ class DenseNetBackbone(FeaturePyramidBackbone):
                 channel_axis,
                 stackwise_num_repeats[stack_index],
                 growth_rate,
-                name=f"
+                name=f"stack{stack_index+1}",
             )
             pyramid_outputs[f"P{index}"] = x
             x = apply_transition_block(
-                x,
+                x,
+                channel_axis,
+                compression_ratio,
+                name=f"transition{stack_index+1}",
             )
 
         x = apply_dense_block(
@@ -104,7 +107,7 @@ class DenseNetBackbone(FeaturePyramidBackbone):
             channel_axis,
             stackwise_num_repeats[-1],
             growth_rate,
-            name=f"
+            name=f"stack{len(stackwise_num_repeats)}",
         )
         pyramid_outputs[f"P{len(stackwise_num_repeats) + 1}"] = x
         x = keras.layers.BatchNormalization(
@@ -148,7 +151,7 @@ def apply_dense_block(x, channel_axis, num_repeats, growth_rate, name=None):
 
     for i in range(num_repeats):
         x = apply_conv_block(
-            x, channel_axis, growth_rate, name=f"{name}
+            x, channel_axis, growth_rate, name=f"{name}_block{i+1}"
         )
     return x
 
@@ -196,9 +199,9 @@ def apply_conv_block(x, channel_axis, growth_rate, name=None):
 
     shortcut = x
     x = keras.layers.BatchNormalization(
-        axis=channel_axis, epsilon=BN_EPSILON, name=f"{name}
+        axis=channel_axis, epsilon=BN_EPSILON, name=f"{name}_1_bn"
     )(x)
-    x = keras.layers.Activation("relu", name=f"{name}
+    x = keras.layers.Activation("relu", name=f"{name}_1_relu")(x)
     x = keras.layers.Conv2D(
         4 * growth_rate,
         1,
@@ -207,9 +210,9 @@ def apply_conv_block(x, channel_axis, growth_rate, name=None):
         name=f"{name}_1_conv",
     )(x)
     x = keras.layers.BatchNormalization(
-        axis=channel_axis, epsilon=BN_EPSILON, name=f"{name}
+        axis=channel_axis, epsilon=BN_EPSILON, name=f"{name}_2_bn"
     )(x)
-    x = keras.layers.Activation("relu", name=f"{name}
+    x = keras.layers.Activation("relu", name=f"{name}_2_relu")(x)
     x = keras.layers.Conv2D(
         growth_rate,
         3,
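The renamed blocks (stack{n}, transition{n}, {name}_block{i}, {name}_1_bn, ...) line up with the layer names the new timm converter in convert_densenet.py maps over. A quick sketch to inspect them; the constructor arguments here are assumed from the backbone's signature in this release:

import keras_hub

# DenseNet-121 stack configuration; argument names assumed for this release.
backbone = keras_hub.models.DenseNetBackbone(
    stackwise_num_repeats=[6, 12, 24, 16],
    image_shape=(224, 224, 3),
)
print([l.name for l in backbone.layers if l.name.startswith("stack1_block1")])
# e.g. ['stack1_block1_1_bn', 'stack1_block1_1_relu', 'stack1_block1_1_conv', ...]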
|