keras-hub-nightly 0.15.0.dev20240823171555 (py3-none-any.whl)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- keras_hub/__init__.py +52 -0
- keras_hub/api/__init__.py +27 -0
- keras_hub/api/layers/__init__.py +47 -0
- keras_hub/api/metrics/__init__.py +24 -0
- keras_hub/api/models/__init__.py +249 -0
- keras_hub/api/samplers/__init__.py +29 -0
- keras_hub/api/tokenizers/__init__.py +35 -0
- keras_hub/src/__init__.py +13 -0
- keras_hub/src/api_export.py +53 -0
- keras_hub/src/layers/__init__.py +13 -0
- keras_hub/src/layers/modeling/__init__.py +13 -0
- keras_hub/src/layers/modeling/alibi_bias.py +143 -0
- keras_hub/src/layers/modeling/cached_multi_head_attention.py +137 -0
- keras_hub/src/layers/modeling/f_net_encoder.py +200 -0
- keras_hub/src/layers/modeling/masked_lm_head.py +239 -0
- keras_hub/src/layers/modeling/position_embedding.py +123 -0
- keras_hub/src/layers/modeling/reversible_embedding.py +311 -0
- keras_hub/src/layers/modeling/rotary_embedding.py +169 -0
- keras_hub/src/layers/modeling/sine_position_encoding.py +108 -0
- keras_hub/src/layers/modeling/token_and_position_embedding.py +150 -0
- keras_hub/src/layers/modeling/transformer_decoder.py +496 -0
- keras_hub/src/layers/modeling/transformer_encoder.py +262 -0
- keras_hub/src/layers/modeling/transformer_layer_utils.py +106 -0
- keras_hub/src/layers/preprocessing/__init__.py +13 -0
- keras_hub/src/layers/preprocessing/masked_lm_mask_generator.py +220 -0
- keras_hub/src/layers/preprocessing/multi_segment_packer.py +319 -0
- keras_hub/src/layers/preprocessing/preprocessing_layer.py +62 -0
- keras_hub/src/layers/preprocessing/random_deletion.py +271 -0
- keras_hub/src/layers/preprocessing/random_swap.py +267 -0
- keras_hub/src/layers/preprocessing/start_end_packer.py +219 -0
- keras_hub/src/metrics/__init__.py +13 -0
- keras_hub/src/metrics/bleu.py +394 -0
- keras_hub/src/metrics/edit_distance.py +197 -0
- keras_hub/src/metrics/perplexity.py +181 -0
- keras_hub/src/metrics/rouge_base.py +204 -0
- keras_hub/src/metrics/rouge_l.py +97 -0
- keras_hub/src/metrics/rouge_n.py +125 -0
- keras_hub/src/models/__init__.py +13 -0
- keras_hub/src/models/albert/__init__.py +20 -0
- keras_hub/src/models/albert/albert_backbone.py +267 -0
- keras_hub/src/models/albert/albert_classifier.py +202 -0
- keras_hub/src/models/albert/albert_masked_lm.py +129 -0
- keras_hub/src/models/albert/albert_masked_lm_preprocessor.py +194 -0
- keras_hub/src/models/albert/albert_preprocessor.py +206 -0
- keras_hub/src/models/albert/albert_presets.py +70 -0
- keras_hub/src/models/albert/albert_tokenizer.py +119 -0
- keras_hub/src/models/backbone.py +311 -0
- keras_hub/src/models/bart/__init__.py +20 -0
- keras_hub/src/models/bart/bart_backbone.py +261 -0
- keras_hub/src/models/bart/bart_preprocessor.py +276 -0
- keras_hub/src/models/bart/bart_presets.py +74 -0
- keras_hub/src/models/bart/bart_seq_2_seq_lm.py +490 -0
- keras_hub/src/models/bart/bart_seq_2_seq_lm_preprocessor.py +262 -0
- keras_hub/src/models/bart/bart_tokenizer.py +124 -0
- keras_hub/src/models/bert/__init__.py +23 -0
- keras_hub/src/models/bert/bert_backbone.py +227 -0
- keras_hub/src/models/bert/bert_classifier.py +183 -0
- keras_hub/src/models/bert/bert_masked_lm.py +131 -0
- keras_hub/src/models/bert/bert_masked_lm_preprocessor.py +198 -0
- keras_hub/src/models/bert/bert_preprocessor.py +184 -0
- keras_hub/src/models/bert/bert_presets.py +147 -0
- keras_hub/src/models/bert/bert_tokenizer.py +112 -0
- keras_hub/src/models/bloom/__init__.py +20 -0
- keras_hub/src/models/bloom/bloom_attention.py +186 -0
- keras_hub/src/models/bloom/bloom_backbone.py +173 -0
- keras_hub/src/models/bloom/bloom_causal_lm.py +298 -0
- keras_hub/src/models/bloom/bloom_causal_lm_preprocessor.py +176 -0
- keras_hub/src/models/bloom/bloom_decoder.py +206 -0
- keras_hub/src/models/bloom/bloom_preprocessor.py +185 -0
- keras_hub/src/models/bloom/bloom_presets.py +121 -0
- keras_hub/src/models/bloom/bloom_tokenizer.py +116 -0
- keras_hub/src/models/causal_lm.py +383 -0
- keras_hub/src/models/classifier.py +109 -0
- keras_hub/src/models/csp_darknet/__init__.py +13 -0
- keras_hub/src/models/csp_darknet/csp_darknet_backbone.py +410 -0
- keras_hub/src/models/csp_darknet/csp_darknet_image_classifier.py +133 -0
- keras_hub/src/models/deberta_v3/__init__.py +24 -0
- keras_hub/src/models/deberta_v3/deberta_v3_backbone.py +210 -0
- keras_hub/src/models/deberta_v3/deberta_v3_classifier.py +228 -0
- keras_hub/src/models/deberta_v3/deberta_v3_masked_lm.py +135 -0
- keras_hub/src/models/deberta_v3/deberta_v3_masked_lm_preprocessor.py +191 -0
- keras_hub/src/models/deberta_v3/deberta_v3_preprocessor.py +206 -0
- keras_hub/src/models/deberta_v3/deberta_v3_presets.py +82 -0
- keras_hub/src/models/deberta_v3/deberta_v3_tokenizer.py +155 -0
- keras_hub/src/models/deberta_v3/disentangled_attention_encoder.py +227 -0
- keras_hub/src/models/deberta_v3/disentangled_self_attention.py +412 -0
- keras_hub/src/models/deberta_v3/relative_embedding.py +94 -0
- keras_hub/src/models/densenet/__init__.py +13 -0
- keras_hub/src/models/densenet/densenet_backbone.py +210 -0
- keras_hub/src/models/densenet/densenet_image_classifier.py +131 -0
- keras_hub/src/models/distil_bert/__init__.py +26 -0
- keras_hub/src/models/distil_bert/distil_bert_backbone.py +187 -0
- keras_hub/src/models/distil_bert/distil_bert_classifier.py +208 -0
- keras_hub/src/models/distil_bert/distil_bert_masked_lm.py +137 -0
- keras_hub/src/models/distil_bert/distil_bert_masked_lm_preprocessor.py +194 -0
- keras_hub/src/models/distil_bert/distil_bert_preprocessor.py +175 -0
- keras_hub/src/models/distil_bert/distil_bert_presets.py +57 -0
- keras_hub/src/models/distil_bert/distil_bert_tokenizer.py +114 -0
- keras_hub/src/models/electra/__init__.py +20 -0
- keras_hub/src/models/electra/electra_backbone.py +247 -0
- keras_hub/src/models/electra/electra_preprocessor.py +154 -0
- keras_hub/src/models/electra/electra_presets.py +95 -0
- keras_hub/src/models/electra/electra_tokenizer.py +104 -0
- keras_hub/src/models/f_net/__init__.py +20 -0
- keras_hub/src/models/f_net/f_net_backbone.py +236 -0
- keras_hub/src/models/f_net/f_net_classifier.py +154 -0
- keras_hub/src/models/f_net/f_net_masked_lm.py +132 -0
- keras_hub/src/models/f_net/f_net_masked_lm_preprocessor.py +196 -0
- keras_hub/src/models/f_net/f_net_preprocessor.py +177 -0
- keras_hub/src/models/f_net/f_net_presets.py +43 -0
- keras_hub/src/models/f_net/f_net_tokenizer.py +95 -0
- keras_hub/src/models/falcon/__init__.py +20 -0
- keras_hub/src/models/falcon/falcon_attention.py +156 -0
- keras_hub/src/models/falcon/falcon_backbone.py +164 -0
- keras_hub/src/models/falcon/falcon_causal_lm.py +291 -0
- keras_hub/src/models/falcon/falcon_causal_lm_preprocessor.py +173 -0
- keras_hub/src/models/falcon/falcon_preprocessor.py +187 -0
- keras_hub/src/models/falcon/falcon_presets.py +30 -0
- keras_hub/src/models/falcon/falcon_tokenizer.py +110 -0
- keras_hub/src/models/falcon/falcon_transformer_decoder.py +255 -0
- keras_hub/src/models/feature_pyramid_backbone.py +73 -0
- keras_hub/src/models/gemma/__init__.py +20 -0
- keras_hub/src/models/gemma/gemma_attention.py +250 -0
- keras_hub/src/models/gemma/gemma_backbone.py +316 -0
- keras_hub/src/models/gemma/gemma_causal_lm.py +448 -0
- keras_hub/src/models/gemma/gemma_causal_lm_preprocessor.py +167 -0
- keras_hub/src/models/gemma/gemma_decoder_block.py +241 -0
- keras_hub/src/models/gemma/gemma_preprocessor.py +191 -0
- keras_hub/src/models/gemma/gemma_presets.py +248 -0
- keras_hub/src/models/gemma/gemma_tokenizer.py +103 -0
- keras_hub/src/models/gemma/rms_normalization.py +40 -0
- keras_hub/src/models/gpt2/__init__.py +20 -0
- keras_hub/src/models/gpt2/gpt2_backbone.py +199 -0
- keras_hub/src/models/gpt2/gpt2_causal_lm.py +437 -0
- keras_hub/src/models/gpt2/gpt2_causal_lm_preprocessor.py +173 -0
- keras_hub/src/models/gpt2/gpt2_preprocessor.py +187 -0
- keras_hub/src/models/gpt2/gpt2_presets.py +82 -0
- keras_hub/src/models/gpt2/gpt2_tokenizer.py +110 -0
- keras_hub/src/models/gpt_neo_x/__init__.py +13 -0
- keras_hub/src/models/gpt_neo_x/gpt_neo_x_attention.py +251 -0
- keras_hub/src/models/gpt_neo_x/gpt_neo_x_backbone.py +175 -0
- keras_hub/src/models/gpt_neo_x/gpt_neo_x_causal_lm.py +201 -0
- keras_hub/src/models/gpt_neo_x/gpt_neo_x_causal_lm_preprocessor.py +141 -0
- keras_hub/src/models/gpt_neo_x/gpt_neo_x_decoder.py +258 -0
- keras_hub/src/models/gpt_neo_x/gpt_neo_x_preprocessor.py +145 -0
- keras_hub/src/models/gpt_neo_x/gpt_neo_x_tokenizer.py +88 -0
- keras_hub/src/models/image_classifier.py +90 -0
- keras_hub/src/models/llama/__init__.py +20 -0
- keras_hub/src/models/llama/llama_attention.py +225 -0
- keras_hub/src/models/llama/llama_backbone.py +188 -0
- keras_hub/src/models/llama/llama_causal_lm.py +327 -0
- keras_hub/src/models/llama/llama_causal_lm_preprocessor.py +170 -0
- keras_hub/src/models/llama/llama_decoder.py +246 -0
- keras_hub/src/models/llama/llama_layernorm.py +48 -0
- keras_hub/src/models/llama/llama_preprocessor.py +189 -0
- keras_hub/src/models/llama/llama_presets.py +80 -0
- keras_hub/src/models/llama/llama_tokenizer.py +84 -0
- keras_hub/src/models/llama3/__init__.py +20 -0
- keras_hub/src/models/llama3/llama3_backbone.py +84 -0
- keras_hub/src/models/llama3/llama3_causal_lm.py +46 -0
- keras_hub/src/models/llama3/llama3_causal_lm_preprocessor.py +173 -0
- keras_hub/src/models/llama3/llama3_preprocessor.py +21 -0
- keras_hub/src/models/llama3/llama3_presets.py +69 -0
- keras_hub/src/models/llama3/llama3_tokenizer.py +63 -0
- keras_hub/src/models/masked_lm.py +101 -0
- keras_hub/src/models/mistral/__init__.py +20 -0
- keras_hub/src/models/mistral/mistral_attention.py +238 -0
- keras_hub/src/models/mistral/mistral_backbone.py +203 -0
- keras_hub/src/models/mistral/mistral_causal_lm.py +328 -0
- keras_hub/src/models/mistral/mistral_causal_lm_preprocessor.py +175 -0
- keras_hub/src/models/mistral/mistral_layer_norm.py +48 -0
- keras_hub/src/models/mistral/mistral_preprocessor.py +190 -0
- keras_hub/src/models/mistral/mistral_presets.py +48 -0
- keras_hub/src/models/mistral/mistral_tokenizer.py +82 -0
- keras_hub/src/models/mistral/mistral_transformer_decoder.py +265 -0
- keras_hub/src/models/mix_transformer/__init__.py +13 -0
- keras_hub/src/models/mix_transformer/mix_transformer_backbone.py +181 -0
- keras_hub/src/models/mix_transformer/mix_transformer_classifier.py +133 -0
- keras_hub/src/models/mix_transformer/mix_transformer_layers.py +300 -0
- keras_hub/src/models/opt/__init__.py +20 -0
- keras_hub/src/models/opt/opt_backbone.py +173 -0
- keras_hub/src/models/opt/opt_causal_lm.py +301 -0
- keras_hub/src/models/opt/opt_causal_lm_preprocessor.py +177 -0
- keras_hub/src/models/opt/opt_preprocessor.py +188 -0
- keras_hub/src/models/opt/opt_presets.py +72 -0
- keras_hub/src/models/opt/opt_tokenizer.py +116 -0
- keras_hub/src/models/pali_gemma/__init__.py +23 -0
- keras_hub/src/models/pali_gemma/pali_gemma_backbone.py +277 -0
- keras_hub/src/models/pali_gemma/pali_gemma_causal_lm.py +313 -0
- keras_hub/src/models/pali_gemma/pali_gemma_causal_lm_preprocessor.py +147 -0
- keras_hub/src/models/pali_gemma/pali_gemma_decoder_block.py +160 -0
- keras_hub/src/models/pali_gemma/pali_gemma_presets.py +78 -0
- keras_hub/src/models/pali_gemma/pali_gemma_tokenizer.py +79 -0
- keras_hub/src/models/pali_gemma/pali_gemma_vit.py +566 -0
- keras_hub/src/models/phi3/__init__.py +20 -0
- keras_hub/src/models/phi3/phi3_attention.py +260 -0
- keras_hub/src/models/phi3/phi3_backbone.py +224 -0
- keras_hub/src/models/phi3/phi3_causal_lm.py +218 -0
- keras_hub/src/models/phi3/phi3_causal_lm_preprocessor.py +173 -0
- keras_hub/src/models/phi3/phi3_decoder.py +260 -0
- keras_hub/src/models/phi3/phi3_layernorm.py +48 -0
- keras_hub/src/models/phi3/phi3_preprocessor.py +190 -0
- keras_hub/src/models/phi3/phi3_presets.py +50 -0
- keras_hub/src/models/phi3/phi3_rotary_embedding.py +137 -0
- keras_hub/src/models/phi3/phi3_tokenizer.py +94 -0
- keras_hub/src/models/preprocessor.py +207 -0
- keras_hub/src/models/resnet/__init__.py +13 -0
- keras_hub/src/models/resnet/resnet_backbone.py +612 -0
- keras_hub/src/models/resnet/resnet_image_classifier.py +136 -0
- keras_hub/src/models/roberta/__init__.py +20 -0
- keras_hub/src/models/roberta/roberta_backbone.py +184 -0
- keras_hub/src/models/roberta/roberta_classifier.py +209 -0
- keras_hub/src/models/roberta/roberta_masked_lm.py +136 -0
- keras_hub/src/models/roberta/roberta_masked_lm_preprocessor.py +198 -0
- keras_hub/src/models/roberta/roberta_preprocessor.py +192 -0
- keras_hub/src/models/roberta/roberta_presets.py +43 -0
- keras_hub/src/models/roberta/roberta_tokenizer.py +132 -0
- keras_hub/src/models/seq_2_seq_lm.py +54 -0
- keras_hub/src/models/t5/__init__.py +20 -0
- keras_hub/src/models/t5/t5_backbone.py +261 -0
- keras_hub/src/models/t5/t5_layer_norm.py +35 -0
- keras_hub/src/models/t5/t5_multi_head_attention.py +324 -0
- keras_hub/src/models/t5/t5_presets.py +95 -0
- keras_hub/src/models/t5/t5_tokenizer.py +100 -0
- keras_hub/src/models/t5/t5_transformer_layer.py +178 -0
- keras_hub/src/models/task.py +419 -0
- keras_hub/src/models/vgg/__init__.py +13 -0
- keras_hub/src/models/vgg/vgg_backbone.py +158 -0
- keras_hub/src/models/vgg/vgg_image_classifier.py +124 -0
- keras_hub/src/models/vit_det/__init__.py +13 -0
- keras_hub/src/models/vit_det/vit_det_backbone.py +204 -0
- keras_hub/src/models/vit_det/vit_layers.py +565 -0
- keras_hub/src/models/whisper/__init__.py +20 -0
- keras_hub/src/models/whisper/whisper_audio_feature_extractor.py +260 -0
- keras_hub/src/models/whisper/whisper_backbone.py +305 -0
- keras_hub/src/models/whisper/whisper_cached_multi_head_attention.py +153 -0
- keras_hub/src/models/whisper/whisper_decoder.py +141 -0
- keras_hub/src/models/whisper/whisper_encoder.py +106 -0
- keras_hub/src/models/whisper/whisper_preprocessor.py +326 -0
- keras_hub/src/models/whisper/whisper_presets.py +148 -0
- keras_hub/src/models/whisper/whisper_tokenizer.py +163 -0
- keras_hub/src/models/xlm_roberta/__init__.py +26 -0
- keras_hub/src/models/xlm_roberta/xlm_roberta_backbone.py +81 -0
- keras_hub/src/models/xlm_roberta/xlm_roberta_classifier.py +225 -0
- keras_hub/src/models/xlm_roberta/xlm_roberta_masked_lm.py +141 -0
- keras_hub/src/models/xlm_roberta/xlm_roberta_masked_lm_preprocessor.py +195 -0
- keras_hub/src/models/xlm_roberta/xlm_roberta_preprocessor.py +205 -0
- keras_hub/src/models/xlm_roberta/xlm_roberta_presets.py +43 -0
- keras_hub/src/models/xlm_roberta/xlm_roberta_tokenizer.py +191 -0
- keras_hub/src/models/xlnet/__init__.py +13 -0
- keras_hub/src/models/xlnet/relative_attention.py +459 -0
- keras_hub/src/models/xlnet/xlnet_backbone.py +222 -0
- keras_hub/src/models/xlnet/xlnet_content_and_query_embedding.py +133 -0
- keras_hub/src/models/xlnet/xlnet_encoder.py +378 -0
- keras_hub/src/samplers/__init__.py +13 -0
- keras_hub/src/samplers/beam_sampler.py +207 -0
- keras_hub/src/samplers/contrastive_sampler.py +231 -0
- keras_hub/src/samplers/greedy_sampler.py +50 -0
- keras_hub/src/samplers/random_sampler.py +77 -0
- keras_hub/src/samplers/sampler.py +237 -0
- keras_hub/src/samplers/serialization.py +97 -0
- keras_hub/src/samplers/top_k_sampler.py +92 -0
- keras_hub/src/samplers/top_p_sampler.py +113 -0
- keras_hub/src/tests/__init__.py +13 -0
- keras_hub/src/tests/test_case.py +608 -0
- keras_hub/src/tokenizers/__init__.py +13 -0
- keras_hub/src/tokenizers/byte_pair_tokenizer.py +638 -0
- keras_hub/src/tokenizers/byte_tokenizer.py +299 -0
- keras_hub/src/tokenizers/sentence_piece_tokenizer.py +267 -0
- keras_hub/src/tokenizers/sentence_piece_tokenizer_trainer.py +150 -0
- keras_hub/src/tokenizers/tokenizer.py +235 -0
- keras_hub/src/tokenizers/unicode_codepoint_tokenizer.py +355 -0
- keras_hub/src/tokenizers/word_piece_tokenizer.py +544 -0
- keras_hub/src/tokenizers/word_piece_tokenizer_trainer.py +176 -0
- keras_hub/src/utils/__init__.py +13 -0
- keras_hub/src/utils/keras_utils.py +130 -0
- keras_hub/src/utils/pipeline_model.py +293 -0
- keras_hub/src/utils/preset_utils.py +621 -0
- keras_hub/src/utils/python_utils.py +21 -0
- keras_hub/src/utils/tensor_utils.py +206 -0
- keras_hub/src/utils/timm/__init__.py +13 -0
- keras_hub/src/utils/timm/convert.py +37 -0
- keras_hub/src/utils/timm/convert_resnet.py +171 -0
- keras_hub/src/utils/transformers/__init__.py +13 -0
- keras_hub/src/utils/transformers/convert.py +101 -0
- keras_hub/src/utils/transformers/convert_bert.py +173 -0
- keras_hub/src/utils/transformers/convert_distilbert.py +184 -0
- keras_hub/src/utils/transformers/convert_gemma.py +187 -0
- keras_hub/src/utils/transformers/convert_gpt2.py +186 -0
- keras_hub/src/utils/transformers/convert_llama3.py +136 -0
- keras_hub/src/utils/transformers/convert_pali_gemma.py +303 -0
- keras_hub/src/utils/transformers/safetensor_utils.py +97 -0
- keras_hub/src/version_utils.py +23 -0
- keras_hub_nightly-0.15.0.dev20240823171555.dist-info/METADATA +34 -0
- keras_hub_nightly-0.15.0.dev20240823171555.dist-info/RECORD +297 -0
- keras_hub_nightly-0.15.0.dev20240823171555.dist-info/WHEEL +5 -0
- keras_hub_nightly-0.15.0.dev20240823171555.dist-info/top_level.txt +1 -0
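The listing above is the full public surface of the wheel: the `keras_hub/api/*` modules define what `import keras_hub` re-exports (layers, metrics, models, samplers, tokenizers), while everything under `keras_hub/src/` holds the implementations. The hunks that follow reproduce three of these files in full: `albert_backbone.py` (+267), `albert_classifier.py` (+202), and `albert_masked_lm.py` (+129). As a quick orientation, here is a minimal, hypothetical smoke test of this nightly build; the pip pin matches the wheel version above, and the constructor arguments follow the `AlbertBackbone` signature shown in the first hunk:

```python
# pip install keras-hub-nightly==0.15.0.dev20240823171555

import numpy as np
import keras_hub

# Build a tiny, randomly initialized ALBERT backbone from the public
# `keras_hub.models` API re-exported by keras_hub/api/models/__init__.py.
backbone = keras_hub.models.AlbertBackbone(
    vocabulary_size=1000,
    num_layers=2,
    num_heads=2,
    embedding_dim=32,
    hidden_dim=64,
    intermediate_dim=128,
)
outputs = backbone(
    {
        "token_ids": np.ones((1, 12), dtype="int32"),
        "segment_ids": np.zeros((1, 12), dtype="int32"),
        "padding_mask": np.ones((1, 12), dtype="int32"),
    }
)
print(outputs["sequence_output"].shape)  # (1, 12, 64) -- hidden_dim wide
```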
keras_hub/src/models/albert/albert_backbone.py
@@ -0,0 +1,267 @@
# Copyright 2024 The KerasHub Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import keras

from keras_hub.src.api_export import keras_hub_export
from keras_hub.src.layers.modeling.position_embedding import PositionEmbedding
from keras_hub.src.layers.modeling.reversible_embedding import (
    ReversibleEmbedding,
)
from keras_hub.src.layers.modeling.transformer_encoder import TransformerEncoder
from keras_hub.src.models.backbone import Backbone
from keras_hub.src.utils.keras_utils import gelu_approximate


def albert_kernel_initializer(stddev=0.02):
    return keras.initializers.TruncatedNormal(stddev=stddev)


@keras_hub_export("keras_hub.models.AlbertBackbone")
class AlbertBackbone(Backbone):
    """ALBERT encoder network.

    This class implements a bi-directional Transformer-based encoder as
    described in
    ["ALBERT: A Lite BERT for Self-supervised Learning of Language Representations"](https://arxiv.org/abs/1909.11942).
    ALBERT is a more efficient variant of BERT, and uses parameter reduction
    techniques such as cross-layer parameter sharing and factorized embedding
    parameterization. This model class includes the embedding lookups and
    transformer layers, but not the masked language model or sentence order
    prediction heads.

    The default constructor gives a fully customizable, randomly initialized
    ALBERT encoder with any number of layers, heads, and embedding dimensions.
    To load preset architectures and weights, use the `from_preset` constructor.

    Disclaimer: Pre-trained models are provided on an "as is" basis, without
    warranties or conditions of any kind.

    Args:
        vocabulary_size: int. The size of the token vocabulary.
        num_layers: int, must be divisible by `num_groups`. The number of
            "virtual" layers, i.e., the total number of times the input sequence
            will be fed through the groups in one forward pass. The input will
            be routed to the correct group based on the layer index.
        num_heads: int. The number of attention heads for each transformer.
            The hidden size must be divisible by the number of attention heads.
        embedding_dim: int. The size of the embeddings.
        hidden_dim: int. The size of the transformer encoding and pooler layers.
        intermediate_dim: int. The output dimension of the first Dense layer in
            a two-layer feedforward network for each transformer.
        num_groups: int. Number of groups, with each group having
            `num_inner_repetitions` number of `TransformerEncoder` layers.
        num_inner_repetitions: int. Number of `TransformerEncoder` layers per
            group.
        dropout: float. Dropout probability for the Transformer encoder.
        max_sequence_length: int. The maximum sequence length that this encoder
            can consume. If None, `max_sequence_length` uses the value from
            sequence length. This determines the variable shape for positional
            embeddings.
        num_segments: int. The number of types that the 'segment_ids' input can
            take.
        dtype: string or `keras.mixed_precision.DTypePolicy`. The dtype to use
            for model computations and weights. Note that some computations,
            such as softmax and layer normalization, will always be done at
            float32 precision regardless of dtype.

    Example:
    ```python
    input_data = {
        "token_ids": np.ones(shape=(1, 12), dtype="int32"),
        "segment_ids": np.array([[0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0]]),
        "padding_mask": np.array([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0]]),
    }

    # Randomly initialized ALBERT encoder
    model = keras_hub.models.AlbertBackbone(
        vocabulary_size=30000,
        num_layers=12,
        num_heads=12,
        num_groups=1,
        num_inner_repetitions=1,
        embedding_dim=128,
        hidden_dim=768,
        intermediate_dim=3072,
        max_sequence_length=12,
    )
    output = model(input_data)
    ```
    """

    def __init__(
        self,
        vocabulary_size,
        num_layers,
        num_heads,
        embedding_dim,
        hidden_dim,
        intermediate_dim,
        num_groups=1,
        num_inner_repetitions=1,
        dropout=0.0,
        max_sequence_length=512,
        num_segments=2,
        dtype=None,
        **kwargs,
    ):
        if num_layers % num_groups != 0:
            raise ValueError(
                "`num_layers` must be divisible by `num_groups`. Received: "
                f"`num_layers={num_layers}` and `num_groups={num_groups}`."
            )

        # === Layers ===
        self.token_embedding = ReversibleEmbedding(
            input_dim=vocabulary_size,
            output_dim=embedding_dim,
            embeddings_initializer=albert_kernel_initializer(),
            dtype=dtype,
            name="token_embedding",
        )
        self.position_embedding = PositionEmbedding(
            initializer=albert_kernel_initializer(),
            sequence_length=max_sequence_length,
            dtype=dtype,
            name="position_embedding",
        )
        self.segment_embedding = keras.layers.Embedding(
            input_dim=num_segments,
            output_dim=embedding_dim,
            embeddings_initializer=albert_kernel_initializer(),
            dtype=dtype,
            name="segment_embedding",
        )
        self.embeddings_add = keras.layers.Add(
            dtype=dtype,
            name="embeddings_add",
        )
        self.embeddings_layer_norm = keras.layers.LayerNormalization(
            axis=-1,
            epsilon=1e-12,
            dtype=dtype,
            name="embeddings_layer_norm",
        )
        self.embeddings_dropout = keras.layers.Dropout(
            dropout,
            dtype=dtype,
            name="embeddings_dropout",
        )
        self.embeddings_projection = keras.layers.Dense(
            hidden_dim,
            kernel_initializer=albert_kernel_initializer(),
            dtype=dtype,
            name="embedding_projection",
        )
        self.transformer_layers = []
        for group_idx in range(num_groups):
            inner_layers = []
            for inner_idx in range(num_inner_repetitions):
                layer = TransformerEncoder(
                    num_heads=num_heads,
                    intermediate_dim=intermediate_dim,
                    activation=gelu_approximate,
                    dropout=dropout,
                    layer_norm_epsilon=1e-12,
                    kernel_initializer=albert_kernel_initializer(),
                    dtype=dtype,
                    name=f"group_{group_idx}_inner_layer_{inner_idx}",
                )
                inner_layers.append(layer)
            self.transformer_layers.append(inner_layers)
        self.pooled_dense = keras.layers.Dense(
            hidden_dim,
            kernel_initializer=albert_kernel_initializer(),
            activation="tanh",
            dtype=dtype,
            name="pooled_dense",
        )

        # === Functional Model ===
        # Inputs
        token_id_input = keras.Input(
            shape=(None,), dtype="int32", name="token_ids"
        )
        segment_id_input = keras.Input(
            shape=(None,), dtype="int32", name="segment_ids"
        )
        padding_mask_input = keras.Input(
            shape=(None,), dtype="int32", name="padding_mask"
        )
        # Embed tokens, positions, and segment ids.
        tokens = self.token_embedding(token_id_input)
        positions = self.position_embedding(tokens)
        segments = self.segment_embedding(segment_id_input)
        # Sum, normalize and apply dropout to embeddings.
        x = self.embeddings_add((tokens, positions, segments))
        x = self.embeddings_layer_norm(x)
        x = self.embeddings_dropout(x)
        x = self.embeddings_projection(x)
        # Call transformer layers with repeated groups.
        num_calls_per_group = num_layers // num_groups
        for group in self.transformer_layers:
            for _ in range(num_calls_per_group):
                for transformer_layer in group:
                    x = transformer_layer(x, padding_mask=padding_mask_input)
        # Construct the two ALBERT outputs. The pooled output is a dense layer
        # on top of the [CLS] token.
        sequence_output = x
        cls_token_index = 0
        pooled_output = self.pooled_dense(x[:, cls_token_index, :])
        super().__init__(
            inputs={
                "token_ids": token_id_input,
                "segment_ids": segment_id_input,
                "padding_mask": padding_mask_input,
            },
            outputs={
                "sequence_output": sequence_output,
                "pooled_output": pooled_output,
            },
            dtype=dtype,
            **kwargs,
        )

        # === Config ===
        self.vocabulary_size = vocabulary_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.num_groups = num_groups
        self.num_inner_repetitions = num_inner_repetitions
        self.embedding_dim = embedding_dim
        self.hidden_dim = hidden_dim
        self.intermediate_dim = intermediate_dim
        self.dropout = dropout
        self.max_sequence_length = max_sequence_length
        self.num_segments = num_segments
        self.cls_token_index = cls_token_index

    def get_config(self):
        config = super().get_config()
        config.update(
            {
                "vocabulary_size": self.vocabulary_size,
                "num_layers": self.num_layers,
                "num_heads": self.num_heads,
                "num_groups": self.num_groups,
                "num_inner_repetitions": self.num_inner_repetitions,
                "embedding_dim": self.embedding_dim,
                "hidden_dim": self.hidden_dim,
                "intermediate_dim": self.intermediate_dim,
                "dropout": self.dropout,
                "max_sequence_length": self.max_sequence_length,
                "num_segments": self.num_segments,
            }
        )
        return config
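The grouping loop in the functional-model section above is the heart of ALBERT's cross-layer parameter sharing: only `num_groups * num_inner_repetitions` `TransformerEncoder` layers are ever constructed, but each group is invoked `num_layers // num_groups` times, so the same weights supply the "virtual" depth. A minimal, standalone sketch of that call schedule (plain Python, illustrative only, mirroring the loop in `__init__`):

```python
# Sketch of the ALBERT layer-sharing schedule from the backbone above.
# Only unique layer names hold weights; depth comes from repeated calls.
def albert_call_schedule(num_layers, num_groups, num_inner_repetitions):
    assert num_layers % num_groups == 0, "num_layers must divide by num_groups"
    num_calls_per_group = num_layers // num_groups
    schedule = []
    for group_idx in range(num_groups):
        for _ in range(num_calls_per_group):
            for inner_idx in range(num_inner_repetitions):
                schedule.append(f"group_{group_idx}_inner_layer_{inner_idx}")
    return schedule

# With an albert_base-style config, one layer is built and called 12 times:
print(albert_call_schedule(num_layers=12, num_groups=1, num_inner_repetitions=1))
# ['group_0_inner_layer_0', 'group_0_inner_layer_0', ...]  (12 entries)
```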
keras_hub/src/models/albert/albert_classifier.py
@@ -0,0 +1,202 @@
# Copyright 2024 The KerasHub Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import keras

from keras_hub.src.api_export import keras_hub_export
from keras_hub.src.models.albert.albert_backbone import AlbertBackbone
from keras_hub.src.models.albert.albert_backbone import (
    albert_kernel_initializer,
)
from keras_hub.src.models.albert.albert_preprocessor import AlbertPreprocessor
from keras_hub.src.models.classifier import Classifier


@keras_hub_export("keras_hub.models.AlbertClassifier")
class AlbertClassifier(Classifier):
    """An end-to-end ALBERT model for classification tasks.

    This model attaches a classification head to a `keras_hub.models.AlbertBackbone`
    backbone, mapping from the backbone outputs to logits suitable for
    a classification task. For usage of this model with pre-trained weights, see
    the `from_preset()` method.

    This model can optionally be configured with a `preprocessor` layer, in
    which case it will automatically apply preprocessing to raw inputs during
    `fit()`, `predict()`, and `evaluate()`. This is done by default when
    creating the model with `from_preset()`.

    Disclaimer: Pre-trained models are provided on an "as is" basis, without
    warranties or conditions of any kind.

    Args:
        backbone: A `keras_hub.models.AlbertBackbone` instance.
        num_classes: int. Number of classes to predict.
        preprocessor: A `keras_hub.models.AlbertPreprocessor` or `None`. If
            `None`, this model will not apply preprocessing, and inputs should
            be preprocessed before calling the model.
        activation: Optional `str` or callable. The
            activation function to use on the model outputs. Set
            `activation="softmax"` to return output probabilities.
            Defaults to `None`.
        dropout: float. The dropout probability value, applied after the dense
            layer.

    Examples:

    Raw string data.
    ```python
    features = ["The quick brown fox jumped.", "I forgot my homework."]
    labels = [0, 3]

    # Pretrained classifier.
    classifier = keras_hub.models.AlbertClassifier.from_preset(
        "albert_base_en_uncased",
        num_classes=4,
    )
    classifier.fit(x=features, y=labels, batch_size=2)
    classifier.predict(x=features, batch_size=2)

    # Re-compile (e.g., with a new learning rate).
    classifier.compile(
        loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
        optimizer=keras.optimizers.Adam(5e-5),
        jit_compile=True,
    )
    # Access backbone programmatically (e.g., to change `trainable`).
    classifier.backbone.trainable = False
    # Fit again.
    classifier.fit(x=features, y=labels, batch_size=2)
    ```

    Preprocessed integer data.
    ```python
    features = {
        "token_ids": np.ones(shape=(2, 12), dtype="int32"),
        "segment_ids": np.array([[0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0]] * 2),
        "padding_mask": np.array([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0]] * 2),
    }
    labels = [0, 3]

    # Pretrained classifier without preprocessing.
    classifier = keras_hub.models.AlbertClassifier.from_preset(
        "albert_base_en_uncased",
        num_classes=4,
        preprocessor=None,
    )
    classifier.fit(x=features, y=labels, batch_size=2)
    ```

    Custom backbone and vocabulary.
    ```python
    features = ["The quick brown fox jumped.", "I forgot my homework."]
    labels = [0, 3]

    bytes_io = io.BytesIO()
    ds = tf.data.Dataset.from_tensor_slices(features)
    sentencepiece.SentencePieceTrainer.train(
        sentence_iterator=ds.as_numpy_iterator(),
        model_writer=bytes_io,
        vocab_size=10,
        model_type="WORD",
        pad_id=0,
        unk_id=1,
        bos_id=2,
        eos_id=3,
        pad_piece="<pad>",
        unk_piece="<unk>",
        bos_piece="[CLS]",
        eos_piece="[SEP]",
        user_defined_symbols="[MASK]",
    )
    tokenizer = keras_hub.models.AlbertTokenizer(
        proto=bytes_io.getvalue(),
    )
    preprocessor = keras_hub.models.AlbertPreprocessor(
        tokenizer=tokenizer,
        sequence_length=128,
    )
    backbone = keras_hub.models.AlbertBackbone(
        vocabulary_size=tokenizer.vocabulary_size(),
        num_layers=4,
        num_heads=4,
        hidden_dim=256,
        embedding_dim=128,
        intermediate_dim=512,
        max_sequence_length=128,
    )
    classifier = keras_hub.models.AlbertClassifier(
        backbone=backbone,
        preprocessor=preprocessor,
        num_classes=4,
    )
    classifier.fit(x=features, y=labels, batch_size=2)
    ```
    """

    backbone_cls = AlbertBackbone
    preprocessor_cls = AlbertPreprocessor

    def __init__(
        self,
        backbone,
        num_classes,
        preprocessor=None,
        activation=None,
        dropout=0.1,
        **kwargs,
    ):
        # === Layers ===
        self.backbone = backbone
        self.preprocessor = preprocessor
        self.output_dense = keras.layers.Dense(
            num_classes,
            kernel_initializer=albert_kernel_initializer(),
            activation=activation,
            dtype=backbone.dtype_policy,
            name="logits",
        )
        self.output_dropout = keras.layers.Dropout(
            dropout,
            dtype=backbone.dtype_policy,
            name="output_dropout",
        )

        # === Functional Model ===
        inputs = backbone.input
        pooled = backbone(inputs)["pooled_output"]
        pooled = self.output_dropout(pooled)
        outputs = self.output_dense(pooled)
        super().__init__(
            inputs=inputs,
            outputs=outputs,
            **kwargs,
        )

        # === Config ===
        self.num_classes = num_classes
        self.activation = keras.activations.get(activation)
        self.dropout = dropout

    def get_config(self):
        config = super().get_config()
        config.update(
            {
                "num_classes": self.num_classes,
                "activation": keras.activations.serialize(self.activation),
                "dropout": self.dropout,
            }
        )

        return config
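One detail worth calling out in `AlbertClassifier`: with the default `activation=None`, the `logits` dense layer emits raw scores, which is why the docstring examples compile with `from_logits=True`. A small sketch of the two equivalent setups (preset name taken from the docstring; illustrative, not part of the diff):

```python
import keras
import keras_hub

# Option 1: raw logits (the default, activation=None). The loss must be
# told the outputs are unnormalized scores.
classifier = keras_hub.models.AlbertClassifier.from_preset(
    "albert_base_en_uncased",
    num_classes=4,
)
classifier.compile(
    loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    optimizer=keras.optimizers.Adam(5e-5),
)

# Option 2: activation="softmax" makes predict() return class
# probabilities, so the loss uses the keras default from_logits=False.
prob_classifier = keras_hub.models.AlbertClassifier.from_preset(
    "albert_base_en_uncased",
    num_classes=4,
    activation="softmax",
)
prob_classifier.compile(
    loss=keras.losses.SparseCategoricalCrossentropy(),
    optimizer=keras.optimizers.Adam(5e-5),
)
```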
keras_hub/src/models/albert/albert_masked_lm.py
@@ -0,0 +1,129 @@
# Copyright 2024 The KerasHub Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import keras

from keras_hub.src.api_export import keras_hub_export
from keras_hub.src.layers.modeling.masked_lm_head import MaskedLMHead
from keras_hub.src.models.albert.albert_backbone import AlbertBackbone
from keras_hub.src.models.albert.albert_backbone import (
    albert_kernel_initializer,
)
from keras_hub.src.models.albert.albert_masked_lm_preprocessor import (
    AlbertMaskedLMPreprocessor,
)
from keras_hub.src.models.masked_lm import MaskedLM
from keras_hub.src.utils.keras_utils import gelu_approximate


@keras_hub_export("keras_hub.models.AlbertMaskedLM")
class AlbertMaskedLM(MaskedLM):
    """An end-to-end ALBERT model for the masked language modeling task.

    This model will train ALBERT on a masked language modeling task.
    The model will predict labels for a number of masked tokens in the
    input data. For usage of this model with pre-trained weights, see the
    `from_preset()` method.

    This model can optionally be configured with a `preprocessor` layer, in
    which case inputs can be raw string features during `fit()`, `predict()`,
    and `evaluate()`. Inputs will be tokenized and dynamically masked during
    training and evaluation. This is done by default when creating the model
    with `from_preset()`.

    Disclaimer: Pre-trained models are provided on an "as is" basis, without
    warranties or conditions of any kind.

    Args:
        backbone: A `keras_hub.models.AlbertBackbone` instance.
        preprocessor: A `keras_hub.models.AlbertMaskedLMPreprocessor` or
            `None`. If `None`, this model will not apply preprocessing, and
            inputs should be preprocessed before calling the model.

    Examples:

    Raw string data.
    ```python
    features = ["The quick brown fox jumped.", "I forgot my homework."]

    # Pretrained language model.
    masked_lm = keras_hub.models.AlbertMaskedLM.from_preset(
        "albert_base_en_uncased",
    )
    masked_lm.fit(x=features, batch_size=2)

    # Re-compile (e.g., with a new learning rate).
    masked_lm.compile(
        loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
        optimizer=keras.optimizers.Adam(5e-5),
        jit_compile=True,
    )
    # Access backbone programmatically (e.g., to change `trainable`).
    masked_lm.backbone.trainable = False
    # Fit again.
    masked_lm.fit(x=features, batch_size=2)
    ```

    Preprocessed integer data.
    ```python
    # Create preprocessed batch where 0 is the mask token.
    features = {
        "token_ids": np.array([[1, 2, 0, 4, 0, 6, 7, 8]] * 2),
        "padding_mask": np.array([[1, 1, 1, 1, 1, 1, 1, 1]] * 2),
        "mask_positions": np.array([[2, 4]] * 2),
        "segment_ids": np.array([[0, 0, 0, 0, 0, 0, 0, 0]] * 2),
    }
    # Labels are the original masked values.
    labels = [[3, 5]] * 2

    masked_lm = keras_hub.models.AlbertMaskedLM.from_preset(
        "albert_base_en_uncased",
        preprocessor=None,
    )
    masked_lm.fit(x=features, y=labels, batch_size=2)
    ```
    """

    backbone_cls = AlbertBackbone
    preprocessor_cls = AlbertMaskedLMPreprocessor

    def __init__(self, backbone, preprocessor=None, **kwargs):
        # === Layers ===
        self.backbone = backbone
        self.preprocessor = preprocessor
        self.masked_lm_head = MaskedLMHead(
            vocabulary_size=backbone.vocabulary_size,
            token_embedding=backbone.token_embedding,
            intermediate_activation=gelu_approximate,
            kernel_initializer=albert_kernel_initializer(),
            dtype=backbone.dtype_policy,
            name="mlm_head",
        )

        # === Functional Model ===
        inputs = {
            **backbone.input,
            "mask_positions": keras.Input(
                shape=(None,), dtype="int32", name="mask_positions"
            ),
        }
        backbone_outputs = backbone(backbone.input)
        outputs = self.masked_lm_head(
            backbone_outputs["sequence_output"], inputs["mask_positions"]
        )
        super().__init__(
            inputs=inputs,
            outputs=outputs,
            **kwargs,
        )
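The `MaskedLMHead` call above takes the full `sequence_output` plus the integer `mask_positions` input and only scores the masked slots. The gather step it relies on can be sketched independently; this is an illustrative reimplementation with `keras.ops`, not the library's internal code:

```python
import numpy as np
from keras import ops

# Sketch of the gather a masked LM head performs: pick out the hidden
# states at each masked position before projecting to the vocabulary.
# Shapes: sequence_output (batch, seq_len, hidden),
#         mask_positions (batch, num_masks).
def gather_mask_positions(sequence_output, mask_positions):
    positions = ops.expand_dims(mask_positions, axis=-1)  # (batch, num_masks, 1)
    return ops.take_along_axis(sequence_output, positions, axis=1)

sequence_output = ops.convert_to_tensor(
    np.random.rand(2, 8, 16).astype("float32")
)
mask_positions = ops.convert_to_tensor(np.array([[2, 4]] * 2))
masked_states = gather_mask_positions(sequence_output, mask_positions)
print(masked_states.shape)  # (2, 2, 16) -- one hidden vector per mask
```

Because the head is built with `token_embedding=backbone.token_embedding` (a `ReversibleEmbedding`), the final projection back to vocabulary logits reuses the input embedding matrix, which is part of ALBERT's parameter-reduction design.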