keras_hub_nightly-0.15.0.dev20240823171555-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- keras_hub/__init__.py +52 -0
- keras_hub/api/__init__.py +27 -0
- keras_hub/api/layers/__init__.py +47 -0
- keras_hub/api/metrics/__init__.py +24 -0
- keras_hub/api/models/__init__.py +249 -0
- keras_hub/api/samplers/__init__.py +29 -0
- keras_hub/api/tokenizers/__init__.py +35 -0
- keras_hub/src/__init__.py +13 -0
- keras_hub/src/api_export.py +53 -0
- keras_hub/src/layers/__init__.py +13 -0
- keras_hub/src/layers/modeling/__init__.py +13 -0
- keras_hub/src/layers/modeling/alibi_bias.py +143 -0
- keras_hub/src/layers/modeling/cached_multi_head_attention.py +137 -0
- keras_hub/src/layers/modeling/f_net_encoder.py +200 -0
- keras_hub/src/layers/modeling/masked_lm_head.py +239 -0
- keras_hub/src/layers/modeling/position_embedding.py +123 -0
- keras_hub/src/layers/modeling/reversible_embedding.py +311 -0
- keras_hub/src/layers/modeling/rotary_embedding.py +169 -0
- keras_hub/src/layers/modeling/sine_position_encoding.py +108 -0
- keras_hub/src/layers/modeling/token_and_position_embedding.py +150 -0
- keras_hub/src/layers/modeling/transformer_decoder.py +496 -0
- keras_hub/src/layers/modeling/transformer_encoder.py +262 -0
- keras_hub/src/layers/modeling/transformer_layer_utils.py +106 -0
- keras_hub/src/layers/preprocessing/__init__.py +13 -0
- keras_hub/src/layers/preprocessing/masked_lm_mask_generator.py +220 -0
- keras_hub/src/layers/preprocessing/multi_segment_packer.py +319 -0
- keras_hub/src/layers/preprocessing/preprocessing_layer.py +62 -0
- keras_hub/src/layers/preprocessing/random_deletion.py +271 -0
- keras_hub/src/layers/preprocessing/random_swap.py +267 -0
- keras_hub/src/layers/preprocessing/start_end_packer.py +219 -0
- keras_hub/src/metrics/__init__.py +13 -0
- keras_hub/src/metrics/bleu.py +394 -0
- keras_hub/src/metrics/edit_distance.py +197 -0
- keras_hub/src/metrics/perplexity.py +181 -0
- keras_hub/src/metrics/rouge_base.py +204 -0
- keras_hub/src/metrics/rouge_l.py +97 -0
- keras_hub/src/metrics/rouge_n.py +125 -0
- keras_hub/src/models/__init__.py +13 -0
- keras_hub/src/models/albert/__init__.py +20 -0
- keras_hub/src/models/albert/albert_backbone.py +267 -0
- keras_hub/src/models/albert/albert_classifier.py +202 -0
- keras_hub/src/models/albert/albert_masked_lm.py +129 -0
- keras_hub/src/models/albert/albert_masked_lm_preprocessor.py +194 -0
- keras_hub/src/models/albert/albert_preprocessor.py +206 -0
- keras_hub/src/models/albert/albert_presets.py +70 -0
- keras_hub/src/models/albert/albert_tokenizer.py +119 -0
- keras_hub/src/models/backbone.py +311 -0
- keras_hub/src/models/bart/__init__.py +20 -0
- keras_hub/src/models/bart/bart_backbone.py +261 -0
- keras_hub/src/models/bart/bart_preprocessor.py +276 -0
- keras_hub/src/models/bart/bart_presets.py +74 -0
- keras_hub/src/models/bart/bart_seq_2_seq_lm.py +490 -0
- keras_hub/src/models/bart/bart_seq_2_seq_lm_preprocessor.py +262 -0
- keras_hub/src/models/bart/bart_tokenizer.py +124 -0
- keras_hub/src/models/bert/__init__.py +23 -0
- keras_hub/src/models/bert/bert_backbone.py +227 -0
- keras_hub/src/models/bert/bert_classifier.py +183 -0
- keras_hub/src/models/bert/bert_masked_lm.py +131 -0
- keras_hub/src/models/bert/bert_masked_lm_preprocessor.py +198 -0
- keras_hub/src/models/bert/bert_preprocessor.py +184 -0
- keras_hub/src/models/bert/bert_presets.py +147 -0
- keras_hub/src/models/bert/bert_tokenizer.py +112 -0
- keras_hub/src/models/bloom/__init__.py +20 -0
- keras_hub/src/models/bloom/bloom_attention.py +186 -0
- keras_hub/src/models/bloom/bloom_backbone.py +173 -0
- keras_hub/src/models/bloom/bloom_causal_lm.py +298 -0
- keras_hub/src/models/bloom/bloom_causal_lm_preprocessor.py +176 -0
- keras_hub/src/models/bloom/bloom_decoder.py +206 -0
- keras_hub/src/models/bloom/bloom_preprocessor.py +185 -0
- keras_hub/src/models/bloom/bloom_presets.py +121 -0
- keras_hub/src/models/bloom/bloom_tokenizer.py +116 -0
- keras_hub/src/models/causal_lm.py +383 -0
- keras_hub/src/models/classifier.py +109 -0
- keras_hub/src/models/csp_darknet/__init__.py +13 -0
- keras_hub/src/models/csp_darknet/csp_darknet_backbone.py +410 -0
- keras_hub/src/models/csp_darknet/csp_darknet_image_classifier.py +133 -0
- keras_hub/src/models/deberta_v3/__init__.py +24 -0
- keras_hub/src/models/deberta_v3/deberta_v3_backbone.py +210 -0
- keras_hub/src/models/deberta_v3/deberta_v3_classifier.py +228 -0
- keras_hub/src/models/deberta_v3/deberta_v3_masked_lm.py +135 -0
- keras_hub/src/models/deberta_v3/deberta_v3_masked_lm_preprocessor.py +191 -0
- keras_hub/src/models/deberta_v3/deberta_v3_preprocessor.py +206 -0
- keras_hub/src/models/deberta_v3/deberta_v3_presets.py +82 -0
- keras_hub/src/models/deberta_v3/deberta_v3_tokenizer.py +155 -0
- keras_hub/src/models/deberta_v3/disentangled_attention_encoder.py +227 -0
- keras_hub/src/models/deberta_v3/disentangled_self_attention.py +412 -0
- keras_hub/src/models/deberta_v3/relative_embedding.py +94 -0
- keras_hub/src/models/densenet/__init__.py +13 -0
- keras_hub/src/models/densenet/densenet_backbone.py +210 -0
- keras_hub/src/models/densenet/densenet_image_classifier.py +131 -0
- keras_hub/src/models/distil_bert/__init__.py +26 -0
- keras_hub/src/models/distil_bert/distil_bert_backbone.py +187 -0
- keras_hub/src/models/distil_bert/distil_bert_classifier.py +208 -0
- keras_hub/src/models/distil_bert/distil_bert_masked_lm.py +137 -0
- keras_hub/src/models/distil_bert/distil_bert_masked_lm_preprocessor.py +194 -0
- keras_hub/src/models/distil_bert/distil_bert_preprocessor.py +175 -0
- keras_hub/src/models/distil_bert/distil_bert_presets.py +57 -0
- keras_hub/src/models/distil_bert/distil_bert_tokenizer.py +114 -0
- keras_hub/src/models/electra/__init__.py +20 -0
- keras_hub/src/models/electra/electra_backbone.py +247 -0
- keras_hub/src/models/electra/electra_preprocessor.py +154 -0
- keras_hub/src/models/electra/electra_presets.py +95 -0
- keras_hub/src/models/electra/electra_tokenizer.py +104 -0
- keras_hub/src/models/f_net/__init__.py +20 -0
- keras_hub/src/models/f_net/f_net_backbone.py +236 -0
- keras_hub/src/models/f_net/f_net_classifier.py +154 -0
- keras_hub/src/models/f_net/f_net_masked_lm.py +132 -0
- keras_hub/src/models/f_net/f_net_masked_lm_preprocessor.py +196 -0
- keras_hub/src/models/f_net/f_net_preprocessor.py +177 -0
- keras_hub/src/models/f_net/f_net_presets.py +43 -0
- keras_hub/src/models/f_net/f_net_tokenizer.py +95 -0
- keras_hub/src/models/falcon/__init__.py +20 -0
- keras_hub/src/models/falcon/falcon_attention.py +156 -0
- keras_hub/src/models/falcon/falcon_backbone.py +164 -0
- keras_hub/src/models/falcon/falcon_causal_lm.py +291 -0
- keras_hub/src/models/falcon/falcon_causal_lm_preprocessor.py +173 -0
- keras_hub/src/models/falcon/falcon_preprocessor.py +187 -0
- keras_hub/src/models/falcon/falcon_presets.py +30 -0
- keras_hub/src/models/falcon/falcon_tokenizer.py +110 -0
- keras_hub/src/models/falcon/falcon_transformer_decoder.py +255 -0
- keras_hub/src/models/feature_pyramid_backbone.py +73 -0
- keras_hub/src/models/gemma/__init__.py +20 -0
- keras_hub/src/models/gemma/gemma_attention.py +250 -0
- keras_hub/src/models/gemma/gemma_backbone.py +316 -0
- keras_hub/src/models/gemma/gemma_causal_lm.py +448 -0
- keras_hub/src/models/gemma/gemma_causal_lm_preprocessor.py +167 -0
- keras_hub/src/models/gemma/gemma_decoder_block.py +241 -0
- keras_hub/src/models/gemma/gemma_preprocessor.py +191 -0
- keras_hub/src/models/gemma/gemma_presets.py +248 -0
- keras_hub/src/models/gemma/gemma_tokenizer.py +103 -0
- keras_hub/src/models/gemma/rms_normalization.py +40 -0
- keras_hub/src/models/gpt2/__init__.py +20 -0
- keras_hub/src/models/gpt2/gpt2_backbone.py +199 -0
- keras_hub/src/models/gpt2/gpt2_causal_lm.py +437 -0
- keras_hub/src/models/gpt2/gpt2_causal_lm_preprocessor.py +173 -0
- keras_hub/src/models/gpt2/gpt2_preprocessor.py +187 -0
- keras_hub/src/models/gpt2/gpt2_presets.py +82 -0
- keras_hub/src/models/gpt2/gpt2_tokenizer.py +110 -0
- keras_hub/src/models/gpt_neo_x/__init__.py +13 -0
- keras_hub/src/models/gpt_neo_x/gpt_neo_x_attention.py +251 -0
- keras_hub/src/models/gpt_neo_x/gpt_neo_x_backbone.py +175 -0
- keras_hub/src/models/gpt_neo_x/gpt_neo_x_causal_lm.py +201 -0
- keras_hub/src/models/gpt_neo_x/gpt_neo_x_causal_lm_preprocessor.py +141 -0
- keras_hub/src/models/gpt_neo_x/gpt_neo_x_decoder.py +258 -0
- keras_hub/src/models/gpt_neo_x/gpt_neo_x_preprocessor.py +145 -0
- keras_hub/src/models/gpt_neo_x/gpt_neo_x_tokenizer.py +88 -0
- keras_hub/src/models/image_classifier.py +90 -0
- keras_hub/src/models/llama/__init__.py +20 -0
- keras_hub/src/models/llama/llama_attention.py +225 -0
- keras_hub/src/models/llama/llama_backbone.py +188 -0
- keras_hub/src/models/llama/llama_causal_lm.py +327 -0
- keras_hub/src/models/llama/llama_causal_lm_preprocessor.py +170 -0
- keras_hub/src/models/llama/llama_decoder.py +246 -0
- keras_hub/src/models/llama/llama_layernorm.py +48 -0
- keras_hub/src/models/llama/llama_preprocessor.py +189 -0
- keras_hub/src/models/llama/llama_presets.py +80 -0
- keras_hub/src/models/llama/llama_tokenizer.py +84 -0
- keras_hub/src/models/llama3/__init__.py +20 -0
- keras_hub/src/models/llama3/llama3_backbone.py +84 -0
- keras_hub/src/models/llama3/llama3_causal_lm.py +46 -0
- keras_hub/src/models/llama3/llama3_causal_lm_preprocessor.py +173 -0
- keras_hub/src/models/llama3/llama3_preprocessor.py +21 -0
- keras_hub/src/models/llama3/llama3_presets.py +69 -0
- keras_hub/src/models/llama3/llama3_tokenizer.py +63 -0
- keras_hub/src/models/masked_lm.py +101 -0
- keras_hub/src/models/mistral/__init__.py +20 -0
- keras_hub/src/models/mistral/mistral_attention.py +238 -0
- keras_hub/src/models/mistral/mistral_backbone.py +203 -0
- keras_hub/src/models/mistral/mistral_causal_lm.py +328 -0
- keras_hub/src/models/mistral/mistral_causal_lm_preprocessor.py +175 -0
- keras_hub/src/models/mistral/mistral_layer_norm.py +48 -0
- keras_hub/src/models/mistral/mistral_preprocessor.py +190 -0
- keras_hub/src/models/mistral/mistral_presets.py +48 -0
- keras_hub/src/models/mistral/mistral_tokenizer.py +82 -0
- keras_hub/src/models/mistral/mistral_transformer_decoder.py +265 -0
- keras_hub/src/models/mix_transformer/__init__.py +13 -0
- keras_hub/src/models/mix_transformer/mix_transformer_backbone.py +181 -0
- keras_hub/src/models/mix_transformer/mix_transformer_classifier.py +133 -0
- keras_hub/src/models/mix_transformer/mix_transformer_layers.py +300 -0
- keras_hub/src/models/opt/__init__.py +20 -0
- keras_hub/src/models/opt/opt_backbone.py +173 -0
- keras_hub/src/models/opt/opt_causal_lm.py +301 -0
- keras_hub/src/models/opt/opt_causal_lm_preprocessor.py +177 -0
- keras_hub/src/models/opt/opt_preprocessor.py +188 -0
- keras_hub/src/models/opt/opt_presets.py +72 -0
- keras_hub/src/models/opt/opt_tokenizer.py +116 -0
- keras_hub/src/models/pali_gemma/__init__.py +23 -0
- keras_hub/src/models/pali_gemma/pali_gemma_backbone.py +277 -0
- keras_hub/src/models/pali_gemma/pali_gemma_causal_lm.py +313 -0
- keras_hub/src/models/pali_gemma/pali_gemma_causal_lm_preprocessor.py +147 -0
- keras_hub/src/models/pali_gemma/pali_gemma_decoder_block.py +160 -0
- keras_hub/src/models/pali_gemma/pali_gemma_presets.py +78 -0
- keras_hub/src/models/pali_gemma/pali_gemma_tokenizer.py +79 -0
- keras_hub/src/models/pali_gemma/pali_gemma_vit.py +566 -0
- keras_hub/src/models/phi3/__init__.py +20 -0
- keras_hub/src/models/phi3/phi3_attention.py +260 -0
- keras_hub/src/models/phi3/phi3_backbone.py +224 -0
- keras_hub/src/models/phi3/phi3_causal_lm.py +218 -0
- keras_hub/src/models/phi3/phi3_causal_lm_preprocessor.py +173 -0
- keras_hub/src/models/phi3/phi3_decoder.py +260 -0
- keras_hub/src/models/phi3/phi3_layernorm.py +48 -0
- keras_hub/src/models/phi3/phi3_preprocessor.py +190 -0
- keras_hub/src/models/phi3/phi3_presets.py +50 -0
- keras_hub/src/models/phi3/phi3_rotary_embedding.py +137 -0
- keras_hub/src/models/phi3/phi3_tokenizer.py +94 -0
- keras_hub/src/models/preprocessor.py +207 -0
- keras_hub/src/models/resnet/__init__.py +13 -0
- keras_hub/src/models/resnet/resnet_backbone.py +612 -0
- keras_hub/src/models/resnet/resnet_image_classifier.py +136 -0
- keras_hub/src/models/roberta/__init__.py +20 -0
- keras_hub/src/models/roberta/roberta_backbone.py +184 -0
- keras_hub/src/models/roberta/roberta_classifier.py +209 -0
- keras_hub/src/models/roberta/roberta_masked_lm.py +136 -0
- keras_hub/src/models/roberta/roberta_masked_lm_preprocessor.py +198 -0
- keras_hub/src/models/roberta/roberta_preprocessor.py +192 -0
- keras_hub/src/models/roberta/roberta_presets.py +43 -0
- keras_hub/src/models/roberta/roberta_tokenizer.py +132 -0
- keras_hub/src/models/seq_2_seq_lm.py +54 -0
- keras_hub/src/models/t5/__init__.py +20 -0
- keras_hub/src/models/t5/t5_backbone.py +261 -0
- keras_hub/src/models/t5/t5_layer_norm.py +35 -0
- keras_hub/src/models/t5/t5_multi_head_attention.py +324 -0
- keras_hub/src/models/t5/t5_presets.py +95 -0
- keras_hub/src/models/t5/t5_tokenizer.py +100 -0
- keras_hub/src/models/t5/t5_transformer_layer.py +178 -0
- keras_hub/src/models/task.py +419 -0
- keras_hub/src/models/vgg/__init__.py +13 -0
- keras_hub/src/models/vgg/vgg_backbone.py +158 -0
- keras_hub/src/models/vgg/vgg_image_classifier.py +124 -0
- keras_hub/src/models/vit_det/__init__.py +13 -0
- keras_hub/src/models/vit_det/vit_det_backbone.py +204 -0
- keras_hub/src/models/vit_det/vit_layers.py +565 -0
- keras_hub/src/models/whisper/__init__.py +20 -0
- keras_hub/src/models/whisper/whisper_audio_feature_extractor.py +260 -0
- keras_hub/src/models/whisper/whisper_backbone.py +305 -0
- keras_hub/src/models/whisper/whisper_cached_multi_head_attention.py +153 -0
- keras_hub/src/models/whisper/whisper_decoder.py +141 -0
- keras_hub/src/models/whisper/whisper_encoder.py +106 -0
- keras_hub/src/models/whisper/whisper_preprocessor.py +326 -0
- keras_hub/src/models/whisper/whisper_presets.py +148 -0
- keras_hub/src/models/whisper/whisper_tokenizer.py +163 -0
- keras_hub/src/models/xlm_roberta/__init__.py +26 -0
- keras_hub/src/models/xlm_roberta/xlm_roberta_backbone.py +81 -0
- keras_hub/src/models/xlm_roberta/xlm_roberta_classifier.py +225 -0
- keras_hub/src/models/xlm_roberta/xlm_roberta_masked_lm.py +141 -0
- keras_hub/src/models/xlm_roberta/xlm_roberta_masked_lm_preprocessor.py +195 -0
- keras_hub/src/models/xlm_roberta/xlm_roberta_preprocessor.py +205 -0
- keras_hub/src/models/xlm_roberta/xlm_roberta_presets.py +43 -0
- keras_hub/src/models/xlm_roberta/xlm_roberta_tokenizer.py +191 -0
- keras_hub/src/models/xlnet/__init__.py +13 -0
- keras_hub/src/models/xlnet/relative_attention.py +459 -0
- keras_hub/src/models/xlnet/xlnet_backbone.py +222 -0
- keras_hub/src/models/xlnet/xlnet_content_and_query_embedding.py +133 -0
- keras_hub/src/models/xlnet/xlnet_encoder.py +378 -0
- keras_hub/src/samplers/__init__.py +13 -0
- keras_hub/src/samplers/beam_sampler.py +207 -0
- keras_hub/src/samplers/contrastive_sampler.py +231 -0
- keras_hub/src/samplers/greedy_sampler.py +50 -0
- keras_hub/src/samplers/random_sampler.py +77 -0
- keras_hub/src/samplers/sampler.py +237 -0
- keras_hub/src/samplers/serialization.py +97 -0
- keras_hub/src/samplers/top_k_sampler.py +92 -0
- keras_hub/src/samplers/top_p_sampler.py +113 -0
- keras_hub/src/tests/__init__.py +13 -0
- keras_hub/src/tests/test_case.py +608 -0
- keras_hub/src/tokenizers/__init__.py +13 -0
- keras_hub/src/tokenizers/byte_pair_tokenizer.py +638 -0
- keras_hub/src/tokenizers/byte_tokenizer.py +299 -0
- keras_hub/src/tokenizers/sentence_piece_tokenizer.py +267 -0
- keras_hub/src/tokenizers/sentence_piece_tokenizer_trainer.py +150 -0
- keras_hub/src/tokenizers/tokenizer.py +235 -0
- keras_hub/src/tokenizers/unicode_codepoint_tokenizer.py +355 -0
- keras_hub/src/tokenizers/word_piece_tokenizer.py +544 -0
- keras_hub/src/tokenizers/word_piece_tokenizer_trainer.py +176 -0
- keras_hub/src/utils/__init__.py +13 -0
- keras_hub/src/utils/keras_utils.py +130 -0
- keras_hub/src/utils/pipeline_model.py +293 -0
- keras_hub/src/utils/preset_utils.py +621 -0
- keras_hub/src/utils/python_utils.py +21 -0
- keras_hub/src/utils/tensor_utils.py +206 -0
- keras_hub/src/utils/timm/__init__.py +13 -0
- keras_hub/src/utils/timm/convert.py +37 -0
- keras_hub/src/utils/timm/convert_resnet.py +171 -0
- keras_hub/src/utils/transformers/__init__.py +13 -0
- keras_hub/src/utils/transformers/convert.py +101 -0
- keras_hub/src/utils/transformers/convert_bert.py +173 -0
- keras_hub/src/utils/transformers/convert_distilbert.py +184 -0
- keras_hub/src/utils/transformers/convert_gemma.py +187 -0
- keras_hub/src/utils/transformers/convert_gpt2.py +186 -0
- keras_hub/src/utils/transformers/convert_llama3.py +136 -0
- keras_hub/src/utils/transformers/convert_pali_gemma.py +303 -0
- keras_hub/src/utils/transformers/safetensor_utils.py +97 -0
- keras_hub/src/version_utils.py +23 -0
- keras_hub_nightly-0.15.0.dev20240823171555.dist-info/METADATA +34 -0
- keras_hub_nightly-0.15.0.dev20240823171555.dist-info/RECORD +297 -0
- keras_hub_nightly-0.15.0.dev20240823171555.dist-info/WHEEL +5 -0
- keras_hub_nightly-0.15.0.dev20240823171555.dist-info/top_level.txt +1 -0
@@ -0,0 +1,210 @@ keras_hub/src/models/deberta_v3/deberta_v3_backbone.py
+# Copyright 2024 The KerasHub Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import keras
+
+from keras_hub.src.api_export import keras_hub_export
+from keras_hub.src.layers.modeling.reversible_embedding import (
+    ReversibleEmbedding,
+)
+from keras_hub.src.models.backbone import Backbone
+from keras_hub.src.models.deberta_v3.disentangled_attention_encoder import (
+    DisentangledAttentionEncoder,
+)
+from keras_hub.src.models.deberta_v3.relative_embedding import RelativeEmbedding
+
+
+def deberta_kernel_initializer(stddev=0.02):
+    return keras.initializers.TruncatedNormal(stddev=stddev)
+
+
+@keras_hub_export("keras_hub.models.DebertaV3Backbone")
+class DebertaV3Backbone(Backbone):
+    """DeBERTa encoder network.
+
+    This network implements a bi-directional Transformer-based encoder as
+    described in
+    ["DeBERTaV3: Improving DeBERTa using ELECTRA-Style Pre-Training with Gradient-Disentangled Embedding Sharing"](https://arxiv.org/abs/2111.09543).
+    It includes the embedding lookups and transformer layers, but does not
+    include the enhanced masked decoding head used during pretraining.
+
+    The default constructor gives a fully customizable, randomly initialized
+    DeBERTa encoder with any number of layers, heads, and embedding
+    dimensions. To load preset architectures and weights, use the `from_preset`
+    constructor.
+
+    Note: `DebertaV3Backbone` has a performance issue on TPUs, and we recommend
+    other models for TPU training and inference.
+
+    Disclaimer: Pre-trained models are provided on an "as is" basis, without
+    warranties or conditions of any kind. The underlying model is provided by a
+    third party and subject to a separate license, available
+    [here](https://github.com/microsoft/DeBERTa).
+
+    Args:
+        vocabulary_size: int. The size of the token vocabulary.
+        num_layers: int. The number of transformer layers.
+        num_heads: int. The number of attention heads for each transformer.
+            The hidden size must be divisible by the number of attention heads.
+        hidden_dim: int. The size of the transformer encoding layer.
+        intermediate_dim: int. The output dimension of the first Dense layer in
+            a two-layer feedforward network for each transformer.
+        dropout: float. Dropout probability for the DeBERTa model.
+        max_sequence_length: int. The maximum sequence length this encoder can
+            consume. The sequence length of the input must be less than
+            `max_sequence_length`.
+        bucket_size: int. The size of the relative position buckets. Generally
+            equal to `max_sequence_length // 2`.
+        dtype: string or `keras.mixed_precision.DTypePolicy`. The dtype to use
+            for model computations and weights. Note that some computations,
+            such as softmax and layer normalization, will always be done at
+            float32 precision regardless of dtype.
+
+    Example:
+    ```python
+    input_data = {
+        "token_ids": np.ones(shape=(1, 12), dtype="int32"),
+        "padding_mask": np.array([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0]]),
+    }
+
+    # Pretrained DeBERTa encoder.
+    model = keras_hub.models.DebertaV3Backbone.from_preset(
+        "deberta_v3_base_en",
+    )
+    model(input_data)
+
+    # Randomly initialized DeBERTa encoder with custom config
+    model = keras_hub.models.DebertaV3Backbone(
+        vocabulary_size=128100,
+        num_layers=12,
+        num_heads=6,
+        hidden_dim=384,
+        intermediate_dim=1536,
+        max_sequence_length=512,
+        bucket_size=256,
+    )
+    # Call the model on the input data.
+    model(input_data)
+    ```
+    """
+
+    def __init__(
+        self,
+        vocabulary_size,
+        num_layers,
+        num_heads,
+        hidden_dim,
+        intermediate_dim,
+        dropout=0.1,
+        max_sequence_length=512,
+        bucket_size=256,
+        dtype=None,
+        **kwargs,
+    ):
+        # === Layers ===
+        self.token_embedding = ReversibleEmbedding(
+            input_dim=vocabulary_size,
+            output_dim=hidden_dim,
+            embeddings_initializer=deberta_kernel_initializer(),
+            dtype=dtype,
+            name="token_embedding",
+        )
+        self.embeddings_layer_norm = keras.layers.LayerNormalization(
+            epsilon=1e-7,
+            dtype=dtype,
+            name="embeddings_layer_norm",
+        )
+        self.embeddings_dropout = keras.layers.Dropout(
+            dropout,
+            dtype=dtype,
+            name="embeddings_dropout",
+        )
+        self.relative_embeddings = RelativeEmbedding(
+            hidden_dim=hidden_dim,
+            bucket_size=bucket_size,
+            layer_norm_epsilon=1e-7,
+            kernel_initializer=deberta_kernel_initializer(),
+            dtype=dtype,
+            name="rel_embedding",
+        )
+        self.transformer_layers = []
+        for i in range(num_layers):
+            layer = DisentangledAttentionEncoder(
+                num_heads=num_heads,
+                intermediate_dim=intermediate_dim,
+                max_position_embeddings=max_sequence_length,
+                bucket_size=bucket_size,
+                dropout=dropout,
+                activation=keras.activations.gelu,
+                layer_norm_epsilon=1e-7,
+                kernel_initializer=deberta_kernel_initializer(),
+                dtype=dtype,
+                name=f"disentangled_attention_encoder_layer_{i}",
+            )
+            self.transformer_layers.append(layer)
+
+        # === Functional Model ===
+        token_id_input = keras.Input(
+            shape=(None,), dtype="int32", name="token_ids"
+        )
+        padding_mask_input = keras.Input(
+            shape=(None,), dtype="int32", name="padding_mask"
+        )
+        x = self.token_embedding(token_id_input)
+        x = self.embeddings_layer_norm(x)
+        x = self.embeddings_dropout(x)
+        rel_embeddings = self.relative_embeddings(x)
+        for transformer_layer in self.transformer_layers:
+            x = transformer_layer(
+                x,
+                rel_embeddings=rel_embeddings,
+                padding_mask=padding_mask_input,
+            )
+        super().__init__(
+            inputs={
+                "token_ids": token_id_input,
+                "padding_mask": padding_mask_input,
+            },
+            outputs=x,
+            dtype=dtype,
+            **kwargs,
+        )
+
+        # === Config ===
+        self.vocabulary_size = vocabulary_size
+        self.num_layers = num_layers
+        self.num_heads = num_heads
+        self.hidden_dim = hidden_dim
+        self.intermediate_dim = intermediate_dim
+        self.dropout = dropout
+        self.max_sequence_length = max_sequence_length
+        self.bucket_size = bucket_size
+        self.start_token_index = 0
+
+    def get_config(self):
+        config = super().get_config()
+        config.update(
+            {
+                "vocabulary_size": self.vocabulary_size,
+                "num_layers": self.num_layers,
+                "num_heads": self.num_heads,
+                "hidden_dim": self.hidden_dim,
+                "intermediate_dim": self.intermediate_dim,
+                "dropout": self.dropout,
+                "max_sequence_length": self.max_sequence_length,
+                "bucket_size": self.bucket_size,
+            }
+        )
+        return config
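The backbone above wires `token_ids` and `padding_mask` through a reversible token embedding, layer norm, dropout, a shared `RelativeEmbedding`, and the stack of `DisentangledAttentionEncoder` layers. A minimal sketch of exercising that wiring, assuming this wheel is installed and imported as `keras_hub` (the layer sizes below are arbitrary, chosen only for illustration):

```python
import numpy as np

import keras_hub

# Tiny, randomly initialized backbone; sizes are illustrative only.
backbone = keras_hub.models.DebertaV3Backbone(
    vocabulary_size=1000,
    num_layers=2,
    num_heads=2,
    hidden_dim=32,
    intermediate_dim=64,
    max_sequence_length=64,
    bucket_size=32,  # Docstring suggests max_sequence_length // 2.
)
# One batch of 12 tokens, all positions unmasked.
outputs = backbone(
    {
        "token_ids": np.ones((1, 12), dtype="int32"),
        "padding_mask": np.ones((1, 12), dtype="int32"),
    }
)
print(outputs.shape)  # (1, 12, 32): one hidden vector per input token.
```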
@@ -0,0 +1,228 @@ keras_hub/src/models/deberta_v3/deberta_v3_classifier.py
+# Copyright 2024 The KerasHub Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import keras
+
+from keras_hub.src.api_export import keras_hub_export
+from keras_hub.src.models.classifier import Classifier
+from keras_hub.src.models.deberta_v3.deberta_v3_backbone import (
+    DebertaV3Backbone,
+)
+from keras_hub.src.models.deberta_v3.deberta_v3_backbone import (
+    deberta_kernel_initializer,
+)
+from keras_hub.src.models.deberta_v3.deberta_v3_preprocessor import (
+    DebertaV3Preprocessor,
+)
+
+
+@keras_hub_export("keras_hub.models.DebertaV3Classifier")
+class DebertaV3Classifier(Classifier):
+    """An end-to-end DeBERTa model for classification tasks.
+
+    This model attaches a classification head to a
+    `keras_hub.models.DebertaV3Backbone` model, mapping from the backbone
+    outputs to logits suitable for a classification task. For usage of
+    this model with pre-trained weights, see the `from_preset()` method.
+
+    This model can optionally be configured with a `preprocessor` layer, in
+    which case it will automatically apply preprocessing to raw inputs during
+    `fit()`, `predict()`, and `evaluate()`. This is done by default when
+    creating the model with `from_preset()`.
+
+    Note: `DebertaV3Backbone` has a performance issue on TPUs, and we recommend
+    other models for TPU training and inference.
+
+    Disclaimer: Pre-trained models are provided on an "as is" basis, without
+    warranties or conditions of any kind. The underlying model is provided by a
+    third party and subject to a separate license, available
+    [here](https://github.com/microsoft/DeBERTa).
+
+    Args:
+        backbone: A `keras_hub.models.DebertaV3Backbone` instance.
+        num_classes: int. Number of classes to predict.
+        preprocessor: A `keras_hub.models.DebertaV3Preprocessor` or `None`. If
+            `None`, this model will not apply preprocessing, and inputs should
+            be preprocessed before calling the model.
+        activation: Optional `str` or callable. The activation function to use
+            on the model outputs. Set `activation="softmax"` to return output
+            probabilities. Defaults to `None`.
+        hidden_dim: int. The size of the pooler layer.
+        dropout: float. Dropout probability applied to the pooled output. For
+            the second dropout layer, `backbone.dropout` is used.
+
+    Examples:
+
+    Raw string data.
+    ```python
+    features = ["The quick brown fox jumped.", "I forgot my homework."]
+    labels = [0, 3]
+
+    # Pretrained classifier.
+    classifier = keras_hub.models.DebertaV3Classifier.from_preset(
+        "deberta_v3_base_en",
+        num_classes=4,
+    )
+    classifier.fit(x=features, y=labels, batch_size=2)
+    classifier.predict(x=features, batch_size=2)
+
+    # Re-compile (e.g., with a new learning rate).
+    classifier.compile(
+        loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
+        optimizer=keras.optimizers.Adam(5e-5),
+        jit_compile=True,
+    )
+    # Access backbone programmatically (e.g., to change `trainable`).
+    classifier.backbone.trainable = False
+    # Fit again.
+    classifier.fit(x=features, y=labels, batch_size=2)
+    ```
+
+    Preprocessed integer data.
+    ```python
+    features = {
+        "token_ids": np.ones(shape=(2, 12), dtype="int32"),
+        "padding_mask": np.array([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0]] * 2),
+    }
+    labels = [0, 3]
+
+    # Pretrained classifier without preprocessing.
+    classifier = keras_hub.models.DebertaV3Classifier.from_preset(
+        "deberta_v3_base_en",
+        num_classes=4,
+        preprocessor=None,
+    )
+    classifier.fit(x=features, y=labels, batch_size=2)
+    ```
+
+    Custom backbone and vocabulary.
+    ```python
+    features = ["The quick brown fox jumped.", "I forgot my homework."]
+    labels = [0, 3]
+
+    bytes_io = io.BytesIO()
+    ds = tf.data.Dataset.from_tensor_slices(features)
+    sentencepiece.SentencePieceTrainer.train(
+        sentence_iterator=ds.as_numpy_iterator(),
+        model_writer=bytes_io,
+        vocab_size=10,
+        model_type="WORD",
+        pad_id=0,
+        bos_id=1,
+        eos_id=2,
+        unk_id=3,
+        pad_piece="[PAD]",
+        bos_piece="[CLS]",
+        eos_piece="[SEP]",
+        unk_piece="[UNK]",
+    )
+    tokenizer = keras_hub.models.DebertaV3Tokenizer(
+        proto=bytes_io.getvalue(),
+    )
+    preprocessor = keras_hub.models.DebertaV3Preprocessor(
+        tokenizer=tokenizer,
+        sequence_length=128,
+    )
+    backbone = keras_hub.models.DebertaV3Backbone(
+        vocabulary_size=30552,
+        num_layers=4,
+        num_heads=4,
+        hidden_dim=256,
+        intermediate_dim=512,
+        max_sequence_length=128,
+    )
+    classifier = keras_hub.models.DebertaV3Classifier(
+        backbone=backbone,
+        preprocessor=preprocessor,
+        num_classes=4,
+    )
+    classifier.fit(x=features, y=labels, batch_size=2)
+    ```
+    """
+
+    backbone_cls = DebertaV3Backbone
+    preprocessor_cls = DebertaV3Preprocessor
+
+    def __init__(
+        self,
+        backbone,
+        num_classes,
+        preprocessor=None,
+        activation=None,
+        hidden_dim=None,
+        dropout=0.0,
+        **kwargs,
+    ):
+        # === Layers ===
+        self.backbone = backbone
+        self.preprocessor = preprocessor
+        self.pooled_dropout = keras.layers.Dropout(
+            dropout,
+            dtype=backbone.dtype_policy,
+            name="pooled_dropout",
+        )
+        hidden_dim = hidden_dim or backbone.hidden_dim
+        self.pooled_dense = keras.layers.Dense(
+            hidden_dim,
+            activation=keras.activations.gelu,
+            dtype=backbone.dtype_policy,
+            name="pooled_dense",
+        )
+        self.output_dropout = keras.layers.Dropout(
+            backbone.dropout,
+            dtype=backbone.dtype_policy,
+            name="classifier_dropout",
+        )
+        self.output_dense = keras.layers.Dense(
+            num_classes,
+            kernel_initializer=deberta_kernel_initializer(),
+            activation=activation,
+            dtype=backbone.dtype_policy,
+            name="logits",
+        )
+
+        # === Functional Model ===
+        inputs = backbone.input
+        x = backbone(inputs)[:, backbone.start_token_index, :]
+        x = self.pooled_dropout(x)
+        x = self.pooled_dense(x)
+        x = self.output_dropout(x)
+        outputs = self.output_dense(x)
+        super().__init__(
+            inputs=inputs,
+            outputs=outputs,
+            **kwargs,
+        )
+
+        # === Config ===
+        self.backbone = backbone
+        self.preprocessor = preprocessor
+        self.num_classes = num_classes
+        self.activation = keras.activations.get(activation)
+        self.hidden_dim = hidden_dim
+        self.dropout = dropout
+
+    def get_config(self):
+        config = super().get_config()
+        config.update(
+            {
+                "num_classes": self.num_classes,
+                "activation": keras.activations.serialize(self.activation),
+                "hidden_dim": self.hidden_dim,
+                "dropout": self.dropout,
+            }
+        )
+        return config
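Note how the classifier pools: it slices the backbone's sequence output at `backbone.start_token_index` (set to 0 by the backbone, i.e. the `[CLS]` position) before the dense pooler, dropout, and logits layers. A minimal sketch of that pooling step, reusing the tiny backbone from the previous sketch:

```python
# Sequence output has shape (batch, sequence_length, hidden_dim).
sequence_output = backbone(
    {
        "token_ids": np.ones((2, 12), dtype="int32"),
        "padding_mask": np.ones((2, 12), dtype="int32"),
    }
)
# Keep only the hidden state at the start ([CLS]) token per sequence.
pooled = sequence_output[:, backbone.start_token_index, :]
print(pooled.shape)  # (2, 32): one pooled vector per sequence.
```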
@@ -0,0 +1,135 @@ keras_hub/src/models/deberta_v3/deberta_v3_masked_lm.py
+# Copyright 2024 The KerasHub Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import keras
+
+from keras_hub.src.api_export import keras_hub_export
+from keras_hub.src.layers.modeling.masked_lm_head import MaskedLMHead
+from keras_hub.src.models.deberta_v3.deberta_v3_backbone import (
+    DebertaV3Backbone,
+)
+from keras_hub.src.models.deberta_v3.deberta_v3_backbone import (
+    deberta_kernel_initializer,
+)
+from keras_hub.src.models.deberta_v3.deberta_v3_masked_lm_preprocessor import (
+    DebertaV3MaskedLMPreprocessor,
+)
+from keras_hub.src.models.masked_lm import MaskedLM
+
+
+@keras_hub_export("keras_hub.models.DebertaV3MaskedLM")
+class DebertaV3MaskedLM(MaskedLM):
+    """An end-to-end DeBERTaV3 model for the masked language modeling task.
+
+    This model will train DeBERTaV3 on a masked language modeling task.
+    The model will predict labels for a number of masked tokens in the
+    input data. For usage of this model with pre-trained weights, see the
+    `from_preset()` method.
+
+    This model can optionally be configured with a `preprocessor` layer, in
+    which case inputs can be raw string features during `fit()`, `predict()`,
+    and `evaluate()`. Inputs will be tokenized and dynamically masked during
+    training and evaluation. This is done by default when creating the model
+    with `from_preset()`.
+
+    Disclaimer: Pre-trained models are provided on an "as is" basis, without
+    warranties or conditions of any kind. The underlying model is provided by a
+    third party and subject to a separate license, available
+    [here](https://github.com/microsoft/DeBERTa).
+
+    Args:
+        backbone: A `keras_hub.models.DebertaV3Backbone` instance.
+        preprocessor: A `keras_hub.models.DebertaV3MaskedLMPreprocessor` or
+            `None`. If `None`, this model will not apply preprocessing, and
+            inputs should be preprocessed before calling the model.
+
+    Examples:
+
+    Raw string data.
+    ```python
+    features = ["The quick brown fox jumped.", "I forgot my homework."]
+
+    # Pretrained language model.
+    masked_lm = keras_hub.models.DebertaV3MaskedLM.from_preset(
+        "deberta_v3_base_en",
+    )
+    masked_lm.fit(x=features, batch_size=2)
+
+    # Re-compile (e.g., with a new learning rate).
+    masked_lm.compile(
+        loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
+        optimizer=keras.optimizers.Adam(5e-5),
+        jit_compile=True,
+    )
+    # Access backbone programmatically (e.g., to change `trainable`).
+    masked_lm.backbone.trainable = False
+    # Fit again.
+    masked_lm.fit(x=features, batch_size=2)
+    ```
+
+    Preprocessed integer data.
+    ```python
+    # Create preprocessed batch where 0 is the mask token.
+    features = {
+        "token_ids": np.array([[1, 2, 0, 4, 0, 6, 7, 8]] * 2),
+        "padding_mask": np.array([[1, 1, 1, 1, 1, 1, 1, 1]] * 2),
+        "mask_positions": np.array([[2, 4]] * 2),
+    }
+    # Labels are the original masked values.
+    labels = [[3, 5]] * 2
+
+    masked_lm = keras_hub.models.DebertaV3MaskedLM.from_preset(
+        "deberta_v3_base_en",
+        preprocessor=None,
+    )
+    masked_lm.fit(x=features, y=labels, batch_size=2)
+    ```
+    """
+
+    backbone_cls = DebertaV3Backbone
+    preprocessor_cls = DebertaV3MaskedLMPreprocessor
+
+    def __init__(
+        self,
+        backbone,
+        preprocessor=None,
+        **kwargs,
+    ):
+        # === Layers ===
+        self.backbone = backbone
+        self.preprocessor = preprocessor
+        self.masked_lm_head = MaskedLMHead(
+            vocabulary_size=backbone.vocabulary_size,
+            token_embedding=backbone.token_embedding,
+            intermediate_activation=keras.activations.gelu,
+            kernel_initializer=deberta_kernel_initializer(),
+            dtype=backbone.dtype_policy,
+            name="mlm_head",
+        )
+
+        # === Functional Model ===
+        inputs = {
+            **backbone.input,
+            "mask_positions": keras.Input(
+                shape=(None,), dtype="int32", name="mask_positions"
+            ),
+        }
+        x = backbone(backbone.input)
+        outputs = self.masked_lm_head(x, inputs["mask_positions"])
+        super().__init__(
+            inputs=inputs,
+            outputs=outputs,
+            **kwargs,
+        )
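The masked LM head ties its output projection to the backbone's `ReversibleEmbedding` (`token_embedding=backbone.token_embedding`), so hidden states gathered at `mask_positions` are mapped back to vocabulary logits. A minimal sketch using the tiny backbone from the earlier sketches, with inputs already preprocessed:

```python
masked_lm = keras_hub.models.DebertaV3MaskedLM(
    backbone=backbone,
    preprocessor=None,  # Inputs below are already tokenized and masked.
)
# Two sequences, each with tokens masked (id 0) at positions 2 and 4.
logits = masked_lm(
    {
        "token_ids": np.array([[1, 2, 0, 4, 0, 6, 7, 8]] * 2),
        "padding_mask": np.ones((2, 8), dtype="int32"),
        "mask_positions": np.array([[2, 4]] * 2),
    }
)
print(logits.shape)  # (2, 2, 1000): vocabulary logits per masked position.
```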