keras-hub-nightly 0.15.0.dev20240823171555__py3-none-any.whl → 0.16.0.dev2024092017__py3-none-any.whl

This diff compares the contents of two package versions as they were published to their public registry. It is provided for informational purposes only.
Files changed (198)
  1. keras_hub/__init__.py +0 -6
  2. keras_hub/api/__init__.py +2 -0
  3. keras_hub/api/bounding_box/__init__.py +36 -0
  4. keras_hub/api/layers/__init__.py +14 -0
  5. keras_hub/api/models/__init__.py +97 -48
  6. keras_hub/api/tokenizers/__init__.py +30 -0
  7. keras_hub/api/utils/__init__.py +22 -0
  8. keras_hub/src/api_export.py +15 -9
  9. keras_hub/src/bounding_box/__init__.py +13 -0
  10. keras_hub/src/bounding_box/converters.py +529 -0
  11. keras_hub/src/bounding_box/formats.py +162 -0
  12. keras_hub/src/bounding_box/iou.py +263 -0
  13. keras_hub/src/bounding_box/to_dense.py +95 -0
  14. keras_hub/src/bounding_box/to_ragged.py +99 -0
  15. keras_hub/src/bounding_box/utils.py +194 -0
  16. keras_hub/src/bounding_box/validate_format.py +99 -0
  17. keras_hub/src/layers/preprocessing/audio_converter.py +121 -0
  18. keras_hub/src/layers/preprocessing/image_converter.py +130 -0
  19. keras_hub/src/layers/preprocessing/masked_lm_mask_generator.py +2 -0
  20. keras_hub/src/layers/preprocessing/multi_segment_packer.py +9 -8
  21. keras_hub/src/layers/preprocessing/preprocessing_layer.py +2 -29
  22. keras_hub/src/layers/preprocessing/random_deletion.py +33 -31
  23. keras_hub/src/layers/preprocessing/random_swap.py +33 -31
  24. keras_hub/src/layers/preprocessing/resizing_image_converter.py +101 -0
  25. keras_hub/src/layers/preprocessing/start_end_packer.py +3 -2
  26. keras_hub/src/models/albert/__init__.py +1 -2
  27. keras_hub/src/models/albert/albert_masked_lm_preprocessor.py +6 -86
  28. keras_hub/src/models/albert/{albert_classifier.py → albert_text_classifier.py} +34 -10
  29. keras_hub/src/models/albert/{albert_preprocessor.py → albert_text_classifier_preprocessor.py} +14 -70
  30. keras_hub/src/models/albert/albert_tokenizer.py +17 -36
  31. keras_hub/src/models/backbone.py +12 -34
  32. keras_hub/src/models/bart/__init__.py +1 -2
  33. keras_hub/src/models/bart/bart_seq_2_seq_lm_preprocessor.py +21 -148
  34. keras_hub/src/models/bart/bart_tokenizer.py +12 -39
  35. keras_hub/src/models/bert/__init__.py +1 -5
  36. keras_hub/src/models/bert/bert_masked_lm_preprocessor.py +6 -87
  37. keras_hub/src/models/bert/bert_presets.py +1 -4
  38. keras_hub/src/models/bert/{bert_classifier.py → bert_text_classifier.py} +19 -12
  39. keras_hub/src/models/bert/{bert_preprocessor.py → bert_text_classifier_preprocessor.py} +14 -70
  40. keras_hub/src/models/bert/bert_tokenizer.py +17 -35
  41. keras_hub/src/models/bloom/__init__.py +1 -2
  42. keras_hub/src/models/bloom/bloom_causal_lm_preprocessor.py +6 -91
  43. keras_hub/src/models/bloom/bloom_tokenizer.py +12 -41
  44. keras_hub/src/models/causal_lm.py +10 -29
  45. keras_hub/src/models/causal_lm_preprocessor.py +195 -0
  46. keras_hub/src/models/csp_darknet/csp_darknet_backbone.py +54 -15
  47. keras_hub/src/models/deberta_v3/__init__.py +1 -4
  48. keras_hub/src/models/deberta_v3/deberta_v3_masked_lm_preprocessor.py +14 -77
  49. keras_hub/src/models/deberta_v3/{deberta_v3_classifier.py → deberta_v3_text_classifier.py} +16 -11
  50. keras_hub/src/models/deberta_v3/{deberta_v3_preprocessor.py → deberta_v3_text_classifier_preprocessor.py} +23 -64
  51. keras_hub/src/models/deberta_v3/deberta_v3_tokenizer.py +30 -25
  52. keras_hub/src/models/densenet/densenet_backbone.py +46 -22
  53. keras_hub/src/models/distil_bert/__init__.py +1 -4
  54. keras_hub/src/models/distil_bert/distil_bert_masked_lm_preprocessor.py +14 -76
  55. keras_hub/src/models/distil_bert/{distil_bert_classifier.py → distil_bert_text_classifier.py} +17 -12
  56. keras_hub/src/models/distil_bert/{distil_bert_preprocessor.py → distil_bert_text_classifier_preprocessor.py} +23 -63
  57. keras_hub/src/models/distil_bert/distil_bert_tokenizer.py +19 -35
  58. keras_hub/src/models/efficientnet/__init__.py +13 -0
  59. keras_hub/src/models/efficientnet/efficientnet_backbone.py +569 -0
  60. keras_hub/src/models/efficientnet/fusedmbconv.py +229 -0
  61. keras_hub/src/models/efficientnet/mbconv.py +238 -0
  62. keras_hub/src/models/electra/__init__.py +1 -2
  63. keras_hub/src/models/electra/electra_tokenizer.py +17 -32
  64. keras_hub/src/models/f_net/__init__.py +1 -2
  65. keras_hub/src/models/f_net/f_net_masked_lm_preprocessor.py +12 -78
  66. keras_hub/src/models/f_net/{f_net_classifier.py → f_net_text_classifier.py} +17 -10
  67. keras_hub/src/models/f_net/{f_net_preprocessor.py → f_net_text_classifier_preprocessor.py} +19 -63
  68. keras_hub/src/models/f_net/f_net_tokenizer.py +17 -35
  69. keras_hub/src/models/falcon/__init__.py +1 -2
  70. keras_hub/src/models/falcon/falcon_causal_lm_preprocessor.py +6 -89
  71. keras_hub/src/models/falcon/falcon_tokenizer.py +12 -35
  72. keras_hub/src/models/gemma/__init__.py +1 -2
  73. keras_hub/src/models/gemma/gemma_causal_lm_preprocessor.py +6 -90
  74. keras_hub/src/models/gemma/gemma_decoder_block.py +1 -1
  75. keras_hub/src/models/gemma/gemma_tokenizer.py +12 -23
  76. keras_hub/src/models/gpt2/__init__.py +1 -2
  77. keras_hub/src/models/gpt2/gpt2_causal_lm_preprocessor.py +6 -89
  78. keras_hub/src/models/gpt2/gpt2_preprocessor.py +12 -90
  79. keras_hub/src/models/gpt2/gpt2_tokenizer.py +12 -34
  80. keras_hub/src/models/gpt_neo_x/gpt_neo_x_causal_lm_preprocessor.py +6 -91
  81. keras_hub/src/models/gpt_neo_x/gpt_neo_x_tokenizer.py +12 -34
  82. keras_hub/src/models/image_classifier.py +0 -5
  83. keras_hub/src/models/image_classifier_preprocessor.py +83 -0
  84. keras_hub/src/models/llama/__init__.py +1 -2
  85. keras_hub/src/models/llama/llama_causal_lm_preprocessor.py +6 -85
  86. keras_hub/src/models/llama/llama_tokenizer.py +12 -25
  87. keras_hub/src/models/llama3/__init__.py +1 -2
  88. keras_hub/src/models/llama3/llama3_causal_lm_preprocessor.py +6 -89
  89. keras_hub/src/models/llama3/llama3_tokenizer.py +12 -33
  90. keras_hub/src/models/masked_lm.py +0 -2
  91. keras_hub/src/models/masked_lm_preprocessor.py +156 -0
  92. keras_hub/src/models/mistral/__init__.py +1 -2
  93. keras_hub/src/models/mistral/mistral_causal_lm_preprocessor.py +6 -91
  94. keras_hub/src/models/mistral/mistral_tokenizer.py +12 -23
  95. keras_hub/src/models/mix_transformer/mix_transformer_backbone.py +2 -2
  96. keras_hub/src/models/mobilenet/__init__.py +13 -0
  97. keras_hub/src/models/mobilenet/mobilenet_backbone.py +530 -0
  98. keras_hub/src/models/mobilenet/mobilenet_image_classifier.py +114 -0
  99. keras_hub/src/models/opt/__init__.py +1 -2
  100. keras_hub/src/models/opt/opt_causal_lm_preprocessor.py +6 -93
  101. keras_hub/src/models/opt/opt_tokenizer.py +12 -41
  102. keras_hub/src/models/pali_gemma/__init__.py +1 -4
  103. keras_hub/src/models/pali_gemma/pali_gemma_causal_lm_preprocessor.py +28 -28
  104. keras_hub/src/models/pali_gemma/pali_gemma_image_converter.py +25 -0
  105. keras_hub/src/models/pali_gemma/pali_gemma_presets.py +5 -5
  106. keras_hub/src/models/pali_gemma/pali_gemma_tokenizer.py +11 -3
  107. keras_hub/src/models/phi3/__init__.py +1 -2
  108. keras_hub/src/models/phi3/phi3_causal_lm.py +3 -9
  109. keras_hub/src/models/phi3/phi3_causal_lm_preprocessor.py +6 -89
  110. keras_hub/src/models/phi3/phi3_tokenizer.py +12 -36
  111. keras_hub/src/models/preprocessor.py +72 -83
  112. keras_hub/src/models/resnet/__init__.py +6 -0
  113. keras_hub/src/models/resnet/resnet_backbone.py +390 -42
  114. keras_hub/src/models/resnet/resnet_image_classifier.py +33 -6
  115. keras_hub/src/models/resnet/resnet_image_classifier_preprocessor.py +28 -0
  116. keras_hub/src/models/{llama3/llama3_preprocessor.py → resnet/resnet_image_converter.py} +7 -5
  117. keras_hub/src/models/resnet/resnet_presets.py +95 -0
  118. keras_hub/src/models/retinanet/__init__.py +13 -0
  119. keras_hub/src/models/retinanet/anchor_generator.py +175 -0
  120. keras_hub/src/models/retinanet/box_matcher.py +259 -0
  121. keras_hub/src/models/retinanet/non_max_supression.py +578 -0
  122. keras_hub/src/models/roberta/__init__.py +1 -2
  123. keras_hub/src/models/roberta/roberta_masked_lm_preprocessor.py +22 -74
  124. keras_hub/src/models/roberta/{roberta_classifier.py → roberta_text_classifier.py} +16 -11
  125. keras_hub/src/models/roberta/{roberta_preprocessor.py → roberta_text_classifier_preprocessor.py} +21 -53
  126. keras_hub/src/models/roberta/roberta_tokenizer.py +13 -52
  127. keras_hub/src/models/seq_2_seq_lm_preprocessor.py +269 -0
  128. keras_hub/src/models/stable_diffusion_v3/__init__.py +13 -0
  129. keras_hub/src/models/stable_diffusion_v3/clip_encoder_block.py +103 -0
  130. keras_hub/src/models/stable_diffusion_v3/clip_preprocessor.py +93 -0
  131. keras_hub/src/models/stable_diffusion_v3/clip_text_encoder.py +149 -0
  132. keras_hub/src/models/stable_diffusion_v3/clip_tokenizer.py +167 -0
  133. keras_hub/src/models/stable_diffusion_v3/mmdit.py +427 -0
  134. keras_hub/src/models/stable_diffusion_v3/mmdit_block.py +317 -0
  135. keras_hub/src/models/stable_diffusion_v3/t5_xxl_preprocessor.py +74 -0
  136. keras_hub/src/models/stable_diffusion_v3/t5_xxl_text_encoder.py +155 -0
  137. keras_hub/src/models/stable_diffusion_v3/vae_attention.py +126 -0
  138. keras_hub/src/models/stable_diffusion_v3/vae_image_decoder.py +186 -0
  139. keras_hub/src/models/t5/__init__.py +1 -2
  140. keras_hub/src/models/t5/t5_tokenizer.py +13 -23
  141. keras_hub/src/models/task.py +71 -116
  142. keras_hub/src/models/{classifier.py → text_classifier.py} +19 -13
  143. keras_hub/src/models/text_classifier_preprocessor.py +138 -0
  144. keras_hub/src/models/whisper/__init__.py +1 -2
  145. keras_hub/src/models/whisper/{whisper_audio_feature_extractor.py → whisper_audio_converter.py} +20 -18
  146. keras_hub/src/models/whisper/whisper_backbone.py +0 -3
  147. keras_hub/src/models/whisper/whisper_presets.py +10 -10
  148. keras_hub/src/models/whisper/whisper_tokenizer.py +20 -16
  149. keras_hub/src/models/xlm_roberta/__init__.py +1 -4
  150. keras_hub/src/models/xlm_roberta/xlm_roberta_masked_lm_preprocessor.py +26 -72
  151. keras_hub/src/models/xlm_roberta/{xlm_roberta_classifier.py → xlm_roberta_text_classifier.py} +16 -11
  152. keras_hub/src/models/xlm_roberta/{xlm_roberta_preprocessor.py → xlm_roberta_text_classifier_preprocessor.py} +26 -53
  153. keras_hub/src/models/xlm_roberta/xlm_roberta_tokenizer.py +25 -10
  154. keras_hub/src/tests/test_case.py +46 -0
  155. keras_hub/src/tokenizers/byte_pair_tokenizer.py +30 -17
  156. keras_hub/src/tokenizers/byte_tokenizer.py +14 -15
  157. keras_hub/src/tokenizers/sentence_piece_tokenizer.py +20 -7
  158. keras_hub/src/tokenizers/tokenizer.py +67 -32
  159. keras_hub/src/tokenizers/unicode_codepoint_tokenizer.py +14 -15
  160. keras_hub/src/tokenizers/word_piece_tokenizer.py +34 -47
  161. keras_hub/src/utils/imagenet/__init__.py +13 -0
  162. keras_hub/src/utils/imagenet/imagenet_utils.py +1067 -0
  163. keras_hub/src/utils/keras_utils.py +0 -50
  164. keras_hub/src/utils/preset_utils.py +230 -68
  165. keras_hub/src/utils/tensor_utils.py +187 -69
  166. keras_hub/src/utils/timm/convert_resnet.py +19 -16
  167. keras_hub/src/utils/timm/preset_loader.py +66 -0
  168. keras_hub/src/utils/transformers/convert_albert.py +193 -0
  169. keras_hub/src/utils/transformers/convert_bart.py +373 -0
  170. keras_hub/src/utils/transformers/convert_bert.py +7 -17
  171. keras_hub/src/utils/transformers/convert_distilbert.py +10 -20
  172. keras_hub/src/utils/transformers/convert_gemma.py +5 -19
  173. keras_hub/src/utils/transformers/convert_gpt2.py +5 -18
  174. keras_hub/src/utils/transformers/convert_llama3.py +7 -18
  175. keras_hub/src/utils/transformers/convert_mistral.py +129 -0
  176. keras_hub/src/utils/transformers/convert_pali_gemma.py +7 -29
  177. keras_hub/src/utils/transformers/preset_loader.py +77 -0
  178. keras_hub/src/utils/transformers/safetensor_utils.py +2 -2
  179. keras_hub/src/version_utils.py +1 -1
  180. keras_hub_nightly-0.16.0.dev2024092017.dist-info/METADATA +202 -0
  181. keras_hub_nightly-0.16.0.dev2024092017.dist-info/RECORD +334 -0
  182. {keras_hub_nightly-0.15.0.dev20240823171555.dist-info → keras_hub_nightly-0.16.0.dev2024092017.dist-info}/WHEEL +1 -1
  183. keras_hub/src/models/bart/bart_preprocessor.py +0 -276
  184. keras_hub/src/models/bloom/bloom_preprocessor.py +0 -185
  185. keras_hub/src/models/electra/electra_preprocessor.py +0 -154
  186. keras_hub/src/models/falcon/falcon_preprocessor.py +0 -187
  187. keras_hub/src/models/gemma/gemma_preprocessor.py +0 -191
  188. keras_hub/src/models/gpt_neo_x/gpt_neo_x_preprocessor.py +0 -145
  189. keras_hub/src/models/llama/llama_preprocessor.py +0 -189
  190. keras_hub/src/models/mistral/mistral_preprocessor.py +0 -190
  191. keras_hub/src/models/opt/opt_preprocessor.py +0 -188
  192. keras_hub/src/models/phi3/phi3_preprocessor.py +0 -190
  193. keras_hub/src/models/whisper/whisper_preprocessor.py +0 -326
  194. keras_hub/src/utils/timm/convert.py +0 -37
  195. keras_hub/src/utils/transformers/convert.py +0 -101
  196. keras_hub_nightly-0.15.0.dev20240823171555.dist-info/METADATA +0 -34
  197. keras_hub_nightly-0.15.0.dev20240823171555.dist-info/RECORD +0 -297
  198. {keras_hub_nightly-0.15.0.dev20240823171555.dist-info → keras_hub_nightly-0.16.0.dev2024092017.dist-info}/top_level.txt +0 -0
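Two refactors dominate the list above: the `*_classifier.py → *_text_classifier.py` renames (entries 28, 38, 49, 55, 66, 124, 142, and 151) and the removal of the per-model `*_preprocessor.py` files (entries 183-193) in favor of task-scoped preprocessor base classes (entries 45, 83, 91, 127, and 143). Below is a hedged sketch of what the rename means for user code, assuming the exported class names track the file names; the preset name is shown only for illustration.

```python
import keras_hub

# keras-hub-nightly 0.15.x:
# classifier = keras_hub.models.BertClassifier.from_preset(
#     "bert_base_en_uncased", num_classes=2
# )

# keras-hub-nightly 0.16.x: same task, renamed class.
classifier = keras_hub.models.BertTextClassifier.from_preset(
    "bert_base_en_uncased", num_classes=2
)
classifier.predict(["What an amazing movie!"])
```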
keras_hub/src/models/gemma/gemma_preprocessor.py (deleted)
@@ -1,191 +0,0 @@
- # Copyright 2024 The KerasHub Authors
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     https://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
-
- import keras
-
- from keras_hub.src.api_export import keras_hub_export
- from keras_hub.src.layers.preprocessing.start_end_packer import StartEndPacker
- from keras_hub.src.models.gemma.gemma_tokenizer import GemmaTokenizer
- from keras_hub.src.models.preprocessor import Preprocessor
- from keras_hub.src.utils.keras_utils import (
-     convert_inputs_to_list_of_tensor_segments,
- )
-
-
- @keras_hub_export("keras_hub.models.GemmaPreprocessor")
- class GemmaPreprocessor(Preprocessor):
-     """Gemma preprocessing layer which tokenizes and packs inputs.
-
-     This preprocessing layer will do 2 things:
-
-     - Tokenize the inputs using the `tokenizer`.
-     - Construct a dictionary with keys `"token_ids"`, `"padding_mask"`, that can
-       be passed directly to a `keras_hub.models.GemmaBackbone`.
-
-     This layer can be used directly with `tf.data.Dataset.map` to preprocess
-     string data in the `(x, y, sample_weight)` format used by
-     `keras.Model.fit`.
-
-     The call method of this layer accepts three arguments, `x`, `y`, and
-     `sample_weight`. `x` can be a python string or tensor representing a single
-     segment, a list of python strings representing a batch of single segments,
-     or a list of tensors representing multiple segments to be packed together.
-     `y` and `sample_weight` are both optional, can have any format, and will be
-     passed through unaltered.
-
-     `GemmaPreprocessor` expects the input to have only one segment, as Gemma is
-     mainly used for generation tasks. For tasks having multi-segment inputs
-     please combine inputs into a single string input before passing to the
-     preprocessor layer.
-
-     Args:
-         tokenizer: A `keras_hub.models.GemmaTokenizer` instance.
-         sequence_length: The length of the packed inputs.
-         add_start_token: If `True`, the preprocessor will prepend the tokenizer
-             start token to each input sequence.
-         add_end_token: If `True`, the preprocessor will append the tokenizer
-             end token to each input sequence.
-
-     Call arguments:
-         x: A string, `tf.Tensor` or list of python strings.
-         y: Any label data. Will be passed through unaltered.
-         sample_weight: Any label weight data. Will be passed through unaltered.
-         sequence_length: Pass to override the configured `sequence_length` of
-             the layer.
-
-     Examples:
-
-     Directly calling the layer on data.
-     ```python
-     preprocessor = keras_hub.models.GemmaPreprocessor.from_preset(
-         "gemma_2b_en"
-     )
-
-     # Tokenize and pack a single sentence.
-     preprocessor("The quick brown fox jumped.")
-
-     # Tokenize a batch of sentences.
-     preprocessor(["The quick brown fox jumped.", "Call me Ishmael."])
-
-     # Custom vocabulary.
-     bytes_io = io.BytesIO()
-     ds = tf.data.Dataset.from_tensor_slices(["The quick brown fox jumped."])
-     sentencepiece.SentencePieceTrainer.train(
-         sentence_iterator=ds.as_numpy_iterator(),
-         model_writer=bytes_io,
-         vocab_size=8,
-         model_type="WORD",
-         pad_id=0,
-         bos_id=1,
-         eos_id=2,
-         unk_id=3,
-         pad_piece="<pad>",
-         bos_piece="<bos>",
-         eos_piece="<eos>",
-         unk_piece="<unk>",
-     )
-     tokenizer = keras_hub.models.GemmaTokenizer(
-         proto=bytes_io.getvalue(),
-     )
-     preprocessor = keras_hub.models.GemmaPreprocessor(tokenizer=tokenizer)
-     preprocessor("The quick brown fox jumped.")
-     ```
-
-     Apply preprocessing to a `tf.data.Dataset`.
-     ```python
-     preprocessor = keras_hub.models.GemmaPreprocessor.from_preset(
-         "gemma_2b_en"
-     )
-
-     text = tf.constant(["The quick brown fox jumped.", "Call me Ishmael."])
-     label = tf.constant([1, 1])
-
-     # Map labeled single sentences.
-     ds = tf.data.Dataset.from_tensor_slices((text, label))
-     ds = ds.map(preprocessor, num_parallel_calls=tf.data.AUTOTUNE)
-
-     # Map unlabeled single sentences.
-     ds = tf.data.Dataset.from_tensor_slices(text)
-     ds = ds.map(preprocessor, num_parallel_calls=tf.data.AUTOTUNE)
-     ```
-     """
-
-     tokenizer_cls = GemmaTokenizer
-
-     def __init__(
-         self,
-         tokenizer,
-         sequence_length=1024,
-         add_start_token=True,
-         add_end_token=True,
-         **kwargs,
-     ):
-         super().__init__(**kwargs)
-
-         self.tokenizer = tokenizer
-         self.sequence_length = sequence_length
-         self.add_start_token = add_start_token
-         self.add_end_token = add_end_token
-
-     def build(self, input_shape):
-         # Defer packer creation to `build()` so that we can be sure tokenizer
-         # assets have loaded when restoring a saved model.
-         self.packer = StartEndPacker(
-             start_value=self.tokenizer.start_token_id,
-             end_value=self.tokenizer.end_token_id,
-             pad_value=self.tokenizer.pad_token_id,
-             sequence_length=self.sequence_length,
-             return_padding_mask=True,
-         )
-         self.built = True
-
-     def call(
-         self,
-         x,
-         y=None,
-         sample_weight=None,
-         sequence_length=None,
-     ):
-         x = convert_inputs_to_list_of_tensor_segments(x)
-         if len(x) != 1:
-             raise ValueError(
-                 "GemmaPreprocessor requires each input to contain only "
-                 f"one segment, but received {len(x)}. If you are using Gemma "
-                 "for a multi-segment classification task, please combine your "
-                 "input into a single string."
-             )
-         sequence_length = sequence_length or self.sequence_length
-         token_ids, padding_mask = self.packer(
-             self.tokenizer(x[0]),
-             sequence_length=sequence_length,
-             add_start_value=self.add_start_token,
-             add_end_value=self.add_end_token,
-         )
-         x = {
-             "token_ids": token_ids,
-             "padding_mask": padding_mask,
-         }
-         return keras.utils.pack_x_y_sample_weight(x, y, sample_weight)
-
-     def get_config(self):
-         config = super().get_config()
-         config.update(
-             {
-                 "sequence_length": self.sequence_length,
-                 "add_start_token": self.add_start_token,
-                 "add_end_token": self.add_end_token,
-             }
-         )
-         return config
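All of the preprocessors removed in this release share the `build()`/`call()` pattern above: tokenize, then hand padding and special tokens to `StartEndPacker`. The following is a minimal sketch of what that packing step produces, with toy token IDs standing in for the tokenizer's real start/end/pad IDs, and assuming the layer is exported as `keras_hub.layers.StartEndPacker` (its KerasNLP export was `keras_nlp.layers.StartEndPacker`).

```python
from keras_hub.layers import StartEndPacker

# Toy IDs: 1/2/0 stand in for start_token_id/end_token_id/pad_token_id.
packer = StartEndPacker(
    start_value=1,
    end_value=2,
    pad_value=0,
    sequence_length=8,
    return_padding_mask=True,
)
token_ids, padding_mask = packer(
    [5, 6, 7],             # an unbatched "tokenized" input
    add_start_value=True,  # mirrors add_start_token
    add_end_value=True,    # mirrors add_end_token
)
# token_ids    -> [1, 5, 6, 7, 2, 0, 0, 0]
# padding_mask -> [True, True, True, True, True, False, False, False]
```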
keras_hub/src/models/gpt_neo_x/gpt_neo_x_preprocessor.py (deleted)
@@ -1,145 +0,0 @@
- # Copyright 2024 The KerasHub Authors
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     https://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- import keras
-
- from keras_hub.src.api_export import keras_hub_export
- from keras_hub.src.layers.preprocessing.start_end_packer import StartEndPacker
- from keras_hub.src.models.gpt_neo_x.gpt_neo_x_tokenizer import GPTNeoXTokenizer
- from keras_hub.src.models.preprocessor import Preprocessor
- from keras_hub.src.utils.keras_utils import (
-     convert_inputs_to_list_of_tensor_segments,
- )
-
-
- @keras_hub_export("keras_hub.models.GPTNeoXPreprocessor")
- class GPTNeoXPreprocessor(Preprocessor):
-     """GPTNeoX preprocessing layer which tokenizes and packs inputs.
-
-     This preprocessing layer will do 2 things:
-
-     - Tokenize the inputs using the `tokenizer`.
-     - Construct a dictionary with keys `"token_ids"`, `"padding_mask"`, that can
-       be passed directly to a `keras_hub.models.GPTNeoXBackbone`.
-
-     This layer can be used directly with `tf.data.Dataset.map` to preprocess
-     string data in the `(x, y, sample_weight)` format used by
-     `keras.Model.fit`.
-
-     The call method of this layer accepts three arguments, `x`, `y`, and
-     `sample_weight`. `x` can be a python string or tensor representing a single
-     segment, a list of python strings representing a batch of single segments,
-     or a list of tensors representing multiple segments to be packed together.
-     `y` and `sample_weight` are both optional, can have any format, and will be
-     passed through unaltered.
-
-     `GPTNeoXPreprocessor` forces the input to have only one segment, as GPTNeoX is
-     mainly used for generation tasks. For tasks having multi-segment inputs
-     like "glue/mnli", please use a model designed for classification purposes
-     such as BERT or RoBERTa.
-
-     Args:
-         tokenizer: A `keras_hub.models.GPTNeoXTokenizer` instance.
-         sequence_length: The length of the packed inputs.
-         add_start_token: If `True`, the preprocessor will prepend the tokenizer
-             start token to each input sequence.
-         add_end_token: If `True`, the preprocessor will append the tokenizer
-             end token to each input sequence.
-
-     Call arguments:
-         x: A string, `tf.Tensor` or list of python strings.
-         y: Any label data. Will be passed through unaltered.
-         sample_weight: Any label weight data. Will be passed through unaltered.
-         sequence_length: Pass to override the configured `sequence_length` of
-             the layer.
-     """
-
-     tokenizer_cls = GPTNeoXTokenizer
-
-     def __init__(
-         self,
-         tokenizer,
-         sequence_length=1024,
-         add_start_token=True,
-         add_end_token=True,
-         **kwargs,
-     ):
-         super().__init__(**kwargs)
-         self.tokenizer = tokenizer
-         self.packer = None
-         self.sequence_length = sequence_length
-         self.add_start_token = add_start_token
-         self.add_end_token = add_end_token
-
-     def build(self, input_shape):
-         # Defer packer creation to `build()` so that we can be sure tokenizer
-         # assets have loaded when restoring a saved model.
-         self.packer = StartEndPacker(
-             start_value=self.tokenizer.start_token_id,
-             end_value=self.tokenizer.end_token_id,
-             pad_value=self.tokenizer.pad_token_id,
-             sequence_length=self.sequence_length,
-             return_padding_mask=True,
-         )
-         self.built = True
-
-     def call(
-         self,
-         x,
-         y=None,
-         sample_weight=None,
-         sequence_length=None,
-     ):
-         x = convert_inputs_to_list_of_tensor_segments(x)
-         if len(x) != 1:
-             raise ValueError(
-                 "GPTNeoX requires each input feature to contain only "
-                 f"one segment, but received {len(x)}. If you are using GPTNeoX "
-                 "for a multi-segment classification task, please refer to "
-                 "classification models like BERT or RoBERTa."
-             )
-         sequence_length = sequence_length or self.sequence_length
-         token_ids, padding_mask = self.packer(
-             self.tokenizer(x[0]),
-             sequence_length=sequence_length,
-             add_start_value=self.add_start_token,
-             add_end_value=self.add_end_token,
-         )
-         x = {
-             "token_ids": token_ids,
-             "padding_mask": padding_mask,
-         }
-         return keras.utils.pack_x_y_sample_weight(x, y, sample_weight)
-
-     def get_config(self):
-         config = super().get_config()
-         config.update(
-             {
-                 "sequence_length": self.sequence_length,
-                 "add_start_token": self.add_start_token,
-                 "add_end_token": self.add_end_token,
-             }
-         )
-         return config
-
-     @property
-     def sequence_length(self):
-         """The padded length of model input sequences."""
-         return self._sequence_length
-
-     @sequence_length.setter
-     def sequence_length(self, value):
-         self._sequence_length = value
-         if self.packer is not None:
-             self.packer.sequence_length = value
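The `sequence_length` property and setter at the end of this class (also present on `LlamaPreprocessor` below, but not on `GemmaPreprocessor` above) exist to keep an already-built packer in sync when the attribute is reassigned. Here is that write-through pattern reduced to its essentials as a hypothetical mini-class; `PackerOwner` is not keras-hub API, and the start/end IDs are toy values.

```python
from keras_hub.layers import StartEndPacker

class PackerOwner:
    """Hypothetical: the property pattern from the deleted class, isolated."""

    def __init__(self, sequence_length):
        self.packer = None  # set before the property setter first runs
        self.sequence_length = sequence_length
        self.packer = StartEndPacker(
            start_value=1, end_value=2, sequence_length=self.sequence_length
        )

    @property
    def sequence_length(self):
        return self._sequence_length

    @sequence_length.setter
    def sequence_length(self, value):
        # Write through to the packer, so later calls pad/truncate to the
        # new length instead of the one captured at build time.
        self._sequence_length = value
        if self.packer is not None:
            self.packer.sequence_length = value

owner = PackerOwner(1024)
owner.sequence_length = 512
assert owner.packer.sequence_length == 512
```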
keras_hub/src/models/llama/llama_preprocessor.py (deleted)
@@ -1,189 +0,0 @@
- # Copyright 2024 The KerasHub Authors
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     https://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- import keras
-
- from keras_hub.src.api_export import keras_hub_export
- from keras_hub.src.layers.preprocessing.start_end_packer import StartEndPacker
- from keras_hub.src.models.llama.llama_tokenizer import LlamaTokenizer
- from keras_hub.src.models.preprocessor import Preprocessor
- from keras_hub.src.utils.keras_utils import (
-     convert_inputs_to_list_of_tensor_segments,
- )
-
-
- @keras_hub_export("keras_hub.models.LlamaPreprocessor")
- class LlamaPreprocessor(Preprocessor):
-     """A Llama preprocessing layer which tokenizes and packs inputs.
-
-     This preprocessing layer will do three things:
-
-     1. Tokenize any number of input segments using the `tokenizer`.
-     2. Pack the inputs together using a `keras_hub.layers.StartEndPacker`.
-        with the appropriate tokens.
-     3. Construct a dictionary with keys `"token_ids"`, and `"padding_mask"`
-        that can be passed directly to `keras_hub.models.LlamaBackbone`.
-
-     This layer can be used directly with `tf.data.Dataset.map` to preprocess
-     string data in the `(x, y, sample_weight)` format used by
-     `keras.Model.fit`.
-
-     Args:
-         tokenizer: A `keras_hub.models.LlamaTokenizer` instance.
-         sequence_length: The length of the packed inputs.
-         add_start_token: If `True`, the preprocessor will prepend the tokenizer
-             start token to each input sequence. Default is `True`.
-         add_end_token: If `True`, the preprocessor will append the tokenizer
-             end token to each input sequence. Default is `False`.
-
-     Call arguments:
-         x: A tensor of single string sequences, or a tuple of multiple
-             tensor sequences to be packed together. Inputs may be batched or
-             unbatched. For single sequences, raw python inputs will be converted
-             to tensors. For multiple sequences, pass tensors directly.
-         y: Any label data. Will be passed through unaltered.
-         sample_weight: Any label weight data. Will be passed through unaltered.
-         sequence_length: Pass to override the configured `sequence_length` of
-             the layer.
-
-     Examples:
-
-     Directly calling the from_preset().
-     ```python
-     preprocessor = keras_hub.models.LlamaPreprocessor.from_preset(
-         "llama_base_en"
-     )
-
-     # Tokenize and pack a single sentence.
-     preprocessor("The quick brown fox jumped.")
-
-     # Tokenize and a batch of single sentences.
-     preprocessor(["The quick brown fox jumped.", "Call me Ishmael."])
-
-     # Preprocess a batch of sentence pairs.
-     # When handling multiple sequences, always convert to tensors first!
-     first = tf.constant(["The quick brown fox jumped.", "Call me Ishmael."])
-     second = tf.constant(["The fox tripped.", "Oh look, a whale."])
-     preprocessor((first, second))
-     ```
-
-     Mapping with `tf.data.Dataset`.
-     ```python
-     preprocessor = keras_hub.models.LlamaPreprocessor.from_preset(
-         "llama_base_en"
-     )
-     first = tf.constant(["The quick brown fox jumped.", "Call me Ishmael."])
-     second = tf.constant(["The fox tripped.", "Oh look, a whale."])
-     label = tf.constant([1, 1])
-
-     # Map labeled single sentences.
-     ds = tf.data.Dataset.from_tensor_slices((first, label))
-     ds = ds.map(preprocessor, num_parallel_calls=tf.data.AUTOTUNE)
-
-     # Map unlabeled single sentences.
-     ds = tf.data.Dataset.from_tensor_slices(first)
-     ds = ds.map(preprocessor, num_parallel_calls=tf.data.AUTOTUNE)
-
-     # Map labeled sentence pairs.
-     ds = tf.data.Dataset.from_tensor_slices(((first, second), label))
-     ds = ds.map(preprocessor, num_parallel_calls=tf.data.AUTOTUNE)
-
-     # Map unlabeled sentence pairs.
-     ds = tf.data.Dataset.from_tensor_slices((first, second))
-
-     # Watch out for tf.data's default unpacking of tuples here!
-     # Best to invoke the `preprocessor` directly in this case.
-     ds = ds.map(
-         lambda first, second: preprocessor(x=(first, second)),
-         num_parallel_calls=tf.data.AUTOTUNE,
-     )
-     ```
-     """
-
-     tokenizer_cls = LlamaTokenizer
-
-     def __init__(
-         self,
-         tokenizer,
-         sequence_length=1024,
-         add_start_token=True,
-         add_end_token=False,
-         **kwargs,
-     ):
-         super().__init__(**kwargs)
-         self.tokenizer = tokenizer
-         self.packer = None
-         self.add_start_token = add_start_token
-         self.add_end_token = add_end_token
-         self.sequence_length = sequence_length
-
-     def build(self, input_shape):
-         # Defer packer creation to `build()` so that we can be sure tokenizer
-         # assets have loaded when restoring a saved model.
-         self.packer = StartEndPacker(
-             start_value=self.tokenizer.start_token_id,
-             end_value=self.tokenizer.end_token_id,
-             sequence_length=self.sequence_length,
-             return_padding_mask=True,
-         )
-         self.built = True
-
-     def get_config(self):
-         config = super().get_config()
-         config.update(
-             {
-                 "sequence_length": self.sequence_length,
-                 "add_start_token": self.add_start_token,
-                 "add_end_token": self.add_end_token,
-             }
-         )
-         return config
-
-     def call(
-         self,
-         x,
-         y=None,
-         sample_weight=None,
-         sequence_length=None,
-     ):
-         x = convert_inputs_to_list_of_tensor_segments(x)
-         if len(x) != 1:
-             raise ValueError(
-                 "Llama requires each input feature to contain only "
-                 f"one segment, but received {len(x)}. If you are using Llama"
-                 " for a multi-segment classification task, please refer to "
-                 "classification models like BERT or RoBERTa."
-             )
-         sequence_length = sequence_length or self.sequence_length
-         token_ids, padding_mask = self.packer(
-             self.tokenizer(x[0]),
-             sequence_length=sequence_length,
-             add_start_value=self.add_start_token,
-             add_end_value=self.add_end_token,
-         )
-         x = {
-             "token_ids": token_ids,
-             "padding_mask": padding_mask,
-         }
-         return keras.utils.pack_x_y_sample_weight(x, y, sample_weight)
-
-     @property
-     def sequence_length(self):
-         """The padded length of model input sequences."""
-         return self._sequence_length
-
-     @sequence_length.setter
-     def sequence_length(self, value):
-         self._sequence_length = value
-         if self.packer is not None:
-             self.packer.sequence_length = value
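For the models whose standalone preprocessors were deleted above (Gemma, GPT-NeoX, Llama, and the other per-model files in entries 183-193), the equivalent tokenize-and-pack step now lives in the task-scoped preprocessors built on the new shared base classes (entries 45, 91, 127, and 143). Below is a hedged migration sketch using Gemma; the class and preset names are taken from the file list and the package's existing presets, and the returned `(x, y, sample_weight)` tuple is assumed from the causal-LM base class, not shown in this diff.

```python
import keras_hub

# keras-hub-nightly 0.15.x (file deleted above):
# preprocessor = keras_hub.models.GemmaPreprocessor.from_preset("gemma_2b_en")
# x = preprocessor("The quick brown fox jumped.")

# keras-hub-nightly 0.16.x: the task preprocessor covers the same
# tokenize-and-pack step, and additionally derives labels and sample
# weights (the token stream shifted by one) for LM training.
preprocessor = keras_hub.models.GemmaCausalLMPreprocessor.from_preset(
    "gemma_2b_en",
    sequence_length=128,
)
x, y, sample_weight = preprocessor("The quick brown fox jumped.")
# x["token_ids"] and x["padding_mask"] match the old layer's output format.
```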