keras-hub-nightly 0.21.0.dev202505050407__py3-none-any.whl → 0.21.0.dev202505070407__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (40)
  1. keras_hub/models/__init__.py +21 -0
  2. keras_hub/src/models/backbone.py +5 -2
  3. keras_hub/src/models/cspnet/cspnet_backbone.py +51 -26
  4. keras_hub/src/models/cspnet/cspnet_presets.py +38 -3
  5. keras_hub/src/models/mixtral/mixtral_attention.py +263 -0
  6. keras_hub/src/models/mixtral/mixtral_backbone.py +207 -0
  7. keras_hub/src/models/mixtral/mixtral_causal_lm.py +281 -0
  8. keras_hub/src/models/mixtral/mixtral_causal_lm_preprocessor.py +76 -0
  9. keras_hub/src/models/mixtral/mixtral_decoder.py +494 -0
  10. keras_hub/src/models/mixtral/mixtral_layer_norm.py +34 -0
  11. keras_hub/src/models/mixtral/mixtral_tokenizer.py +21 -0
  12. keras_hub/src/models/qwen/qwen_attention.py +3 -1
  13. keras_hub/src/models/qwen/qwen_presets.py +61 -0
  14. keras_hub/src/models/qwen_moe/__init__.py +0 -0
  15. keras_hub/src/models/qwen_moe/qwen_moe_attention.py +377 -0
  16. keras_hub/src/models/qwen_moe/qwen_moe_backbone.py +373 -0
  17. keras_hub/src/models/qwen_moe/qwen_moe_causal_lm.py +350 -0
  18. keras_hub/src/models/qwen_moe/qwen_moe_causal_lm_preprocessor.py +17 -0
  19. keras_hub/src/models/qwen_moe/qwen_moe_decoder.py +625 -0
  20. keras_hub/src/models/qwen_moe/qwen_moe_layernorm.py +32 -0
  21. keras_hub/src/models/qwen_moe/qwen_moe_tokenizer.py +46 -0
  22. keras_hub/src/models/retinanet/retinanet_image_converter.py +0 -13
  23. keras_hub/src/models/retinanet/retinanet_presets.py +2 -2
  24. keras_hub/src/models/segformer/segformer_image_segmenter_preprocessor.py +0 -18
  25. keras_hub/src/models/segformer/segformer_presets.py +12 -12
  26. keras_hub/src/models/task.py +5 -2
  27. keras_hub/src/utils/keras_utils.py +11 -0
  28. keras_hub/src/utils/preset_utils.py +69 -9
  29. keras_hub/src/utils/tensor_utils.py +27 -1
  30. keras_hub/src/utils/timm/convert_cspnet.py +94 -23
  31. keras_hub/src/utils/timm/preset_loader.py +6 -6
  32. keras_hub/src/utils/transformers/convert_mixtral.py +139 -0
  33. keras_hub/src/utils/transformers/convert_qwen_moe.py +253 -0
  34. keras_hub/src/utils/transformers/preset_loader.py +6 -0
  35. keras_hub/src/version.py +1 -1
  36. keras_hub/tokenizers/__init__.py +6 -0
  37. {keras_hub_nightly-0.21.0.dev202505050407.dist-info → keras_hub_nightly-0.21.0.dev202505070407.dist-info}/METADATA +1 -1
  38. {keras_hub_nightly-0.21.0.dev202505050407.dist-info → keras_hub_nightly-0.21.0.dev202505070407.dist-info}/RECORD +40 -22
  39. {keras_hub_nightly-0.21.0.dev202505050407.dist-info → keras_hub_nightly-0.21.0.dev202505070407.dist-info}/WHEEL +0 -0
  40. {keras_hub_nightly-0.21.0.dev202505050407.dist-info → keras_hub_nightly-0.21.0.dev202505070407.dist-info}/top_level.txt +0 -0
keras_hub/src/models/qwen_moe/qwen_moe_causal_lm.py
@@ -0,0 +1,350 @@
+ import keras
+ from keras import ops
+
+ from keras_hub.src.api_export import keras_hub_export
+ from keras_hub.src.models.causal_lm import CausalLM
+ from keras_hub.src.models.qwen_moe.qwen_moe_backbone import QwenMoeBackbone
+ from keras_hub.src.models.qwen_moe.qwen_moe_causal_lm_preprocessor import (
+     QwenMoeCausalLMPreprocessor,
+ )
+ from keras_hub.src.utils.tensor_utils import any_equal
+
+
+ @keras_hub_export(
+     "keras_hub.models.QwenMoeCausalLM",
+ )
+ class QwenMoeCausalLM(CausalLM):
+     """An end-to-end Qwen MoE model for causal language modeling.
+
+     A causal language model (LM) predicts the next token based on previous
+     tokens. This task setup can be used to train the model unsupervised on
+     plain text input, or to autoregressively generate plain text similar to
+     the data used for training. This task can be used for pre-training or
+     fine-tuning a Qwen MoE model, simply by calling `fit()`.
+
+     This model has a `generate()` method, which generates text based on a
+     prompt. The generation strategy used is controlled by an additional
+     `sampler` argument on `compile()`. You can recompile the model with
+     different `keras_hub.samplers` objects to control the generation. By
+     default, `"greedy"` sampling will be used.
+
+     This model can optionally be configured with a `preprocessor` layer, in
+     which case it will automatically apply preprocessing to string inputs
+     during `fit()`, `predict()`, `evaluate()`, and `generate()`. This is done
+     by default when creating the model with `from_preset()`.
+
+     The Qwen MoE architecture uses a Mixture of Experts (MoE) design, in
+     which each transformer layer routes every token to a small subset of
+     experts, so only a fraction of the model's parameters is active per
+     token. This makes the architecture well suited to large-scale language
+     tasks at a reduced computational cost.
+
+     Args:
+         backbone: A `keras_hub.models.QwenMoeBackbone` instance.
+         preprocessor: A `keras_hub.models.QwenMoeCausalLMPreprocessor` or
+             `None`. If `None`, this model will not apply preprocessing, and
+             inputs should be preprocessed before calling the model.
+
+     Examples:
+
+     Use `generate()` to do text generation.
+     ```python
+     qwen_moe_lm = keras_hub.models.QwenMoeCausalLM.from_preset("qwen_moe_a2_7b")
+     qwen_moe_lm.generate("I want to say", max_length=30)
+
+     # Generate with batched prompts.
+     qwen_moe_lm.generate(["This is a", "Where are you"], max_length=30)
+     ```
+
+     Compile the `generate()` function with a custom sampler.
+     ```python
+     qwen_moe_lm = keras_hub.models.QwenMoeCausalLM.from_preset("qwen_moe_a2_7b")
+     qwen_moe_lm.compile(sampler="top_k")
+     qwen_moe_lm.generate("I want to say", max_length=30)
+
+     qwen_moe_lm.compile(sampler=keras_hub.samplers.BeamSampler(num_beams=2))
+     qwen_moe_lm.generate("I want to say", max_length=30)
+     ```
+
+     Use `generate()` without preprocessing.
+     ```python
+     prompt = {
+         # Token ids for "<bos> Qwen is".
+         "token_ids": np.array([[2, 12345, 678, 0, 0, 0, 0]] * 2),
+         # Use `"padding_mask"` to indicate values that should not be overridden.
+         "padding_mask": np.array([[1, 1, 1, 0, 0, 0, 0]] * 2),
+     }
+
+     qwen_moe_lm = keras_hub.models.QwenMoeCausalLM.from_preset(
+         "qwen_moe_a2_7b",
+         preprocessor=None,
+     )
+     qwen_moe_lm.generate(prompt)
+     ```
+
+     Call `fit()` on a single batch.
+     ```python
+     features = ["The quick brown fox jumped.", "I forgot my homework."]
+     qwen_moe_lm = keras_hub.models.QwenMoeCausalLM.from_preset("qwen_moe_a2_7b")
+     qwen_moe_lm.fit(x=features, batch_size=2)
+     ```
+
+     Call `fit()` with LoRA fine-tuning enabled.
+     ```python
+     features = ["The quick brown fox jumped.", "I forgot my homework."]
+     qwen_moe_lm = keras_hub.models.QwenMoeCausalLM.from_preset("qwen_moe_a2_7b")
+     qwen_moe_lm.backbone.enable_lora(rank=4)
+     qwen_moe_lm.fit(x=features, batch_size=2)
+     ```
+
+     Call `fit()` without preprocessing.
+     ```python
+     x = {
+         # Token ids for "<bos> Qwen is a language model<eos>"
+         "token_ids": np.array([[2, 12345, 678, 543, 9876, 1, 0, 0]] * 2),
+         "padding_mask": np.array([[1, 1, 1, 1, 1, 1, 0, 0]] * 2),
+     }
+     y = np.array([[12345, 678, 543, 9876, 1, 0, 0, 0]] * 2)
+     sw = np.array([[1, 1, 1, 1, 1, 0, 0, 0]] * 2)
+
+     qwen_moe_lm = keras_hub.models.QwenMoeCausalLM.from_preset(
+         "qwen_moe_a2_7b",
+         preprocessor=None,
+     )
+     qwen_moe_lm.fit(x=x, y=y, sample_weight=sw, batch_size=2)
+     ```
+
+     Custom backbone and vocabulary.
+     ```python
+     tokenizer = keras_hub.models.QwenMoeTokenizer(
+         vocabulary="qwen_moe_vocab.json",
+         merges="qwen_moe_merges.txt",
+     )
+     preprocessor = keras_hub.models.QwenMoeCausalLMPreprocessor(
+         tokenizer=tokenizer,
+         sequence_length=128,
+     )
+     backbone = keras_hub.models.QwenMoeBackbone(
+         vocabulary_size=151936,
+         num_layers=28,
+         num_query_heads=16,
+         num_key_value_heads=8,
+         hidden_dim=2048,
+         intermediate_dim=4096,
+         moe_intermediate_dim=128,
+         shared_expert_intermediate_dim=4096,
+         num_experts=60,
+         top_k=4,
+         max_sequence_length=4096,
+     )
+     qwen_moe_lm = keras_hub.models.QwenMoeCausalLM(
+         backbone=backbone,
+         preprocessor=preprocessor,
+     )
+     features = ["The quick brown fox jumped.", "I forgot my homework."]
+     qwen_moe_lm.fit(x=features, batch_size=2)
+     ```
+     """
+
+     backbone_cls = QwenMoeBackbone
+     preprocessor_cls = QwenMoeCausalLMPreprocessor
+
+     def __init__(self, backbone, preprocessor=None, **kwargs):
+         # === Layers ===
+         self.backbone = backbone
+         self.preprocessor = preprocessor
+
+         # === Functional Model ===
+         # This must be "backbone.input" i.e. the full input structure,
+         # rather than "backbone.inputs" which is the flattened list of inputs.
+         inputs = backbone.input
+         hidden_states = backbone(inputs)
+         outputs = backbone.token_embedding(hidden_states, reverse=True)
+         super().__init__(
+             inputs=inputs,
+             outputs=outputs,
+             **kwargs,
+         )
+
+     def call_with_cache(
+         self,
+         token_ids,
+         cache,
+         cache_update_index,
+     ):
+         """Forward pass of `QwenMoeCausalLM` with cache.
+
+         `call_with_cache` adds an additional forward pass for the model for
+         autoregressive inference. Unlike calling the model directly, this
+         method allows caching previous key/value Tensors in the multi-head
+         attention layers, and avoids recomputing the outputs of seen tokens.
+
+         Args:
+             token_ids: a dense int Tensor with shape `(batch_size, max_length)`.
+             cache: a dense float Tensor, the cache of key and value.
+             cache_update_index: int, or int Tensor. The index of current inputs
+                 in the whole sequence.
+
+         Returns:
+             A `(logits, hidden_states, cache)` tuple, where `logits` is the
+             language model logits for the input token ids, `hidden_states` is
+             the final hidden representation of the input tokens, and `cache`
+             is the decoding cache.
+         """
+         x = self.backbone.token_embedding(token_ids)
+         # Each decoder layer has a cache; we update them separately.
+         updated_cache = []
+         for i in range(self.backbone.num_layers):
+             current_cache = cache[:, i, ...]
+             x, next_cache = self.backbone.transformer_layers[i](
+                 x,
+                 self_attention_cache=current_cache,
+                 self_attention_cache_update_index=cache_update_index,
+             )
+             updated_cache.append(next_cache)
+         cache = ops.stack(updated_cache, axis=1)
+         hidden_states = x = self.backbone.layer_norm(x)
+         logits = self.backbone.token_embedding(x, reverse=True)
+         return logits, hidden_states, cache
+
+     def _build_cache(self, token_ids):
+         """Build an empty cache for use with `call_with_cache()`."""
+         batch_size = ops.shape(token_ids)[0]
+         max_length = ops.shape(token_ids)[1]
+         num_layers = self.backbone.num_layers
+         num_key_value_heads = self.backbone.num_key_value_heads
+         head_dim = self.backbone.hidden_dim // self.backbone.num_query_heads
+         shape = [
+             batch_size,
+             num_layers,
+             2,
+             max_length,
+             num_key_value_heads,
+             head_dim,
+         ]
+         cache = ops.zeros(shape, dtype=self.compute_dtype)
+         # Seed the cache.
+         _, hidden_states, cache = self.call_with_cache(token_ids, cache, 0)
+         return hidden_states, cache
+
+     def generate_step(
+         self,
+         inputs,
+         stop_token_ids=None,
+     ):
+         """A compilable generation function for a single batch of inputs.
+
+         This function represents the inner, XLA-compilable, generation
+         function for a single batch of inputs. Inputs should have the same
+         structure as model inputs, a dictionary with keys `"token_ids"` and
+         `"padding_mask"`.
+
+         Args:
+             inputs: A dictionary with two keys `"token_ids"` and
+                 `"padding_mask"` and batched tensor values.
+             stop_token_ids: Tuple of ids of end tokens to stop on. If all
+                 sequences have produced a new stop token, generation
+                 will stop.
+         """
+         token_ids, padding_mask = inputs["token_ids"], inputs["padding_mask"]
+         # Create and seed cache with a single forward pass.
+         hidden_states, cache = self._build_cache(token_ids)
+         # Compute the lengths of all user inputted token ids.
+         row_lengths = ops.sum(ops.cast(padding_mask, "int32"), axis=-1)
+         # Start at the first index that has no user inputted id.
+         index = ops.min(row_lengths)
+
+         def next(prompt, cache, index):
+             # The cache index is the index of our previous token.
+             cache_update_index = index - 1
+             batch_size = ops.shape(prompt)[0]
+             prompt = ops.slice(prompt, [0, cache_update_index], [batch_size, 1])
+             logits, hidden_states, cache = self.call_with_cache(
+                 prompt,
+                 cache,
+                 cache_update_index,
+             )
+             return (
+                 ops.squeeze(logits, axis=1),
+                 ops.squeeze(hidden_states, axis=1),
+                 cache,
+             )
+
+         token_ids = self.sampler(
+             next=next,
+             prompt=token_ids,
+             cache=cache,
+             index=index,
+             mask=padding_mask,
+             stop_token_ids=stop_token_ids,
+             hidden_states=hidden_states,
+             model=self,
+         )
+
+         # Compute an output padding mask with the token ids we updated.
+         if stop_token_ids is not None:
+             # Build a mask of stop token locations not in the original
+             # prompt (not in locations where `padding_mask` is True).
+             end_locations = any_equal(
+                 token_ids, stop_token_ids, ops.logical_not(padding_mask)
+             )
+             end_locations = ops.cast(end_locations, "int32")
+             # Use cumsum to get ones in all locations after end_locations.
+             cumsum = ops.cast(ops.cumsum(end_locations, axis=-1), "int32")
+             overflow = cumsum - end_locations
+             # Our padding mask is the inverse of these overflow locations.
+             padding_mask = ops.logical_not(ops.cast(overflow, "bool"))
+         else:
+             # Without early stopping, all locations will have been updated.
+             padding_mask = ops.ones_like(token_ids, dtype="bool")
+         return {
+             "token_ids": token_ids,
+             "padding_mask": padding_mask,
+         }
+
+     def score(
+         self,
+         token_ids,
+         padding_mask=None,
+         scoring_mode="logits",
+         layer_intercept_fn=None,
+         target_ids=None,
+     ):
+         """Score a generation represented by the provided token ids.
+
+         Args:
+             token_ids: A `(batch_size, num_tokens)` tensor containing the
+                 tokens to score.
+             padding_mask: A `(batch_size, num_tokens)` tensor indicating
+                 which tokens are valid. Defaults to an all-ones mask.
+             scoring_mode: The type of scores to return, either "logits" or
+                 "loss"; both are computed per input token.
+             layer_intercept_fn: An optional function for augmenting
+                 activations with additional computation, for example as part
+                 of interpretability research. The function is passed the
+                 activations and a layer index, and must return a tensor of
+                 the same shape. Index -1 accompanies the embeddings returned
+                 by `self.backbone.token_embedding()`; subsequent 0-based
+                 indices correspond to each transformer layer's output.
+             target_ids: A `(batch_size, num_tokens)` tensor of target tokens
+                 against which the loss is computed. Required when
+                 `scoring_mode` is "loss".
+
+         Returns:
+             Per-token logits of shape `(batch_size, num_tokens, vocab_size)`
+             in "logits" mode, or a per-token loss of shape
+             `(batch_size, num_tokens)` in "loss" mode.
+         """
+         if scoring_mode not in ("logits", "loss"):
+             raise ValueError(
+                 "Unsupported scoring_mode. Must be one of 'logits' or 'loss'."
+             )
+
+         if scoring_mode == "loss" and target_ids is None:
+             raise ValueError(
+                 "Cannot compute loss without targets. Please provide target "
+                 "token ids via the target_ids parameter."
+             )
+
+         batch_shape = ops.shape(token_ids)[:2]
+         assert len(batch_shape) == 2
+
+         if padding_mask is None:
+             padding_mask = ops.ones(shape=batch_shape)
+
+         if layer_intercept_fn is None:
+
+             def default_layer_intercept_fn(x, unused_i):
+                 return x
+
+             layer_intercept_fn = default_layer_intercept_fn
+
+         token_embeddings = self.backbone.token_embedding(token_ids)
+         x = layer_intercept_fn(token_embeddings, -1)
+
+         for i, transformer_layer in enumerate(self.backbone.transformer_layers):
+             x = transformer_layer(x, decoder_padding_mask=padding_mask)
+             x = layer_intercept_fn(x, i)
+
+         x = self.backbone.layer_norm(x)
+         logits = self.backbone.token_embedding(x, reverse=True)
+
+         if scoring_mode == "logits":
+             return logits
+
+         per_token_loss_fn = keras.losses.SparseCategoricalCrossentropy(
+             from_logits=True, reduction="none"
+         )
+         per_token_loss = per_token_loss_fn(target_ids, logits)
+         return per_token_loss
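
Two short sketches may help when reading the code above; both assume a constructed `QwenMoeCausalLM` instance named `qwen_moe_lm` and use arbitrary illustrative token ids. First, the cache built by `_build_cache()` packs keys and values for every decoder layer into a single tensor of shape `(batch_size, num_layers, 2, max_length, num_key_value_heads, head_dim)`, and `call_with_cache()` consumes one token at a time against it:

```python
import numpy as np

# A prompt of 3 real tokens, right-padded to a max_length of 8.
token_ids = np.array([[2, 12345, 678, 0, 0, 0, 0, 0]])

# One full forward pass over the padded prompt seeds every layer's cache.
hidden_states, cache = qwen_moe_lm._build_cache(token_ids)
# cache[:, i, 0] holds layer i's keys; cache[:, i, 1] holds its values.

# To decode position 3, feed only the previous token (position 2) and the
# cache slot its key/value pair should be written to, as in `next()` above.
logits, _, cache = qwen_moe_lm.call_with_cache(
    token_ids[:, 2:3], cache, cache_update_index=2
)
next_token = np.argmax(logits[:, 0, :], axis=-1)  # greedy pick; seq length 1
```

Second, `score()` runs an ordinary uncached forward pass; in `"loss"` mode it returns the per-token cross-entropy against `target_ids` (the inputs shifted left by one):

```python
token_ids = np.array([[2, 12345, 678, 543, 9876, 1]])
target_ids = np.array([[12345, 678, 543, 9876, 1, 0]])
per_token_loss = qwen_moe_lm.score(
    token_ids, scoring_mode="loss", target_ids=target_ids
)  # shape: (batch_size, num_tokens)
```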
keras_hub/src/models/qwen_moe/qwen_moe_causal_lm_preprocessor.py
@@ -0,0 +1,17 @@
+ from keras_hub.src.api_export import keras_hub_export
+ from keras_hub.src.models.causal_lm_preprocessor import CausalLMPreprocessor
+ from keras_hub.src.models.qwen_moe.qwen_moe_backbone import QwenMoeBackbone
+ from keras_hub.src.models.qwen_moe.qwen_moe_tokenizer import QwenMoeTokenizer
+
+
+ @keras_hub_export(
+     [
+         "keras_hub.models.QwenMoeCausalLMPreprocessor",
+     ]
+ )
+ class QwenMoeCausalLMPreprocessor(CausalLMPreprocessor):
+     backbone_cls = QwenMoeBackbone
+     tokenizer_cls = QwenMoeTokenizer
+
+     def __init__(self, *args, **kwargs):
+         super().__init__(*args, **kwargs)
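
The subclass above only wires the Qwen MoE backbone and tokenizer into the generic `CausalLMPreprocessor`, which handles tokenization, start/end token insertion, and packing of labels and sample weights. A minimal standalone usage sketch, assuming the `qwen_moe_a2_7b` preset referenced in the docstrings above:

```python
import keras_hub

preprocessor = keras_hub.models.QwenMoeCausalLMPreprocessor.from_preset(
    "qwen_moe_a2_7b",
    sequence_length=128,
)
# x holds "token_ids" and "padding_mask"; y is x["token_ids"] shifted left
# by one position; sw masks padding positions out of the loss.
x, y, sw = preprocessor(["The quick brown fox jumped."])
```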