xinference 0.13.2__py3-none-any.whl → 0.13.3__py3-none-any.whl

This diff shows the changes between publicly released versions of the package as they appear in their public registry, and is provided for informational purposes only.

Potentially problematic release: this version of xinference might be problematic.

Files changed (78)
  1. xinference/__init__.py +0 -1
  2. xinference/_version.py +3 -3
  3. xinference/api/restful_api.py +26 -4
  4. xinference/client/restful/restful_client.py +16 -1
  5. xinference/core/chat_interface.py +2 -2
  6. xinference/core/model.py +8 -3
  7. xinference/core/scheduler.py +4 -4
  8. xinference/model/audio/core.py +5 -2
  9. xinference/model/audio/cosyvoice.py +136 -0
  10. xinference/model/audio/model_spec.json +24 -0
  11. xinference/model/audio/model_spec_modelscope.json +27 -0
  12. xinference/model/flexible/launchers/__init__.py +1 -0
  13. xinference/model/flexible/launchers/image_process_launcher.py +70 -0
  14. xinference/model/image/model_spec.json +7 -0
  15. xinference/model/image/stable_diffusion/core.py +6 -1
  16. xinference/model/llm/llm_family.json +802 -82
  17. xinference/model/llm/llm_family_csghub.json +39 -0
  18. xinference/model/llm/llm_family_modelscope.json +295 -47
  19. xinference/model/llm/pytorch/chatglm.py +243 -5
  20. xinference/model/llm/pytorch/cogvlm2.py +1 -1
  21. xinference/model/llm/utils.py +78 -1
  22. xinference/model/llm/vllm/core.py +8 -0
  23. xinference/thirdparty/cosyvoice/__init__.py +0 -0
  24. xinference/thirdparty/cosyvoice/bin/__init__.py +0 -0
  25. xinference/thirdparty/cosyvoice/bin/inference.py +114 -0
  26. xinference/thirdparty/cosyvoice/bin/train.py +136 -0
  27. xinference/thirdparty/cosyvoice/cli/__init__.py +0 -0
  28. xinference/thirdparty/cosyvoice/cli/cosyvoice.py +83 -0
  29. xinference/thirdparty/cosyvoice/cli/frontend.py +168 -0
  30. xinference/thirdparty/cosyvoice/cli/model.py +60 -0
  31. xinference/thirdparty/cosyvoice/dataset/__init__.py +0 -0
  32. xinference/thirdparty/cosyvoice/dataset/dataset.py +160 -0
  33. xinference/thirdparty/cosyvoice/dataset/processor.py +369 -0
  34. xinference/thirdparty/cosyvoice/flow/__init__.py +0 -0
  35. xinference/thirdparty/cosyvoice/flow/decoder.py +222 -0
  36. xinference/thirdparty/cosyvoice/flow/flow.py +135 -0
  37. xinference/thirdparty/cosyvoice/flow/flow_matching.py +138 -0
  38. xinference/thirdparty/cosyvoice/flow/length_regulator.py +49 -0
  39. xinference/thirdparty/cosyvoice/hifigan/__init__.py +0 -0
  40. xinference/thirdparty/cosyvoice/hifigan/f0_predictor.py +55 -0
  41. xinference/thirdparty/cosyvoice/hifigan/generator.py +391 -0
  42. xinference/thirdparty/cosyvoice/llm/__init__.py +0 -0
  43. xinference/thirdparty/cosyvoice/llm/llm.py +206 -0
  44. xinference/thirdparty/cosyvoice/transformer/__init__.py +0 -0
  45. xinference/thirdparty/cosyvoice/transformer/activation.py +84 -0
  46. xinference/thirdparty/cosyvoice/transformer/attention.py +326 -0
  47. xinference/thirdparty/cosyvoice/transformer/convolution.py +145 -0
  48. xinference/thirdparty/cosyvoice/transformer/decoder.py +396 -0
  49. xinference/thirdparty/cosyvoice/transformer/decoder_layer.py +132 -0
  50. xinference/thirdparty/cosyvoice/transformer/embedding.py +293 -0
  51. xinference/thirdparty/cosyvoice/transformer/encoder.py +472 -0
  52. xinference/thirdparty/cosyvoice/transformer/encoder_layer.py +236 -0
  53. xinference/thirdparty/cosyvoice/transformer/label_smoothing_loss.py +96 -0
  54. xinference/thirdparty/cosyvoice/transformer/positionwise_feed_forward.py +115 -0
  55. xinference/thirdparty/cosyvoice/transformer/subsampling.py +383 -0
  56. xinference/thirdparty/cosyvoice/utils/__init__.py +0 -0
  57. xinference/thirdparty/cosyvoice/utils/class_utils.py +70 -0
  58. xinference/thirdparty/cosyvoice/utils/common.py +103 -0
  59. xinference/thirdparty/cosyvoice/utils/executor.py +110 -0
  60. xinference/thirdparty/cosyvoice/utils/file_utils.py +41 -0
  61. xinference/thirdparty/cosyvoice/utils/frontend_utils.py +125 -0
  62. xinference/thirdparty/cosyvoice/utils/mask.py +227 -0
  63. xinference/thirdparty/cosyvoice/utils/scheduler.py +739 -0
  64. xinference/thirdparty/cosyvoice/utils/train_utils.py +289 -0
  65. xinference/web/ui/build/asset-manifest.json +3 -3
  66. xinference/web/ui/build/index.html +1 -1
  67. xinference/web/ui/build/static/js/{main.95c1d652.js → main.2ef0cfaf.js} +3 -3
  68. xinference/web/ui/build/static/js/main.2ef0cfaf.js.map +1 -0
  69. xinference/web/ui/node_modules/.cache/babel-loader/b6807ecc0c231fea699533518a0eb2a2bf68a081ce00d452be40600dbffa17a7.json +1 -0
  70. {xinference-0.13.2.dist-info → xinference-0.13.3.dist-info}/METADATA +16 -8
  71. {xinference-0.13.2.dist-info → xinference-0.13.3.dist-info}/RECORD +76 -32
  72. xinference/web/ui/build/static/js/main.95c1d652.js.map +0 -1
  73. xinference/web/ui/node_modules/.cache/babel-loader/709711edada3f1596b309d571285fd31f1c364d66f4425bc28723d0088cc351a.json +0 -1
  74. /xinference/web/ui/build/static/js/{main.95c1d652.js.LICENSE.txt → main.2ef0cfaf.js.LICENSE.txt} +0 -0
  75. {xinference-0.13.2.dist-info → xinference-0.13.3.dist-info}/LICENSE +0 -0
  76. {xinference-0.13.2.dist-info → xinference-0.13.3.dist-info}/WHEEL +0 -0
  77. {xinference-0.13.2.dist-info → xinference-0.13.3.dist-info}/entry_points.txt +0 -0
  78. {xinference-0.13.2.dist-info → xinference-0.13.3.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,472 @@
+# Copyright (c) 2021 Mobvoi Inc (Binbin Zhang, Di Wu)
+#               2022 Xingchen Song (sxc19@mails.tsinghua.edu.cn)
+#               2024 Alibaba Inc (Xiang Lyu)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# Modified from ESPnet(https://github.com/espnet/espnet)
+"""Encoder definition."""
+from typing import Tuple
+
+import torch
+import torch.utils.checkpoint as ckpt
+
+from cosyvoice.transformer.convolution import ConvolutionModule
+from cosyvoice.transformer.encoder_layer import TransformerEncoderLayer
+from cosyvoice.transformer.encoder_layer import ConformerEncoderLayer
+from cosyvoice.transformer.positionwise_feed_forward import PositionwiseFeedForward
+from cosyvoice.utils.class_utils import (
+    COSYVOICE_EMB_CLASSES,
+    COSYVOICE_SUBSAMPLE_CLASSES,
+    COSYVOICE_ATTENTION_CLASSES,
+    COSYVOICE_ACTIVATION_CLASSES,
+)
+from cosyvoice.utils.mask import make_pad_mask
+from cosyvoice.utils.mask import add_optional_chunk_mask
+
+
+class BaseEncoder(torch.nn.Module):
+
+    def __init__(
+        self,
+        input_size: int,
+        output_size: int = 256,
+        attention_heads: int = 4,
+        linear_units: int = 2048,
+        num_blocks: int = 6,
+        dropout_rate: float = 0.1,
+        positional_dropout_rate: float = 0.1,
+        attention_dropout_rate: float = 0.0,
+        input_layer: str = "conv2d",
+        pos_enc_layer_type: str = "abs_pos",
+        normalize_before: bool = True,
+        static_chunk_size: int = 0,
+        use_dynamic_chunk: bool = False,
+        global_cmvn: torch.nn.Module = None,
+        use_dynamic_left_chunk: bool = False,
+        gradient_checkpointing: bool = False,
+    ):
+        """
+        Args:
+            input_size (int): input dim
+            output_size (int): dimension of attention
+            attention_heads (int): the number of heads of multi head attention
+            linear_units (int): the hidden units number of position-wise feed
+                forward
+            num_blocks (int): the number of decoder blocks
+            dropout_rate (float): dropout rate
+            attention_dropout_rate (float): dropout rate in attention
+            positional_dropout_rate (float): dropout rate after adding
+                positional encoding
+            input_layer (str): input layer type.
+                optional [linear, conv2d, conv2d6, conv2d8]
+            pos_enc_layer_type (str): Encoder positional encoding layer type.
+                optional [abs_pos, scaled_abs_pos, rel_pos, no_pos]
+            normalize_before (bool):
+                True: use layer_norm before each sub-block of a layer.
+                False: use layer_norm after each sub-block of a layer.
+            static_chunk_size (int): chunk size for static chunk training and
+                decoding
+            use_dynamic_chunk (bool): whether use dynamic chunk size for
+                training or not, you can only use fixed chunk (chunk_size > 0)
+                or dynamic chunk size (use_dynamic_chunk = True)
+            global_cmvn (Optional[torch.nn.Module]): Optional GlobalCMVN module
+            use_dynamic_left_chunk (bool): whether use dynamic left chunk in
+                dynamic chunk training
+            key_bias: whether use bias in attention.linear_k, False for whisper models.
+            gradient_checkpointing: rerunning a forward-pass segment for each
+                checkpointed segment during backward.
+        """
+        super().__init__()
+        self._output_size = output_size
+
+        self.global_cmvn = global_cmvn
+        self.embed = COSYVOICE_SUBSAMPLE_CLASSES[input_layer](
+            input_size,
+            output_size,
+            dropout_rate,
+            COSYVOICE_EMB_CLASSES[pos_enc_layer_type](output_size,
+                                                      positional_dropout_rate),
+        )
+
+        self.normalize_before = normalize_before
+        self.after_norm = torch.nn.LayerNorm(output_size, eps=1e-5)
+        self.static_chunk_size = static_chunk_size
+        self.use_dynamic_chunk = use_dynamic_chunk
+        self.use_dynamic_left_chunk = use_dynamic_left_chunk
+        self.gradient_checkpointing = gradient_checkpointing
+
+    def output_size(self) -> int:
+        return self._output_size
+
+    def forward(
+        self,
+        xs: torch.Tensor,
+        xs_lens: torch.Tensor,
+        decoding_chunk_size: int = 0,
+        num_decoding_left_chunks: int = -1,
+    ) -> Tuple[torch.Tensor, torch.Tensor]:
+        """Embed positions in tensor.
+
+        Args:
+            xs: padded input tensor (B, T, D)
+            xs_lens: input length (B)
+            decoding_chunk_size: decoding chunk size for dynamic chunk
+                0: default for training, use random dynamic chunk.
+                <0: for decoding, use full chunk.
+                >0: for decoding, use fixed chunk size as set.
+            num_decoding_left_chunks: number of left chunks, this is for decoding,
+                the chunk size is decoding_chunk_size.
+                >=0: use num_decoding_left_chunks
+                <0: use all left chunks
+        Returns:
+            encoder output tensor xs, and subsampled masks
+            xs: padded output tensor (B, T' ~= T/subsample_rate, D)
+            masks: torch.Tensor batch padding mask after subsample
+                (B, 1, T' ~= T/subsample_rate)
+        NOTE(xcsong):
+            We pass the `__call__` method of the modules instead of `forward` to the
+            checkpointing API because `__call__` attaches all the hooks of the module.
+            https://discuss.pytorch.org/t/any-different-between-model-input-and-model-forward-input/3690/2
+        """
+        T = xs.size(1)
+        masks = ~make_pad_mask(xs_lens, T).unsqueeze(1)  # (B, 1, T)
+        if self.global_cmvn is not None:
+            xs = self.global_cmvn(xs)
+        xs, pos_emb, masks = self.embed(xs, masks)
+        mask_pad = masks  # (B, 1, T/subsample_rate)
+        chunk_masks = add_optional_chunk_mask(xs, masks,
+                                              self.use_dynamic_chunk,
+                                              self.use_dynamic_left_chunk,
+                                              decoding_chunk_size,
+                                              self.static_chunk_size,
+                                              num_decoding_left_chunks)
+        if self.gradient_checkpointing and self.training:
+            xs = self.forward_layers_checkpointed(xs, chunk_masks, pos_emb,
+                                                  mask_pad)
+        else:
+            xs = self.forward_layers(xs, chunk_masks, pos_emb, mask_pad)
+        if self.normalize_before:
+            xs = self.after_norm(xs)
+        # Here we assume the mask is not changed in encoder layers, so just
+        # return the masks before encoder layers, and the masks will be used
+        # for cross attention with decoder later
+        return xs, masks
+
+    def forward_layers(self, xs: torch.Tensor, chunk_masks: torch.Tensor,
+                       pos_emb: torch.Tensor,
+                       mask_pad: torch.Tensor) -> torch.Tensor:
+        for layer in self.encoders:
+            xs, chunk_masks, _, _ = layer(xs, chunk_masks, pos_emb, mask_pad)
+        return xs
+
+    @torch.jit.ignore(drop=True)
+    def forward_layers_checkpointed(self, xs: torch.Tensor,
+                                    chunk_masks: torch.Tensor,
+                                    pos_emb: torch.Tensor,
+                                    mask_pad: torch.Tensor) -> torch.Tensor:
+        for layer in self.encoders:
+            xs, chunk_masks, _, _ = ckpt.checkpoint(layer.__call__, xs,
+                                                    chunk_masks, pos_emb,
+                                                    mask_pad)
+        return xs
+
+    def forward_chunk(
+        self,
+        xs: torch.Tensor,
+        offset: int,
+        required_cache_size: int,
+        att_cache: torch.Tensor = torch.zeros(0, 0, 0, 0),
+        cnn_cache: torch.Tensor = torch.zeros(0, 0, 0, 0),
+        att_mask: torch.Tensor = torch.ones((0, 0, 0), dtype=torch.bool),
+    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
+        """ Forward just one chunk
+
+        Args:
+            xs (torch.Tensor): chunk input, with shape (b=1, time, mel-dim),
+                where `time == (chunk_size - 1) * subsample_rate + \
+                        subsample.right_context + 1`
+            offset (int): current offset in encoder output time stamp
+            required_cache_size (int): cache size required for next chunk
+                computation
+                >=0: actual cache size
+                <0: means all history cache is required
+            att_cache (torch.Tensor): cache tensor for KEY & VALUE in
+                transformer/conformer attention, with shape
+                (elayers, head, cache_t1, d_k * 2), where
+                `head * d_k == hidden-dim` and
+                `cache_t1 == chunk_size * num_decoding_left_chunks`.
+            cnn_cache (torch.Tensor): cache tensor for cnn_module in conformer,
+                (elayers, b=1, hidden-dim, cache_t2), where
+                `cache_t2 == cnn.lorder - 1`
+
+        Returns:
+            torch.Tensor: output of current input xs,
+                with shape (b=1, chunk_size, hidden-dim).
+            torch.Tensor: new attention cache required for next chunk, with
+                dynamic shape (elayers, head, ?, d_k * 2)
+                depending on required_cache_size.
+            torch.Tensor: new conformer cnn cache required for next chunk, with
+                same shape as the original cnn_cache.
+
+        """
+        assert xs.size(0) == 1
+        # tmp_masks is just for interface compatibility
+        tmp_masks = torch.ones(1,
+                               xs.size(1),
+                               device=xs.device,
+                               dtype=torch.bool)
+        tmp_masks = tmp_masks.unsqueeze(1)
+        if self.global_cmvn is not None:
+            xs = self.global_cmvn(xs)
+        # NOTE(xcsong): Before embed, shape(xs) is (b=1, time, mel-dim)
+        xs, pos_emb, _ = self.embed(xs, tmp_masks, offset)
+        # NOTE(xcsong): After embed, shape(xs) is (b=1, chunk_size, hidden-dim)
+        elayers, cache_t1 = att_cache.size(0), att_cache.size(2)
+        chunk_size = xs.size(1)
+        attention_key_size = cache_t1 + chunk_size
+        pos_emb = self.embed.position_encoding(offset=offset - cache_t1,
+                                               size=attention_key_size)
+        if required_cache_size < 0:
+            next_cache_start = 0
+        elif required_cache_size == 0:
+            next_cache_start = attention_key_size
+        else:
+            next_cache_start = max(attention_key_size - required_cache_size, 0)
+        r_att_cache = []
+        r_cnn_cache = []
+        for i, layer in enumerate(self.encoders):
+            # NOTE(xcsong): Before layer.forward
+            #   shape(att_cache[i:i + 1]) is (1, head, cache_t1, d_k * 2),
+            #   shape(cnn_cache[i]) is (b=1, hidden-dim, cache_t2)
+            xs, _, new_att_cache, new_cnn_cache = layer(
+                xs,
+                att_mask,
+                pos_emb,
+                att_cache=att_cache[i:i + 1] if elayers > 0 else att_cache,
+                cnn_cache=cnn_cache[i] if cnn_cache.size(0) > 0 else cnn_cache)
+            # NOTE(xcsong): After layer.forward
+            #   shape(new_att_cache) is (1, head, attention_key_size, d_k * 2),
+            #   shape(new_cnn_cache) is (b=1, hidden-dim, cache_t2)
+            r_att_cache.append(new_att_cache[:, :, next_cache_start:, :])
+            r_cnn_cache.append(new_cnn_cache.unsqueeze(0))
+        if self.normalize_before:
+            xs = self.after_norm(xs)
+
+        # NOTE(xcsong): shape(r_att_cache) is (elayers, head, ?, d_k * 2),
+        #   ? may be larger than cache_t1, it depends on required_cache_size
+        r_att_cache = torch.cat(r_att_cache, dim=0)
+        # NOTE(xcsong): shape(r_cnn_cache) is (e, b=1, hidden-dim, cache_t2)
+        r_cnn_cache = torch.cat(r_cnn_cache, dim=0)
+
+        return (xs, r_att_cache, r_cnn_cache)
+
+    def forward_chunk_by_chunk(
+        self,
+        xs: torch.Tensor,
+        decoding_chunk_size: int,
+        num_decoding_left_chunks: int = -1,
+    ) -> Tuple[torch.Tensor, torch.Tensor]:
+        """ Forward input chunk by chunk with chunk_size like a streaming
+            fashion
+
+        Here we should pay special attention to computation cache in the
+        streaming style forward chunk by chunk. Three things should be taken
+        into account for computation in the current network:
+            1. transformer/conformer encoder layers output cache
+            2. convolution in conformer
+            3. convolution in subsampling
+
+        However, we don't implement subsampling cache for:
+            1. We can control subsampling module to output the right result by
+               overlapping input instead of cache left context, even though it
+               wastes some computation, but subsampling only takes a very
+               small fraction of computation in the whole model.
+            2. Typically, there are several convolution layers with subsampling
+               in subsampling module, it is tricky and complicated to do cache
+               with different convolution layers with different subsampling
+               rate.
+            3. Currently, nn.Sequential is used to stack all the convolution
+               layers in subsampling, we need to rewrite it to make it work
+               with cache, which is not preferred.
+        Args:
+            xs (torch.Tensor): (1, max_len, dim)
+            chunk_size (int): decoding chunk size
+        """
+        assert decoding_chunk_size > 0
+        # The model is trained by static or dynamic chunk
+        assert self.static_chunk_size > 0 or self.use_dynamic_chunk
+        subsampling = self.embed.subsampling_rate
+        context = self.embed.right_context + 1  # Add current frame
+        stride = subsampling * decoding_chunk_size
+        decoding_window = (decoding_chunk_size - 1) * subsampling + context
+        num_frames = xs.size(1)
+        att_cache: torch.Tensor = torch.zeros((0, 0, 0, 0), device=xs.device)
+        cnn_cache: torch.Tensor = torch.zeros((0, 0, 0, 0), device=xs.device)
+        outputs = []
+        offset = 0
+        required_cache_size = decoding_chunk_size * num_decoding_left_chunks
+
+        # Feed forward overlap input step by step
+        for cur in range(0, num_frames - context + 1, stride):
+            end = min(cur + decoding_window, num_frames)
+            chunk_xs = xs[:, cur:end, :]
+            (y, att_cache,
+             cnn_cache) = self.forward_chunk(chunk_xs, offset,
+                                             required_cache_size, att_cache,
+                                             cnn_cache)
+            outputs.append(y)
+            offset += y.size(1)
+        ys = torch.cat(outputs, 1)
+        masks = torch.ones((1, 1, ys.size(1)),
+                           device=ys.device,
+                           dtype=torch.bool)
+        return ys, masks
+
+
+class TransformerEncoder(BaseEncoder):
+    """Transformer encoder module."""
+
+    def __init__(
+        self,
+        input_size: int,
+        output_size: int = 256,
+        attention_heads: int = 4,
+        linear_units: int = 2048,
+        num_blocks: int = 6,
+        dropout_rate: float = 0.1,
+        positional_dropout_rate: float = 0.1,
+        attention_dropout_rate: float = 0.0,
+        input_layer: str = "conv2d",
+        pos_enc_layer_type: str = "abs_pos",
+        normalize_before: bool = True,
+        static_chunk_size: int = 0,
+        use_dynamic_chunk: bool = False,
+        global_cmvn: torch.nn.Module = None,
+        use_dynamic_left_chunk: bool = False,
+        key_bias: bool = True,
+        selfattention_layer_type: str = "selfattn",
+        activation_type: str = "relu",
+        gradient_checkpointing: bool = False,
+    ):
+        """ Construct TransformerEncoder
+
+        See Encoder for the meaning of each parameter.
+        """
+        super().__init__(input_size, output_size, attention_heads,
+                         linear_units, num_blocks, dropout_rate,
+                         positional_dropout_rate, attention_dropout_rate,
+                         input_layer, pos_enc_layer_type, normalize_before,
+                         static_chunk_size, use_dynamic_chunk, global_cmvn,
+                         use_dynamic_left_chunk, gradient_checkpointing)
+        activation = COSYVOICE_ACTIVATION_CLASSES[activation_type]()
+        self.encoders = torch.nn.ModuleList([
+            TransformerEncoderLayer(
+                output_size,
+                COSYVOICE_ATTENTION_CLASSES[selfattention_layer_type](attention_heads,
+                                                                      output_size,
+                                                                      attention_dropout_rate,
+                                                                      key_bias),
+                PositionwiseFeedForward(output_size, linear_units,
+                                        dropout_rate, activation),
+                dropout_rate, normalize_before) for _ in range(num_blocks)
+        ])
+
+
+class ConformerEncoder(BaseEncoder):
+    """Conformer encoder module."""
+
+    def __init__(
+        self,
+        input_size: int,
+        output_size: int = 256,
+        attention_heads: int = 4,
+        linear_units: int = 2048,
+        num_blocks: int = 6,
+        dropout_rate: float = 0.1,
+        positional_dropout_rate: float = 0.1,
+        attention_dropout_rate: float = 0.0,
+        input_layer: str = "conv2d",
+        pos_enc_layer_type: str = "rel_pos",
+        normalize_before: bool = True,
+        static_chunk_size: int = 0,
+        use_dynamic_chunk: bool = False,
+        global_cmvn: torch.nn.Module = None,
+        use_dynamic_left_chunk: bool = False,
+        positionwise_conv_kernel_size: int = 1,
+        macaron_style: bool = True,
+        selfattention_layer_type: str = "rel_selfattn",
+        activation_type: str = "swish",
+        use_cnn_module: bool = True,
+        cnn_module_kernel: int = 15,
+        causal: bool = False,
+        cnn_module_norm: str = "batch_norm",
+        key_bias: bool = True,
+        gradient_checkpointing: bool = False,
+    ):
+        """Construct ConformerEncoder
+
+        Args:
+            input_size to use_dynamic_chunk, see in BaseEncoder
+            positionwise_conv_kernel_size (int): Kernel size of positionwise
+                conv1d layer.
+            macaron_style (bool): Whether to use macaron style for
+                positionwise layer.
+            selfattention_layer_type (str): Encoder attention layer type,
+                the parameter has no effect now, it's just for configure
+                compatibility.
+            activation_type (str): Encoder activation function type.
+            use_cnn_module (bool): Whether to use convolution module.
+            cnn_module_kernel (int): Kernel size of convolution module.
+            causal (bool): whether to use causal convolution or not.
+            key_bias: whether use bias in attention.linear_k, False for whisper models.
+        """
+        super().__init__(input_size, output_size, attention_heads,
+                         linear_units, num_blocks, dropout_rate,
+                         positional_dropout_rate, attention_dropout_rate,
+                         input_layer, pos_enc_layer_type, normalize_before,
+                         static_chunk_size, use_dynamic_chunk, global_cmvn,
+                         use_dynamic_left_chunk, gradient_checkpointing)
+        activation = COSYVOICE_ACTIVATION_CLASSES[activation_type]()
+
+        # self-attention module definition
+        encoder_selfattn_layer_args = (
+            attention_heads,
+            output_size,
+            attention_dropout_rate,
+            key_bias,
+        )
+        # feed-forward module definition
+        positionwise_layer_args = (
+            output_size,
+            linear_units,
+            dropout_rate,
+            activation,
+        )
+        # convolution module definition
+        convolution_layer_args = (output_size, cnn_module_kernel, activation,
+                                  cnn_module_norm, causal)
+
+        self.encoders = torch.nn.ModuleList([
+            ConformerEncoderLayer(
+                output_size,
+                COSYVOICE_ATTENTION_CLASSES[selfattention_layer_type](
+                    *encoder_selfattn_layer_args),
+                PositionwiseFeedForward(*positionwise_layer_args),
+                PositionwiseFeedForward(
+                    *positionwise_layer_args) if macaron_style else None,
+                ConvolutionModule(
+                    *convolution_layer_args) if use_cnn_module else None,
+                dropout_rate,
+                normalize_before,
+            ) for _ in range(num_blocks)
+        ])
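For orientation, the encoder vendored above exposes both an offline forward pass and the cache-based streaming path described in the forward_chunk / forward_chunk_by_chunk docstrings. The snippet below is a minimal sketch, not code from the release: the model sizes, chunk sizes, and the sys.path handling are illustrative assumptions (the vendored code only imports as `cosyvoice` once `xinference/thirdparty` is on the import path).

import sys
import torch

# Assumption: make the vendored package importable as `cosyvoice`;
# adjust the path to wherever xinference/thirdparty lives in your checkout.
sys.path.insert(0, "xinference/thirdparty")

from cosyvoice.transformer.encoder import ConformerEncoder

# Illustrative sizes only: 80-dim mel features and a deliberately small model.
encoder = ConformerEncoder(
    input_size=80,
    output_size=256,
    attention_heads=4,
    linear_units=1024,
    num_blocks=2,
    use_dynamic_chunk=True,  # required so forward_chunk_by_chunk() passes its assert
)
encoder.eval()

xs = torch.randn(1, 200, 80)   # (B=1, T=200 frames, mel-dim=80)
xs_lens = torch.tensor([200])

with torch.no_grad():
    # Offline: one full-context pass over the whole utterance.
    ys_full, masks = encoder(xs, xs_lens, decoding_chunk_size=-1)
    # Streaming-style: fixed 16-frame chunks, all left chunks kept, with the
    # attention/CNN caches threaded through forward_chunk() internally.
    ys_stream, _ = encoder.forward_chunk_by_chunk(
        xs, decoding_chunk_size=16, num_decoding_left_chunks=-1)

print(ys_full.shape, ys_stream.shape)  # both roughly (1, T / subsample_rate, 256)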