ctranslate2 4.6.1-cp314-cp314-win_amd64.whl → 4.6.3-cp314-cp314-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
ctranslate2/ctranslate2.dll CHANGED
Binary file
ctranslate2/cudnn64_9.dll CHANGED
Binary file
ctranslate2/extensions.py CHANGED
@@ -556,12 +556,28 @@ def _process_iterable(process_func, iterables, max_batch_size, batch_type, **kwa
 
 def _batch_iterator(iterable, batch_size, batch_type):
     streams = None
-    cur_batch_size = 0
+    max_length = 0
 
     for example in iterable:
         if not isinstance(example, tuple):
             example = (example,)
 
+        if batch_type == "examples":
+            if streams and len(streams[0]) == batch_size:
+                yield streams
+                streams = None
+
+        elif batch_type == "tokens":
+            max_length = max(max_length, len(example[0]))
+
+            if streams and (len(streams[0]) + 1) * max_length > batch_size:
+                yield streams
+                streams = None
+                max_length = len(example[0])
+
+        else:
+            raise ValueError("Invalid batch type %s" % batch_type)
+
         if streams is None:
             streams = tuple([] for _ in example)
         for batch, element in zip(streams, example):
@@ -569,17 +585,5 @@ def _batch_iterator(iterable, batch_size, batch_type):
                 raise ValueError("Input iterables do not have the same length")
             batch.append(element)
 
-        if batch_type == "examples":
-            cur_batch_size += 1
-        elif batch_type == "tokens":
-            cur_batch_size += len(example[0])
-        else:
-            raise ValueError("Invalid batch type %s" % batch_type)
-
-        if cur_batch_size >= batch_size:
-            yield streams
-            streams = None
-            cur_batch_size = 0
-
     if streams is not None:
         yield streams
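
Note on this change: the old `_batch_iterator` flushed a batch once the cumulative token count reached `batch_size`, while the new version flushes before the *padded* batch size (number of examples times the longest example seen so far) would exceed it. A minimal standalone sketch of the new "tokens" policy, with a hypothetical helper name and single-stream input instead of the library's tuple-of-streams handling:

```python
def batch_by_tokens(examples, batch_size):
    """Group token sequences so that num_examples * max_length <= batch_size."""
    batch, max_length = [], 0
    for tokens in examples:
        max_length = max(max_length, len(tokens))
        # Flush if admitting this example would overflow the padded budget.
        if batch and (len(batch) + 1) * max_length > batch_size:
            yield batch
            batch, max_length = [], len(tokens)
        batch.append(tokens)
    if batch:
        yield batch

# With batch_size=8: lengths [3, 3, 5] -> batches of lengths [3, 3] and [5],
# since adding the third example would give a padded size of 3 * 5 = 15 > 8.
```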
ctranslate2/specs/attention_spec.py CHANGED
@@ -32,14 +32,22 @@ class MultiHeadAttentionSpec(model_spec.LayerSpec):
         num_heads_kv=None,
         head_dim=None,
         sliding_window=None,
+        qk_norm=False,
+        qk_norm_rms=True,
+        has_norm=True,
     ):
         self.queries_scale = model_spec.OPTIONAL
 
-        self.layer_norm = common_spec.LayerNormSpec(rms_norm=rms_norm)
+        if has_norm:
+            self.layer_norm = common_spec.LayerNormSpec(rms_norm=rms_norm)
         self.linear = [
             common_spec.LinearSpec() for _ in range(2 if self_attention else 3)
         ]
 
+        if qk_norm:
+            self.q_norm = common_spec.LayerNormSpec(rms_norm=qk_norm_rms)
+            self.k_norm = common_spec.LayerNormSpec(rms_norm=qk_norm_rms)
+
         if relative_position:
             self.relative_position_keys = None
             self.relative_position_values = None
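
The new `qk_norm`/`qk_norm_rms` options declare `q_norm` and `k_norm` weights for normalizing the query and key projections (RMS norm by default). A small numpy sketch of the technique itself, assuming the normalization runs over the head dimension before the attention dot product, as in Gemma 3/Qwen3-style models; this illustrates the idea, not CTranslate2's kernels:

```python
import numpy as np

def rms_norm(x, weight, eps=1e-6):
    # Normalize over the last (head) dimension, then apply a learned scale.
    variance = np.mean(x * x, axis=-1, keepdims=True)
    return x / np.sqrt(variance + eps) * weight

# q, k: [batch, num_heads, time, head_dim]; q_weight, k_weight: [head_dim]
def apply_qk_norm(q, k, q_weight, k_weight):
    return rms_norm(q, q_weight), rms_norm(k, k_weight)
```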
ctranslate2/specs/transformer_spec.py CHANGED
@@ -23,6 +23,16 @@ class TransformerEncoderSpec(model_spec.LayerSpec):
         ffn_glu: bool = False,
         rms_norm: bool = False,
         multi_query_attention: bool = False,
+        num_heads_kv: Optional[int] = None,
+        head_dim: Optional[int] = None,
+        rotary_dim: Optional[int] = None,
+        rotary_interleave: bool = True,
+        rotary_scaling_type: Optional[attention_spec.RotaryScalingType] = None,
+        rotary_scaling_factor: float = 1,
+        rotary_base: float = 10000,
+        sliding_window: Optional[int] = None,
+        qk_norm: Optional[bool] = False,
+        pre_post_layer_norm: bool = False,
     ):
         """Initializes a Transformer encoder specification.
 
@@ -43,8 +53,28 @@ class TransformerEncoderSpec(model_spec.LayerSpec):
           ffn_glu: Use gated linear units in the FFN layers as described in
             https://arxiv.org/abs/2002.05202.
          rms_norm: Use the root mean square layer normalization.
-          multi_query_attention: Use multi-query attention.
+          multi_query_attention: Use multi-query attention (alias for num_heads_kv=1).
+          num_heads_kv: Number of attention heads for the key and value.
+          head_dim: Number of dimensions per attention head.
+          rotary_dim: Apply rotary embeddings to these first N dimensions. If 0, rotary
+            embeddings are applied to all dimensions.
+          rotary_interleave: Interleave the head dimensions when rotary embeddings are applied.
+            Otherwise the head dimensions are sliced in half.
+          rotary_scaling_type: Type of RoPE scaling.
+          rotary_scaling_factor: Factor used in the RoPE scaling.
+          rotary_base: The base period of the rotary embeddings.
+          sliding_window: Max sequence length to retain in the KV cache.
+          qk_norm: Apply layer normalization to the query and key projections.
+          pre_post_layer_norm: Add a post layer norm for each pre norm layer.
         """
+
+        if multi_query_attention:
+            if num_heads_kv is not None and num_heads_kv != 1:
+                raise ValueError(
+                    "Enabling multi_query_attention implies num_heads_kv=1"
+                )
+            num_heads_kv = 1
+
         self.multi_query_attention = multi_query_attention
         self.num_heads = np.dtype("int16").type(num_heads)
         self.pre_norm = pre_norm
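
The validation above makes the legacy flag an explicit alias. The same resolution logic in isolation (hypothetical helper name), with the two accepted cases spelled out:

```python
from typing import Optional

def resolve_num_heads_kv(
    multi_query_attention: bool, num_heads_kv: Optional[int]
) -> Optional[int]:
    # multi_query_attention is an alias for num_heads_kv=1.
    if multi_query_attention:
        if num_heads_kv is not None and num_heads_kv != 1:
            raise ValueError("Enabling multi_query_attention implies num_heads_kv=1")
        return 1
    return num_heads_kv

assert resolve_num_heads_kv(True, None) == 1   # MQA via the legacy flag
assert resolve_num_heads_kv(False, 4) == 4     # grouped-query attention
```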
@@ -60,13 +90,24 @@ class TransformerEncoderSpec(model_spec.LayerSpec):
         self.layer_norm = common_spec.LayerNormSpec(rms_norm=rms_norm)
         if layernorm_embedding:
             self.layernorm_embedding = common_spec.LayerNormSpec(rms_norm=rms_norm)
+        if sliding_window is not None:
+            self.sliding_window = np.dtype("int32").type(sliding_window)
+
         self.layer = [
             TransformerEncoderLayerSpec(
                 relative_position=relative_position,
                 relative_attention_bias=relative_attention_bias,
                 ffn_glu=ffn_glu,
                 rms_norm=rms_norm,
-                num_heads_kv=1 if multi_query_attention else None,
+                num_heads_kv=num_heads_kv,
+                head_dim=head_dim,
+                rotary_dim=rotary_dim,
+                rotary_interleave=rotary_interleave,
+                rotary_scaling_type=rotary_scaling_type,
+                rotary_scaling_factor=rotary_scaling_factor,
+                rotary_base=rotary_base,
+                qk_norm=qk_norm,
+                pre_post_layer_norm=pre_post_layer_norm,
             )
             for _ in range(num_layers)
         ]
@@ -109,6 +150,8 @@ class TransformerDecoderSpec(model_spec.LayerSpec):
         quant_type: Optional[common_spec.Quantization] = None,
         quant_group_size: Optional[int] = None,
         quant_bits: Optional[int] = None,
+        qk_norm: bool = False,
+        external_pre_post_encoder_layers: Optional[bool] = False,
     ):
         """Initializes a Transformer decoder specification.
 
@@ -155,6 +198,8 @@ class TransformerDecoderSpec(model_spec.LayerSpec):
           quant_type: quantization type used (like awq... for lower bit quantization)
           quant_group_size: group size of the lower bit quantization
          quant_bits: number of bits of the quantization (ex: 4bit)
+          external_pre_post_encoder_layers: whether the encoder attention pre- and
+            post-processing is done outside the attention layer.
         """
 
         self._config = dict()
@@ -171,12 +216,6 @@ class TransformerDecoderSpec(model_spec.LayerSpec):
             )
             num_heads_kv = 1
 
-        if with_encoder_attention and num_heads_kv not in (None, 1, num_heads):
-            raise ValueError(
-                "num_heads_kv=%d is not supported in the cross-attention layers"
-                % num_heads_kv
-            )
-
         self.num_heads = np.dtype("int16").type(num_heads)
         self.pre_norm = pre_norm
         self.activation = np.dtype("int8").type(activation)
@@ -222,6 +261,8 @@ class TransformerDecoderSpec(model_spec.LayerSpec):
                 num_heads_kv=num_heads_kv,
                 head_dim=head_dim,
                 sliding_window=sliding_window,
+                qk_norm=qk_norm,
+                external_pre_post_encoder_layers=external_pre_post_encoder_layers,
             )
             for _ in range(num_layers)
         ]
@@ -252,7 +293,15 @@ class TransformerEncoderLayerSpec(model_spec.LayerSpec):
         ffn_glu=False,
         rms_norm=False,
         num_heads_kv=None,
+        head_dim=None,
         sliding_window=None,
+        rotary_dim: Optional[int] = None,
+        rotary_interleave: bool = True,
+        rotary_scaling_type: Optional[attention_spec.RotaryScalingType] = None,
+        rotary_scaling_factor: float = 1,
+        rotary_base: float = 10000,
+        qk_norm=False,
+        pre_post_layer_norm: bool = False,
     ):
         self.self_attention = attention_spec.MultiHeadAttentionSpec(
             self_attention=True,
@@ -260,10 +309,32 @@ class TransformerEncoderLayerSpec(model_spec.LayerSpec):
             relative_attention_bias=relative_attention_bias,
             rms_norm=rms_norm,
             num_heads_kv=num_heads_kv,
+            head_dim=head_dim,
             sliding_window=sliding_window,
+            rotary_dim=rotary_dim,
+            rotary_interleave=rotary_interleave,
+            rotary_scaling_type=rotary_scaling_type,
+            rotary_scaling_factor=rotary_scaling_factor,
+            rotary_base=rotary_base,
+            qk_norm=qk_norm,
         )
         self.ffn = FeedForwardSpec(glu=ffn_glu, rms_norm=rms_norm)
 
+        if pre_post_layer_norm:
+            self.input_layer_norm = common_spec.LayerNormSpec(rms_norm=rms_norm)
+            self.post_attention_layer_norm = common_spec.LayerNormSpec(
+                rms_norm=rms_norm
+            )
+            self.pre_feedforward_layer_norm = common_spec.LayerNormSpec(
+                rms_norm=rms_norm
+            )
+            self.post_feedforward_layer_norm = common_spec.LayerNormSpec(
+                rms_norm=rms_norm
+            )
+
+            delattr(self.self_attention, "layer_norm")
+            delattr(self.ffn, "layer_norm")
+
 
 class TransformerDecoderLayerSpec(model_spec.LayerSpec):
     def __init__(
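
With `pre_post_layer_norm`, an encoder layer owns four standalone norms and drops the ones embedded in its sub-layers (hence the `delattr` calls). A rough sketch of the residual block this spec implies, assuming Gemma-style "sandwich" placement; the actual placement is decided by the C++ runtime, and the callables here are hypothetical:

```python
def sandwich_block(x, attention, ffn, norms):
    # norms maps the four layer-norm names declared in the spec to callables.
    h = norms["post_attention"](attention(norms["input"](x)))
    x = x + h
    h = norms["post_feedforward"](ffn(norms["pre_feedforward"](x)))
    return x + h

# Toy check with identity sub-layers: each residual add doubles x, so 1.0 -> 4.0.
identity = lambda v: v
norms = {name: identity for name in
         ("input", "post_attention", "pre_feedforward", "post_feedforward")}
print(sandwich_block(1.0, identity, identity, norms))  # 4.0
```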
@@ -286,6 +357,8 @@ class TransformerDecoderLayerSpec(model_spec.LayerSpec):
         num_heads_kv=None,
         head_dim=None,
         sliding_window=None,
+        qk_norm=False,
+        external_pre_post_encoder_layers=False,
     ):
         self.self_attention = attention_spec.MultiHeadAttentionSpec(
             self_attention=True,
@@ -302,13 +375,17 @@ class TransformerDecoderLayerSpec(model_spec.LayerSpec):
             num_heads_kv=num_heads_kv,
             head_dim=head_dim,
             sliding_window=sliding_window,
+            qk_norm=qk_norm,
         )
 
         if with_encoder_attention:
             self.attention = attention_spec.MultiHeadAttentionSpec(
                 rms_norm=rms_norm,
                 num_heads_kv=num_heads_kv,
+                head_dim=head_dim,
                 sliding_window=sliding_window,
+                qk_norm=qk_norm,
+                has_norm=external_pre_post_encoder_layers is False,
             )
 
         self.ffn = FeedForwardSpec(glu=ffn_glu, rms_norm=rms_norm)
@@ -324,10 +401,21 @@ class TransformerDecoderLayerSpec(model_spec.LayerSpec):
             delattr(self.ffn, "layer_norm")
 
         if pre_post_layer_norm:
+            # Self-attention layer norms
             self.input_layer_norm = common_spec.LayerNormSpec(rms_norm=rms_norm)
             self.post_attention_layer_norm = common_spec.LayerNormSpec(
                 rms_norm=rms_norm
             )
+
+            if with_encoder_attention and external_pre_post_encoder_layers:
+                self.external_post_encoder_attention_layer_norm = (
+                    common_spec.LayerNormSpec(rms_norm=rms_norm)
+                )
+                self.external_pre_encoder_attention_layer_norm = (
+                    common_spec.LayerNormSpec(rms_norm=rms_norm)
+                )
+
+            # Feed-forward layer norms
             self.pre_feedforward_layer_norm = common_spec.LayerNormSpec(
                 rms_norm=rms_norm
             )
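
When `external_pre_post_encoder_layers` is enabled, the cross-attention spec is created with `has_norm=False` and the two `external_*_encoder_attention_layer_norm` weights take over. A sketch of the assumed data flow around cross-attention (hypothetical callables, not the runtime implementation):

```python
def cross_attention_block(x, memory, attention, pre_norm, post_norm):
    # The pre/post norms live outside the attention module itself.
    h = attention(pre_norm(x), memory)
    return x + post_norm(h)
```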
@@ -557,6 +645,7 @@ class TransformerDecoderModelSpec(model_spec.LanguageModelSpec):
         quant_type: Optional[common_spec.Quantization] = None,
         quant_group_size: Optional[int] = None,
         quant_bits: Optional[int] = None,
+        qk_norm: bool = False,
     ):
         """Creates a Transformer decoder model specification.
 
@@ -631,6 +720,7 @@ class TransformerDecoderModelSpec(model_spec.LanguageModelSpec):
             quant_type=quant_type,
             quant_group_size=quant_group_size,
             quant_bits=quant_bits,
+            qk_norm=qk_norm,
         )
 
         return cls(decoder)
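
End-to-end, the new flag reaches decoder-only specs through `TransformerDecoderModelSpec.from_config`. A hedged usage sketch; the layer and head counts are placeholders, and the remaining architecture arguments are assumed to keep their defaults:

```python
from ctranslate2.specs import transformer_spec

# Enable query/key normalization in every decoder layer.
spec = transformer_spec.TransformerDecoderModelSpec.from_config(
    num_layers=24,
    num_heads=16,
    qk_norm=True,
)
```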
ctranslate2/version.py CHANGED
@@ -1,3 +1,3 @@
 """Version information."""
 
-__version__ = "4.6.1"
+__version__ = "4.6.3"
ctranslate2-4.6.3.dist-info/METADATA CHANGED
@@ -1,9 +1,10 @@
 Metadata-Version: 2.4
 Name: ctranslate2
-Version: 4.6.1
+Version: 4.6.3
 Summary: Fast inference engine for Transformer models
 Home-page: https://opennmt.net
 Author: OpenNMT
+License: MIT
 Project-URL: Documentation, https://opennmt.net/CTranslate2
 Project-URL: Forum, https://forum.opennmt.net
 Project-URL: Gitter, https://gitter.im/OpenNMT/CTranslate2
@@ -13,7 +14,6 @@ Classifier: Development Status :: 5 - Production/Stable
 Classifier: Environment :: GPU :: NVIDIA CUDA :: 12 :: 12.4
 Classifier: Intended Audience :: Developers
 Classifier: Intended Audience :: Science/Research
-Classifier: License :: OSI Approved :: MIT License
 Classifier: Programming Language :: Python :: 3
 Classifier: Programming Language :: Python :: 3 :: Only
 Classifier: Programming Language :: Python :: 3.9
@@ -34,6 +34,7 @@ Dynamic: description
 Dynamic: description-content-type
 Dynamic: home-page
 Dynamic: keywords
+Dynamic: license
 Dynamic: project-url
 Dynamic: requires-dist
 Dynamic: requires-python
@@ -49,7 +50,7 @@ The project implements a custom runtime that applies many performance optimizati
 
 The following model types are currently supported:
 
-* Encoder-decoder models: Transformer base/big, M2M-100, NLLB, BART, mBART, Pegasus, T5, Whisper
+* Encoder-decoder models: Transformer base/big, M2M-100, NLLB, BART, mBART, Pegasus, T5, Whisper, T5Gemma
 * Decoder-only models: GPT-2, GPT-J, GPT-NeoX, OPT, BLOOM, MPT, Llama, Mistral, Gemma, CodeGen, GPTBigCode, Falcon, Qwen2
 * Encoder-only models: BERT, DistilBERT, XLM-RoBERTa
 
@@ -160,6 +161,16 @@ Executed with 4 threads on a [*c5.2xlarge*](https://aws.amazon.com/ec2/instance-
 
 Executed with CUDA 11 on a [*g5.xlarge*](https://aws.amazon.com/ec2/instance-types/g5/) Amazon EC2 instance equipped with a NVIDIA A10G GPU (driver version: 510.47.03).
 
+## Contributing
+
+CTranslate2 is a community-driven project. We welcome contributions of all kinds:
+* **New Model Support:** Help us implement more Transformer architectures.
+* **Performance:** Propose optimizations for CPU or GPU kernels.
+* **Bug Reports:** Open an issue if you find something not working as expected.
+* **Documentation:** Improve our guides or add new examples.
+
+Check out our [Contributing Guide](CONTRIBUTING.md) to learn how to set up your development environment.
+
 ## Additional resources
 
 * [Documentation](https://opennmt.net/CTranslate2)
ctranslate2-4.6.3.dist-info/RECORD CHANGED
@@ -1,33 +1,33 @@
-ctranslate2/__init__.py,sha256=HmklwU3BCnUgQAAGQeCBOr44QS5X0npr_WDkBh2e6Lo,1508
-ctranslate2/_ext.cp314-win_amd64.pyd,sha256=gqC7yPHNxHKG_7GTH0K2Vajp8yjGvYWdjkZe5xjQqHg,719872
-ctranslate2/ctranslate2.dll,sha256=GJzPqItIFHs3sD9PwQm1yRHJyASjW3xszKIxUW3A0oc,58389504
-ctranslate2/cudnn64_9.dll,sha256=wHzEfy-kpWZZPHr0qn5X7fCamFoP3dFMuNb0VuJSrwU,438840
-ctranslate2/extensions.py,sha256=axO2FI8ddiFmlko2AzQ6VcdtF-3hDA7VmPGnTIkrPkI,21782
+ctranslate2/__init__.py,sha256=CGqShDaFxQ-u-aCtVq99T4HKuBdMB8b49l2KSxnQb8M,1735
+ctranslate2/_ext.cp314-win_amd64.pyd,sha256=7ls8m8Bo_fA3itDnwgbjTUvsVJa0IG8Z4D6eMDUh_9Q,720384
+ctranslate2/ctranslate2.dll,sha256=9TRGHvoyNSzXe9eEc3jKJa5-21-AeSENgp3DXvhCZ4M,58590720
+ctranslate2/cudnn64_9.dll,sha256=ntvN_3OwrwcOsWCyzmbln-ygSqAXNR2O7cxejhSZZ9I,266288
+ctranslate2/extensions.py,sha256=kDNt0H9KvfNCc3PrRGzfkj9Fkvna84i2O5Y-rav6UkU,21940
 ctranslate2/libiomp5md.dll,sha256=mCIzNmsK_NoeD1WgsTQJfjW3eWE_VN22nmhebNBrdV8,1614192
 ctranslate2/logging.py,sha256=P9evHdxuMx_iHvwJjEASEq-j5062H64Pl5-fJjxEuHk,1221
-ctranslate2/version.py,sha256=4YXv9jQt8K_nFjSqbVGYqjJBzqTZM2P6siU7xOzZDwY,53
+ctranslate2/version.py,sha256=TboXlbA67GNmSOm1v2u_U8AKgYh5iminMMLTvi3Xho4,53
 ctranslate2/converters/__init__.py,sha256=ufYjcXf2sK4fiXAUU6tIJyWmNuLjKFf_KH3GWLXe4ls,507
 ctranslate2/converters/converter.py,sha256=Qkb8NGLLmgqMT6HZkFq61zwbxyq3NlWcaxLZ6Ap-YOQ,3601
 ctranslate2/converters/eole_ct2.py,sha256=RUcDJH_2AUt0jDs5oAqccE6tQPbO9LQ6JmVriC1DTy8,12564
-ctranslate2/converters/fairseq.py,sha256=uQpd-ftYSO4c6WdEwCUyuZWhzWX1UTG7dGOC6EtcDVE,12765
+ctranslate2/converters/fairseq.py,sha256=2vlBk4AVCHwXxKkwPHVmcjyfo1dAV0_DJS1i6q-44NE,12822
 ctranslate2/converters/marian.py,sha256=1_7P3EbIDPOdyJbtb_Lp-LCBPBb9A8E9OhzoyFwTb64,11274
 ctranslate2/converters/openai_gpt2.py,sha256=1rXKM2ZURZHWRv4XZ135fPkVWpM4rTG-q7VR7OD6d-A,3304
-ctranslate2/converters/opennmt_py.py,sha256=Vva60az6tGqlQXs0UgC09r_fCD3u2u6wUJB-8V4OUFQ,13183
+ctranslate2/converters/opennmt_py.py,sha256=zex4TbHiiJMy0tkqQg39oNjxmSZKf8dnRLH3iQ1H4z0,13227
 ctranslate2/converters/opennmt_tf.py,sha256=uBRp2wz5xriSQcA_c0S0ekY7ws6RpRX_0EKeMRdM7-s,16222
 ctranslate2/converters/opus_mt.py,sha256=5KbPaTiBhhorPzMpTugIfIJ8SgcqHfJUbJrWKBN-Djs,1254
-ctranslate2/converters/transformers.py,sha256=Wzih7qmqNpen_EdxKaAoEWLMV4RR16GVsrwrbB9lj8A,114233
+ctranslate2/converters/transformers.py,sha256=VRal3vKSQrAOvcNPwewjVMtgvWskz0KD5bdIrpNrZNA,142380
 ctranslate2/converters/utils.py,sha256=w7NG39lx-9dOdL57OqKVTdC__opkuP8RACg1TLlUJwM,3817
 ctranslate2/models/__init__.py,sha256=53p98uemtuvVPz8xK7_LbOhBiUJJu-c-NdmOHJgdXus,497
 ctranslate2/specs/__init__.py,sha256=9GabtSyczznYqiqUS6XvULi8pQ3_3RNRogXobGP0G80,653
-ctranslate2/specs/attention_spec.py,sha256=ios3aZRWbZ8PmcYi9pXIad52lMweqOUgV5ZJbkFOKmE,3218
+ctranslate2/specs/attention_spec.py,sha256=FnaSiQREWQw_cURgsCb9_aIpGOCxyVGTCpIOdd-08v8,3492
 ctranslate2/specs/common_spec.py,sha256=freTDhQMy5PYofBrij4_FDgrKokMYApWSPIpASZIlJc,1608
 ctranslate2/specs/model_spec.py,sha256=atCAYzDEIzyJ1TCayFGZVutHqSWa1ww-vbZ0OiIJqh8,25736
-ctranslate2/specs/transformer_spec.py,sha256=vBTnBaZ8nslREF8FSJKP6VveyYAvS2_L0h8yqPAhpY0,30124
+ctranslate2/specs/transformer_spec.py,sha256=-GJ0oSjI3ns-Ei_-xXIM_P2GaZxt5Z-g03zJ0m_4ciU,34317
 ctranslate2/specs/wav2vec2_spec.py,sha256=NITsuOuf2F5bU1-aXit8-WEtWV9fH2Eq7A7857UyYho,2106
 ctranslate2/specs/wav2vec2bert_spec.py,sha256=UgtsJWC9mMgJ7bn4T_xg1uXK0rqA4-9tT2KMGVgPKnw,3529
 ctranslate2/specs/whisper_spec.py,sha256=_vm1sc5yOowOJ4iyvcxMXrgt-UcLJrZT8OtPscUXcQQ,2447
-ctranslate2-4.6.1.dist-info/METADATA,sha256=80HyCFtLjBdWxYcQtHLdyoLJwYOvGoRPk3r_arkWMsg,10354
-ctranslate2-4.6.1.dist-info/WHEEL,sha256=7k6Wcy588iJYe5lf5K095NLg-uoBTnE-T8eHJ92G4_4,101
-ctranslate2-4.6.1.dist-info/entry_points.txt,sha256=ZHkojut_TmVRHl0bJIGm2b9wqr98GAJqxN9rlJtQshs,466
-ctranslate2-4.6.1.dist-info/top_level.txt,sha256=1hUaWzcFIuSo2BAIUHFA3Osgsu6S1giq0y6Rosv8HOQ,12
-ctranslate2-4.6.1.dist-info/RECORD,,
+ctranslate2-4.6.3.dist-info/METADATA,sha256=awoc6t4JSxpv51lmfAG28ZG91FhGQ8DHspyLzLqLo_Q,10839
+ctranslate2-4.6.3.dist-info/WHEEL,sha256=7k6Wcy588iJYe5lf5K095NLg-uoBTnE-T8eHJ92G4_4,101
+ctranslate2-4.6.3.dist-info/entry_points.txt,sha256=ZHkojut_TmVRHl0bJIGm2b9wqr98GAJqxN9rlJtQshs,466
+ctranslate2-4.6.3.dist-info/top_level.txt,sha256=1hUaWzcFIuSo2BAIUHFA3Osgsu6S1giq0y6Rosv8HOQ,12
+ctranslate2-4.6.3.dist-info/RECORD,,