ctranslate2-4.7.0-cp314-cp314-macosx_11_0_arm64.whl
This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
- ctranslate2/.dylibs/libctranslate2.4.7.0.dylib +0 -0
- ctranslate2/__init__.py +66 -0
- ctranslate2/_ext.cpython-314-darwin.so +0 -0
- ctranslate2/converters/__init__.py +8 -0
- ctranslate2/converters/converter.py +109 -0
- ctranslate2/converters/eole_ct2.py +353 -0
- ctranslate2/converters/fairseq.py +347 -0
- ctranslate2/converters/marian.py +315 -0
- ctranslate2/converters/openai_gpt2.py +95 -0
- ctranslate2/converters/opennmt_py.py +361 -0
- ctranslate2/converters/opennmt_tf.py +455 -0
- ctranslate2/converters/opus_mt.py +44 -0
- ctranslate2/converters/transformers.py +3721 -0
- ctranslate2/converters/utils.py +127 -0
- ctranslate2/extensions.py +589 -0
- ctranslate2/logging.py +45 -0
- ctranslate2/models/__init__.py +18 -0
- ctranslate2/specs/__init__.py +18 -0
- ctranslate2/specs/attention_spec.py +98 -0
- ctranslate2/specs/common_spec.py +66 -0
- ctranslate2/specs/model_spec.py +767 -0
- ctranslate2/specs/transformer_spec.py +797 -0
- ctranslate2/specs/wav2vec2_spec.py +72 -0
- ctranslate2/specs/wav2vec2bert_spec.py +97 -0
- ctranslate2/specs/whisper_spec.py +77 -0
- ctranslate2/version.py +3 -0
- ctranslate2-4.7.0.dist-info/METADATA +180 -0
- ctranslate2-4.7.0.dist-info/RECORD +31 -0
- ctranslate2-4.7.0.dist-info/WHEEL +6 -0
- ctranslate2-4.7.0.dist-info/entry_points.txt +8 -0
- ctranslate2-4.7.0.dist-info/top_level.txt +1 -0
ctranslate2/specs/wav2vec2_spec.py
ADDED
@@ -0,0 +1,72 @@
from typing import List, Optional, Tuple

import numpy as np

from ctranslate2.specs import common_spec, model_spec, transformer_spec


class Wav2Vec2Config(model_spec.ModelConfig):
    """Configuration for the Wav2Vec2 model."""

    def __init__(self):
        return


class Wav2Vec2Spec(model_spec.LanguageModelSpec):
    def __init__(
        self,
        feat_layers,
        num_layers,
        num_heads,
        vocab_size,
        return_hidden,
    ):
        super().__init__()
        self.vocab_size = np.dtype("int16").type(vocab_size)
        self.encoder = Wav2Vec2EncoderSpec(
            feat_layers,
            num_layers,
            num_heads,
            return_hidden,
        )

    @property
    def name(self):
        return "Wav2Vec2Spec"

    @property
    def revision(self):
        return 3

    def get_default_config(self):
        return Wav2Vec2Config()

    def get_vocabulary_size(self):
        return int(self.vocab_size.numpy())


class Wav2Vec2LayerNormConvLayer(model_spec.LayerSpec):
    def __init__(self):
        self.conv = common_spec.Conv1DSpec()
        self.layer_norm = common_spec.LayerNormSpec()


class Wav2Vec2PosEmbedConvLayer(model_spec.LayerSpec):
    def __init__(self):
        self.conv = common_spec.Conv1DSpec()


class Wav2Vec2EncoderSpec(model_spec.LayerSpec):
    def __init__(self, feat_layers, num_layers, num_heads, return_hidden):
        self.num_heads = np.dtype("int16").type(num_heads)
        self.feat_layer0 = Wav2Vec2LayerNormConvLayer()
        self.feat_layer = [Wav2Vec2LayerNormConvLayer() for i in range(feat_layers - 1)]
        self.fp_layer_norm = common_spec.LayerNormSpec()
        self.fp_projection = common_spec.LinearSpec()
        self.pos_conv_embed = Wav2Vec2PosEmbedConvLayer()
        self.layer_norm = common_spec.LayerNormSpec()
        self.layer = [
            transformer_spec.TransformerEncoderLayerSpec() for _ in range(num_layers)
        ]
        if not return_hidden:
            self.lm_head = common_spec.LinearSpec()
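The spec above only declares the model's weight layout; a converter instantiates it and assigns the actual tensors before serialization. A minimal sketch of that flow, with illustrative hyperparameters (the argument values and the conv weight shape below are assumptions, not read from a real checkpoint):

```python
import numpy as np

from ctranslate2.specs.wav2vec2_spec import Wav2Vec2Spec

# Illustrative values in the ballpark of a wav2vec2-base CTC model.
spec = Wav2Vec2Spec(
    feat_layers=7,       # feature-extractor conv layers
    num_layers=12,       # transformer encoder layers
    num_heads=12,
    vocab_size=32,
    return_hidden=False,
)

# Every leaf declared in the spec is a placeholder until a converter
# assigns a real array, e.g. the first feature-extractor convolution:
spec.encoder.feat_layer0.conv.weight = np.zeros((512, 1, 10), dtype=np.float32)
```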
ctranslate2/specs/wav2vec2bert_spec.py
ADDED
@@ -0,0 +1,97 @@
import numpy as np

from ctranslate2.specs import attention_spec, common_spec, model_spec


class Wav2Vec2BertConfig(model_spec.ModelConfig):
    """Configuration for the Wav2Vec2Bert model."""

    def __init__(self):
        return


class Wav2Vec2BertSpec(model_spec.LanguageModelSpec):
    def __init__(
        self,
        num_hidden_layers,
        num_adapter_layers,
        vocab_size,
        return_hidden,
    ):
        super().__init__()
        self.vocab_size = np.dtype("int16").type(vocab_size)
        self.encoder = Wav2Vec2BertEncoderSpec(
            num_adapter_layers,
            num_hidden_layers,
            return_hidden,
        )

    @property
    def name(self):
        return "Wav2Vec2BertSpec"

    @property
    def revision(self):
        return 1

    def get_default_config(self):
        return Wav2Vec2BertConfig()

    def get_vocabulary_size(self):
        return int(self.vocab_size.numpy())


class Wav2Vec2BertFeedForwardSpec(model_spec.LayerSpec):
    def __init__(self, glu=False, rms_norm=False):
        self.linear_0 = common_spec.LinearSpec()
        self.linear_1 = common_spec.LinearSpec()
        if glu:
            self.linear_0_noact = common_spec.LinearSpec()


class EncoderSpec(model_spec.LayerSpec):
    def __init__(self):
        self.enc_ffn1_layer_norm = common_spec.LayerNormSpec()
        self.enc_ffn1 = Wav2Vec2BertFeedForwardSpec()
        self.enc_attn_layer_norm = common_spec.LayerNormSpec()
        self.enc_attn = attention_spec.MultiHeadAttentionSpec(
            self_attention=True,
            relative_asymmetric_position=True,
        )
        del self.enc_attn.layer_norm
        self.enc_conv_layer_norm = common_spec.LayerNormSpec()
        self.enc_conv_pointwise_conv1 = common_spec.Conv1DSpec()
        del self.enc_conv_pointwise_conv1.bias
        self.enc_conv_depthwise_conv = common_spec.Conv1DSpec()
        del self.enc_conv_depthwise_conv.bias
        self.enc_conv_depthwise_layer_norm = common_spec.LayerNormSpec()
        self.enc_conv_pointwise_conv2 = common_spec.Conv1DSpec()
        del self.enc_conv_pointwise_conv2.bias
        self.enc_ffn2_layer_norm = common_spec.LayerNormSpec()
        self.enc_ffn2 = Wav2Vec2BertFeedForwardSpec()
        self.enc_final_layer_norm = common_spec.LayerNormSpec()


class AdapterSpec(model_spec.LayerSpec):
    def __init__(self):
        self.adpt_residual_layer_norm = common_spec.LayerNormSpec()
        self.adpt_residual_conv = common_spec.Conv1DSpec()
        self.adpt_attn_layer_norm = common_spec.LayerNormSpec()
        self.adpt_attn_conv = common_spec.Conv1DSpec()
        self.adpt_attn_layer = attention_spec.MultiHeadAttentionSpec(
            self_attention=True,
            relative_asymmetric_position=False,
        )
        del self.adpt_attn_layer.layer_norm
        self.adpt_ffn_layer_norm = common_spec.LayerNormSpec()
        self.adpt_ffn = Wav2Vec2BertFeedForwardSpec()


class Wav2Vec2BertEncoderSpec(model_spec.LayerSpec):
    def __init__(self, num_hidden_layers, num_adapter_layers, return_hidden):
        self.fp_layer_norm = common_spec.LayerNormSpec()
        self.fp_projection = common_spec.LinearSpec()
        self.encoder_layers = [EncoderSpec() for _ in range(num_hidden_layers)]
        self.adapter_layers = [AdapterSpec() for _ in range(num_adapter_layers)]
        if not return_hidden:
            self.lm_head = common_spec.LinearSpec()
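A pattern worth noting in both audio specs: sub-modules are declared eagerly and then `del`eted when the architecture carries no corresponding weight (`del self.enc_attn.layer_norm`, the bias-free pointwise and depthwise convolutions). Deleting the attribute removes the entry from the spec, so no weight is expected or serialized for it. A tiny runnable sketch of the mechanism, which is plain Python attribute removal:

```python
from ctranslate2.specs import common_spec, model_spec


class PointwiseConvSpec(model_spec.LayerSpec):
    def __init__(self):
        self.conv = common_spec.Conv1DSpec()
        del self.conv.bias  # bias-free conv: drop the placeholder entirely


layer = PointwiseConvSpec()
print("bias" in vars(layer.conv))  # False: the entry no longer exists
```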
ctranslate2/specs/whisper_spec.py
ADDED
@@ -0,0 +1,77 @@
from typing import List, Optional, Tuple

import numpy as np

from ctranslate2.specs import common_spec, model_spec, transformer_spec


class WhisperConfig(model_spec.ModelConfig):
    """Configuration for the Whisper model."""

    def __init__(
        self,
        suppress_ids: Optional[List[int]] = None,
        suppress_ids_begin: Optional[List[int]] = None,
        lang_ids: Optional[List[int]] = None,
        alignment_heads: Optional[List[Tuple[int, int]]] = None,
    ):
        super().__init__(
            suppress_ids=suppress_ids,
            suppress_ids_begin=suppress_ids_begin,
            lang_ids=lang_ids,
            alignment_heads=alignment_heads,
        )


class WhisperSpec(model_spec.LanguageModelSpec):
    """Describes a Whisper model."""

    def __init__(
        self,
        num_encoder_layers,
        num_encoder_heads,
        num_decoder_layers,
        num_decoder_heads,
    ):
        """Initializes the model specification.

        Args:
          num_encoder_layers: The number of encoder layers.
          num_encoder_heads: The number of encoder attention heads.
          num_decoder_layers: The number of decoder layers.
          num_decoder_heads: The number of decoder attention heads.
        """
        super().__init__()
        self.encoder = WhisperEncoderSpec(num_encoder_layers, num_encoder_heads)
        self.decoder = transformer_spec.TransformerDecoderSpec(
            num_decoder_layers,
            num_decoder_heads,
            activation=common_spec.Activation.GELU,
        )
        self.decoder.scale_embeddings = False

    @property
    def name(self):
        return "WhisperSpec"

    @property
    def revision(self):
        return 3

    def get_default_config(self):
        return WhisperConfig()

    def get_vocabulary_size(self):
        return self.decoder.embeddings.weight.shape[0]


class WhisperEncoderSpec(model_spec.LayerSpec):
    def __init__(self, num_layers, num_heads):
        self.num_heads = np.dtype("int16").type(num_heads)
        self.conv1 = common_spec.Conv1DSpec()
        self.conv2 = common_spec.Conv1DSpec()
        self.position_encodings = transformer_spec.PositionEncoderSpec()
        self.layer_norm = common_spec.LayerNormSpec()
        self.layer = [
            transformer_spec.TransformerEncoderLayerSpec() for _ in range(num_layers)
        ]
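For context: a model converted with this spec is loaded through the dedicated `ctranslate2.models.Whisper` class rather than `Translator`. A hedged sketch (the model path is illustrative, and real log-mel features would come from an audio frontend such as `transformers.WhisperProcessor`, omitted here):

```python
import numpy as np
import ctranslate2

model = ctranslate2.models.Whisper("whisper-tiny-ct2")  # illustrative path

# Whisper consumes log-mel features of shape (batch, n_mels, frames);
# zeros stand in for real features in this sketch.
mel = np.zeros((1, 80, 3000), dtype=np.float32)
features = ctranslate2.StorageView.from_array(mel)

prompt = ["<|startoftranscript|>", "<|en|>", "<|transcribe|>", "<|notimestamps|>"]
result = model.generate(features, [prompt])[0]
print(result.sequences[0])
```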
ctranslate2/version.py
ADDED
@@ -0,0 +1,3 @@
"""Version information."""

__version__ = "4.7.0"

ctranslate2-4.7.0.dist-info/METADATA
ADDED
@@ -0,0 +1,180 @@
Metadata-Version: 2.4
Name: ctranslate2
Version: 4.7.0
Summary: Fast inference engine for Transformer models
Home-page: https://opennmt.net
Author: OpenNMT
License: MIT
Project-URL: Documentation, https://opennmt.net/CTranslate2
Project-URL: Forum, https://forum.opennmt.net
Project-URL: Gitter, https://gitter.im/OpenNMT/CTranslate2
Project-URL: Source, https://github.com/OpenNMT/CTranslate2
Keywords: opennmt nmt neural machine translation cuda mkl inference quantization
Classifier: Development Status :: 5 - Production/Stable
Classifier: Environment :: GPU :: NVIDIA CUDA :: 12 :: 12.4
Classifier: Intended Audience :: Developers
Classifier: Intended Audience :: Science/Research
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3 :: Only
Classifier: Programming Language :: Python :: 3.9
Classifier: Programming Language :: Python :: 3.10
Classifier: Programming Language :: Python :: 3.11
Classifier: Programming Language :: Python :: 3.12
Classifier: Programming Language :: Python :: 3.13
Classifier: Programming Language :: Python :: 3.14
Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
Requires-Python: >=3.9
Description-Content-Type: text/markdown
Requires-Dist: setuptools
Requires-Dist: numpy
Requires-Dist: pyyaml<7,>=5.3
Dynamic: author
Dynamic: classifier
Dynamic: description
Dynamic: description-content-type
Dynamic: home-page
Dynamic: keywords
Dynamic: license
Dynamic: project-url
Dynamic: requires-dist
Dynamic: requires-python
Dynamic: summary

[![CI](https://github.com/OpenNMT/CTranslate2/workflows/CI/badge.svg)](https://github.com/OpenNMT/CTranslate2/actions?query=workflow%3ACI) [![PyPI version](https://badge.fury.io/py/ctranslate2.svg)](https://badge.fury.io/py/ctranslate2) [![documentation](https://img.shields.io/badge/docs-latest-blue.svg)](https://opennmt.net/CTranslate2/) [![Gitter](https://badges.gitter.im/OpenNMT/CTranslate2.svg)](https://gitter.im/OpenNMT/CTranslate2?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge) [![Forum](https://img.shields.io/discourse/status?server=https%3A%2F%2Fforum.opennmt.net%2F)](https://forum.opennmt.net/)

# CTranslate2

CTranslate2 is a C++ and Python library for efficient inference with Transformer models.

The project implements a custom runtime that applies many performance optimization techniques such as weights quantization, layers fusion, batch reordering, etc., to [accelerate and reduce the memory usage](#benchmarks) of Transformer models on CPU and GPU.

The following model types are currently supported:

* Encoder-decoder models: Transformer base/big, M2M-100, NLLB, BART, mBART, Pegasus, T5, Whisper, T5Gemma
* Decoder-only models: GPT-2, GPT-J, GPT-NeoX, OPT, BLOOM, MPT, Llama, Mistral, Gemma, CodeGen, GPTBigCode, Falcon, Qwen2
* Encoder-only models: BERT, DistilBERT, XLM-RoBERTa

Compatible models should first be converted into an optimized model format. The library includes converters for multiple frameworks:

* [OpenNMT-py](https://opennmt.net/CTranslate2/guides/opennmt_py.html)
* [OpenNMT-tf](https://opennmt.net/CTranslate2/guides/opennmt_tf.html)
* [Fairseq](https://opennmt.net/CTranslate2/guides/fairseq.html)
* [Marian](https://opennmt.net/CTranslate2/guides/marian.html)
* [OPUS-MT](https://opennmt.net/CTranslate2/guides/opus_mt.html)
* [Transformers](https://opennmt.net/CTranslate2/guides/transformers.html)

The project is production-oriented and comes with [backward compatibility guarantees](https://opennmt.net/CTranslate2/versioning.html), but it also includes experimental features related to model compression and inference acceleration.

## Key features

* **Fast and efficient execution on CPU and GPU**<br/>The execution [is significantly faster and requires fewer resources](#benchmarks) than general-purpose deep learning frameworks on supported models and tasks thanks to many advanced optimizations: layer fusion, padding removal, batch reordering, in-place operations, caching mechanism, etc.
* **Quantization and reduced precision**<br/>The model serialization and computation support weights with [reduced precision](https://opennmt.net/CTranslate2/quantization.html): 16-bit floating points (FP16), 16-bit brain floating points (BF16), 16-bit integers (INT16), 8-bit integers (INT8) and AWQ quantization (INT4). See the short loading example after this list.
* **Multiple CPU architectures support**<br/>The project supports x86-64 and AArch64/ARM64 processors and integrates multiple backends that are optimized for these platforms: [Intel MKL](https://software.intel.com/content/www/us/en/develop/tools/oneapi/components/onemkl.html), [oneDNN](https://github.com/oneapi-src/oneDNN), [OpenBLAS](https://www.openblas.net/), [Ruy](https://github.com/google/ruy), and [Apple Accelerate](https://developer.apple.com/documentation/accelerate).
* **Automatic CPU detection and code dispatch**<br/>One binary can include multiple backends (e.g. Intel MKL and oneDNN) and instruction set architectures (e.g. AVX, AVX2) that are automatically selected at runtime based on the CPU information.
* **Parallel and asynchronous execution**<br/>Multiple batches can be processed in parallel and asynchronously using multiple GPUs or CPU cores.
* **Dynamic memory usage**<br/>The memory usage changes dynamically depending on the request size while still meeting performance requirements thanks to caching allocators on both CPU and GPU.
* **Lightweight on disk**<br/>Quantization can make the models 4 times smaller on disk with minimal accuracy loss.
* **Simple integration**<br/>The project has few dependencies and exposes simple APIs in [Python](https://opennmt.net/CTranslate2/python/overview.html) and C++ to cover most integration needs.
* **Configurable and interactive decoding**<br/>[Advanced decoding features](https://opennmt.net/CTranslate2/decoding.html) allow autocompleting a partial sequence and returning alternatives at a specific location in the sequence.
* **Tensor parallelism for distributed inference**<br/>Very large models can be split across multiple GPUs. Follow the [documentation](docs/parallel.md#model-and-tensor-parallelism) to set up the required environment.

Some of these features are difficult to achieve with standard deep learning frameworks and are the motivation for this project.
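As a concrete illustration of the reduced-precision support listed above, the compute type can be selected at load time independently of how the model was serialized; a short sketch (the model path is illustrative):

```python
import ctranslate2

# Weights are converted to the requested compute type when the model loads;
# "auto" would instead pick the fastest type supported by the hardware.
translator = ctranslate2.Translator(
    "ende-ct2",                   # illustrative path to a converted model
    device="cuda",
    compute_type="int8_float16",
)
```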

## Installation and usage

CTranslate2 can be installed with pip:

```bash
pip install ctranslate2
```

The Python module is used to convert models and can translate or generate text with a few lines of code:

```python
import ctranslate2

translator = ctranslate2.Translator(translation_model_path)
translator.translate_batch(tokens)

generator = ctranslate2.Generator(generation_model_path)
generator.generate_batch(start_tokens)
```

See the [documentation](https://opennmt.net/CTranslate2) for more information and examples.
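To make this concrete, here is a hedged end-to-end sketch using the converter's Python API (the checkpoint name, output directory, and token pieces are illustrative; tokenization is done by the original tokenizer, not by CTranslate2):

```python
import ctranslate2
from ctranslate2.converters import TransformersConverter

# One-time step: convert a Hugging Face checkpoint to the CTranslate2 format.
converter = TransformersConverter("Helsinki-NLP/opus-mt-en-de")
output_dir = converter.convert("opus-mt-en-de-ct2", quantization="int8")

# Translate pre-tokenized input (SentencePiece-style pieces assumed here).
translator = ctranslate2.Translator(output_dir, device="cpu")
results = translator.translate_batch([["▁Hello", "▁world", "!"]])
print(results[0].hypotheses[0])
```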

If you have an AMD ROCm GPU, we provide specific Python wheels on the [releases page](https://github.com/OpenNMT/CTranslate2/releases/).

## Benchmarks

We translate the En->De test set *newstest2014* with multiple models:

* [OpenNMT-tf WMT14](https://opennmt.net/Models-tf/#translation): a base Transformer trained with OpenNMT-tf on the WMT14 dataset (4.5M lines)
* [OpenNMT-py WMT14](https://opennmt.net/Models-py/#translation): a base Transformer trained with OpenNMT-py on the WMT14 dataset (4.5M lines)
* [OPUS-MT](https://github.com/Helsinki-NLP/OPUS-MT-train/tree/master/models/en-de#opus-2020-02-26zip): a base Transformer trained with Marian on all OPUS data available on 2020-02-26 (81.9M lines)

The benchmark reports the number of target tokens generated per second (higher is better). The results are aggregated over multiple runs. See the [benchmark scripts](tools/benchmark) for more details and to reproduce these numbers.

**Please note that the results presented below are only valid for the configuration used during this benchmark: absolute and relative performance may change with different settings.**

#### CPU

| | Tokens per second | Max. memory | BLEU |
| --- | --- | --- | --- |
| **OpenNMT-tf WMT14 model** | | | |
| OpenNMT-tf 2.31.0 (with TensorFlow 2.11.0) | 209.2 | 2653MB | 26.93 |
| **OpenNMT-py WMT14 model** | | | |
| OpenNMT-py 3.0.4 (with PyTorch 1.13.1) | 275.8 | 2012MB | 26.77 |
| - int8 | 323.3 | 1359MB | 26.72 |
| CTranslate2 3.6.0 | 658.8 | 849MB | 26.77 |
| - int16 | 733.0 | 672MB | 26.82 |
| - int8 | 860.2 | 529MB | 26.78 |
| - int8 + vmap | 1126.2 | 598MB | 26.64 |
| **OPUS-MT model** | | | |
| Transformers 4.26.1 (with PyTorch 1.13.1) | 147.3 | 2332MB | 27.90 |
| Marian 1.11.0 | 344.5 | 7605MB | 27.93 |
| - int16 | 330.2 | 5901MB | 27.65 |
| - int8 | 355.8 | 4763MB | 27.27 |
| CTranslate2 3.6.0 | 525.0 | 721MB | 27.92 |
| - int16 | 596.1 | 660MB | 27.53 |
| - int8 | 696.1 | 516MB | 27.65 |

Executed with 4 threads on a [*c5.2xlarge*](https://aws.amazon.com/ec2/instance-types/c5/) Amazon EC2 instance equipped with an Intel(R) Xeon(R) Platinum 8275CL CPU.
#### GPU

| | Tokens per second | Max. GPU memory | Max. CPU memory | BLEU |
| --- | --- | --- | --- | --- |
| **OpenNMT-tf WMT14 model** | | | | |
| OpenNMT-tf 2.31.0 (with TensorFlow 2.11.0) | 1483.5 | 3031MB | 3122MB | 26.94 |
| **OpenNMT-py WMT14 model** | | | | |
| OpenNMT-py 3.0.4 (with PyTorch 1.13.1) | 1795.2 | 2973MB | 3099MB | 26.77 |
| FasterTransformer 5.3 | 6979.0 | 2402MB | 1131MB | 26.77 |
| - float16 | 8592.5 | 1360MB | 1135MB | 26.80 |
| CTranslate2 3.6.0 | 6634.7 | 1261MB | 953MB | 26.77 |
| - int8 | 8567.2 | 1005MB | 807MB | 26.85 |
| - float16 | 10990.7 | 941MB | 807MB | 26.77 |
| - int8 + float16 | 8725.4 | 813MB | 800MB | 26.83 |
| **OPUS-MT model** | | | | |
| Transformers 4.26.1 (with PyTorch 1.13.1) | 1022.9 | 4097MB | 2109MB | 27.90 |
| Marian 1.11.0 | 3241.0 | 3381MB | 2156MB | 27.92 |
| - float16 | 3962.4 | 3239MB | 1976MB | 27.94 |
| CTranslate2 3.6.0 | 5876.4 | 1197MB | 754MB | 27.92 |
| - int8 | 7521.9 | 1005MB | 792MB | 27.79 |
| - float16 | 9296.7 | 909MB | 814MB | 27.90 |
| - int8 + float16 | 8362.7 | 813MB | 766MB | 27.90 |

Executed with CUDA 11 on a [*g5.xlarge*](https://aws.amazon.com/ec2/instance-types/g5/) Amazon EC2 instance equipped with an NVIDIA A10G GPU (driver version: 510.47.03).
## Contributing

CTranslate2 is a community-driven project. We welcome contributions of all kinds:

* **New Model Support:** Help us implement more Transformer architectures.
* **Performance:** Propose optimizations for CPU or GPU kernels.
* **Bug Reports:** Open an issue if you find something not working as expected.
* **Documentation:** Improve our guides or add new examples.

Check out our [Contributing Guide](CONTRIBUTING.md) to learn how to set up your development environment.

## Additional resources

* [Documentation](https://opennmt.net/CTranslate2)
* [Forum](https://forum.opennmt.net)
* [Gitter](https://gitter.im/OpenNMT/CTranslate2)
ctranslate2-4.7.0.dist-info/RECORD
ADDED
@@ -0,0 +1,31 @@
ctranslate2-4.7.0.dist-info/RECORD,,
ctranslate2-4.7.0.dist-info/WHEEL,sha256=ImwZCSpdf_dQ4UdmgVK5YPBIOjNiy6ra6cv1L_cDIHc,137
ctranslate2-4.7.0.dist-info/entry_points.txt,sha256=ZHkojut_TmVRHl0bJIGm2b9wqr98GAJqxN9rlJtQshs,466
ctranslate2-4.7.0.dist-info/top_level.txt,sha256=1hUaWzcFIuSo2BAIUHFA3Osgsu6S1giq0y6Rosv8HOQ,12
ctranslate2-4.7.0.dist-info/METADATA,sha256=81pYY_hsu3LqOjqnjHMtfVROr8jx47rPgHKjtWqN3jI,10799
ctranslate2/logging.py,sha256=xmx2LlryOhAPbMmJwMAr-JuXZZogR9j62ONNZZ2fFOQ,1176
ctranslate2/version.py,sha256=jcqYcCGXta88Qvcim3RIF533IUBl41Zc-OeiDjBRb7M,50
ctranslate2/__init__.py,sha256=VqUrrLXQWliJjRs0l8LUlSkeq-BApxQm4JMiqe4OYXM,1815
ctranslate2/extensions.py,sha256=aXg_dmPPvTTfnI1FMBg0lYNm0tZVAVtEqW1UpbqGR0U,21351
ctranslate2/_ext.cpython-314-darwin.so,sha256=zXgBnK7OnXW84NBHMbrYFnci9c26cj5U_b5Ue35-6oI,1626256
ctranslate2/specs/model_spec.py,sha256=Vl3Edm8-Oej3i6mUVE6Rda46e553GSi2A_U5UomnU8s,24969
ctranslate2/specs/transformer_spec.py,sha256=GcxgqdYSWfZ0eEr2dIM0QrcmPN8wSblJ2Eubh9KDbKU,33508
ctranslate2/specs/wav2vec2bert_spec.py,sha256=NKTC8nNdxMeTBW0Y_EKUNrDpKhPwL67P1l3dwmT-ayk,3432
ctranslate2/specs/common_spec.py,sha256=7UuQ3YsO-fLAhwFuWl41VbNLQLzNlYTfjrZSgF_4gDk,1542
ctranslate2/specs/__init__.py,sha256=XE_GwmsYNpaomZwaYJgbJxsXgdKuvGN73yaf3eBCmgs,635
ctranslate2/specs/attention_spec.py,sha256=Wmibi6rRZOCQjgWLgcQLAxdaxqWTlB4zU-IdkwXu7tk,3394
ctranslate2/specs/wav2vec2_spec.py,sha256=D6D-N_fbsvOEVe802jS03pd6pt5bOi71yg_1Mfd_hjc,2034
ctranslate2/specs/whisper_spec.py,sha256=vxxrAsHYGItYC2K9Yw2geFEUbXmeGtcqFYXDMqca0lA,2370
ctranslate2/converters/opennmt_tf.py,sha256=gie0t84R9vUXH8Q1y-vu6hJBU8ljiOPWdT8xB9LXgxM,15767
ctranslate2/converters/fairseq.py,sha256=bmv_RujI7ElOqZh6U3FY0FNKDbMBJwy2B7gUKDCXkss,12475
ctranslate2/converters/transformers.py,sha256=EU_U6AbSBEmeD-f5WLfZEU_Mm8EcBDpBIaVxhjHy3D4,137798
ctranslate2/converters/opus_mt.py,sha256=XCAb3X4afRCtaZErb7bvIvJBmfVgC9NWa_IF5tOBSmQ,1210
ctranslate2/converters/converter.py,sha256=xE3CYPDvNzUneBV_NuPJ59SxeNKRUcyBnS_MsxOD3IM,3492
ctranslate2/converters/__init__.py,sha256=T_yJMns_XXV6pJy3SWpTtgrvbT0owRnxcMn_7i5qop4,499
ctranslate2/converters/marian.py,sha256=i5piS_4zSM9Fmp12FwIHuot0pM-VPhq2MNxut-4Ua80,10959
ctranslate2/converters/utils.py,sha256=u51jg3U-zQRMMhMOp-KKHsHWm6tQQ4TArlXiRvLpEyQ,3690
ctranslate2/converters/eole_ct2.py,sha256=er0RpDplP4-IZ5NRawh5d6h7mNox6LuxwLSMSRYVIJA,12270
ctranslate2/converters/opennmt_py.py,sha256=uyQWOpEmqvluyOERBwtySyfGqlyMYzALXZ55L7OYIV8,12866
ctranslate2/converters/openai_gpt2.py,sha256=Rmvxy6Uqa5f9YW9RQJfF4XCeNbXh-Aw61p5N_E139hM,3209
ctranslate2/models/__init__.py,sha256=ssMbhmQ4v0C5AwkYopmYN_U9rtTFJvNsdlecAcBmHek,479
ctranslate2/.dylibs/libctranslate2.4.7.0.dylib,sha256=H2QkwbWNlK46_trrrfpjmY8AK1I7V1rgjmCo5Cfo2Io,3070784
ctranslate2-4.7.0.dist-info/entry_points.txt
ADDED
@@ -0,0 +1,8 @@
[console_scripts]
ct2-fairseq-converter = ctranslate2.converters.fairseq:main
ct2-marian-converter = ctranslate2.converters.marian:main
ct2-openai-gpt2-converter = ctranslate2.converters.openai_gpt2:main
ct2-opennmt-py-converter = ctranslate2.converters.opennmt_py:main
ct2-opennmt-tf-converter = ctranslate2.converters.opennmt_tf:main
ct2-opus-mt-converter = ctranslate2.converters.opus_mt:main
ct2-transformers-converter = ctranslate2.converters.transformers:main
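Each console script above resolves to the `main()` function of the corresponding converter module, so the CLI and the Python API are interchangeable. A hedged sketch of that equivalence (the model name and output directory are illustrative):

```python
import sys

from ctranslate2.converters import transformers

# Equivalent to running the console script:
#   ct2-transformers-converter --model gpt2 --output_dir gpt2-ct2
sys.argv = ["ct2-transformers-converter", "--model", "gpt2", "--output_dir", "gpt2-ct2"]
transformers.main()
```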
ctranslate2-4.7.0.dist-info/top_level.txt
ADDED
@@ -0,0 +1 @@
ctranslate2