langextract-vllm 0.1.0 (tar.gz)
This diff represents the contents of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- langextract_vllm-0.1.0/.gitignore +157 -0
- langextract_vllm-0.1.0/LICENSE +21 -0
- langextract_vllm-0.1.0/PKG-INFO +46 -0
- langextract_vllm-0.1.0/README.md +0 -0
- langextract_vllm-0.1.0/langextract_vllm/__init__.py +6 -0
- langextract_vllm-0.1.0/langextract_vllm/provider.py +185 -0
- langextract_vllm-0.1.0/pyproject.toml +162 -0
langextract_vllm-0.1.0/.gitignore
@@ -0,0 +1,157 @@
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
pip-wheel-metadata/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
.python-version

# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock

# PEP 582; used by e.g. github.com/David-OConnor/pyflow
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

# IDE
.vscode/
.idea/
*.swp
*.swo
*~

# macOS
.DS_Store

# Model files (optional - uncomment if you don't want to track model files)
# *.bin
# *.safetensors
# *.gguf
# *.pt
# *.pth

# Temporary files
tmp/
temp/
.tmp/

# Log files
*.log

# uv lock file (optional - some projects prefer to include it)
uv.lock
langextract_vllm-0.1.0/LICENSE
@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2025 Fajar Garnadi

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
langextract_vllm-0.1.0/PKG-INFO
@@ -0,0 +1,46 @@
Metadata-Version: 2.4
Name: langextract-vllm
Version: 0.1.0
Summary: LangExtract provider plugin for VLLM - a high-performance large language model inference plugin
Project-URL: Homepage, https://github.com/langextract/langextract-vllm
Project-URL: Repository, https://github.com/langextract/langextract-vllm
Project-URL: Documentation, https://langextract-vllm.readthedocs.io
Project-URL: Bug Tracker, https://github.com/langextract/langextract-vllm/issues
Project-URL: Changelog, https://github.com/langextract/langextract-vllm/blob/main/CHANGELOG.md
Author-email: LangExtract Team <contact@langextract.dev>
Maintainer-email: LangExtract Team <contact@langextract.dev>
License: MIT
License-File: LICENSE
Keywords: extraction,gpu,inference,langextract,llm,nlp,pytorch,vllm
Classifier: Development Status :: 4 - Beta
Classifier: Intended Audience :: Developers
Classifier: Intended Audience :: Science/Research
Classifier: License :: OSI Approved :: MIT License
Classifier: Operating System :: OS Independent
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.10
Classifier: Programming Language :: Python :: 3.11
Classifier: Programming Language :: Python :: 3.12
Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
Classifier: Topic :: Software Development :: Libraries :: Python Modules
Classifier: Topic :: Text Processing :: Linguistic
Requires-Python: >=3.10
Requires-Dist: langextract>=1.0.0
Requires-Dist: torch>=2.0.0
Requires-Dist: transformers>=4.30.0
Requires-Dist: vllm>=0.2.0
Provides-Extra: dev
Requires-Dist: black>=23.0.0; extra == 'dev'
Requires-Dist: flake8>=6.0.0; extra == 'dev'
Requires-Dist: isort>=5.12.0; extra == 'dev'
Requires-Dist: mypy>=1.0.0; extra == 'dev'
Requires-Dist: pytest-cov>=4.0.0; extra == 'dev'
Requires-Dist: pytest>=7.0.0; extra == 'dev'
Provides-Extra: docs
Requires-Dist: myst-parser>=1.0.0; extra == 'docs'
Requires-Dist: sphinx-rtd-theme>=1.2.0; extra == 'docs'
Requires-Dist: sphinx>=5.0.0; extra == 'docs'
Provides-Extra: test
Requires-Dist: pytest-cov>=4.0.0; extra == 'test'
Requires-Dist: pytest-mock>=3.10.0; extra == 'test'
Requires-Dist: pytest>=7.0.0; extra == 'test'
langextract_vllm-0.1.0/README.md: File without changes
langextract_vllm-0.1.0/langextract_vllm/provider.py
@@ -0,0 +1,185 @@
"""Provider implementation for vLLM (direct library integration only)."""

from typing import List, Generator, Dict, Any

import langextract as lx
from langextract.core import base_model, exceptions, types

VLLM_PATTERNS = (
    r"^vllm:",
)


@lx.providers.registry.register(*VLLM_PATTERNS, priority=20)
class VLLMLanguageModel(base_model.BaseLanguageModel):
    """LangExtract provider for direct vLLM library integration.

    This provider handles model IDs matching: ['^vllm:']
    Uses the vLLM library directly for optimal performance.
    """

    def __init__(
        self,
        model_id: str,
        max_workers: int = 1,
        temperature: float = 0.7,
        top_p: float = 0.9,
        max_tokens: int = 1024,
        gpu_memory_utilization: float = 0.7,  # Conservative default to limit GPU memory use
        max_model_len: int = 1024,  # Conservative default to limit maximum sequence length
        **kwargs,
    ):
        """Initialize the direct vLLM provider.

        Args:
            model_id: Model identifier in format "vllm:model_name_or_path"
            max_workers: Maximum parallel workers (affects batch size)
            temperature: Sampling temperature
            top_p: Top-p sampling parameter
            max_tokens: Maximum tokens to generate
            gpu_memory_utilization: GPU memory utilization ratio (0.0-1.0)
            max_model_len: Maximum sequence length (reduces KV cache memory)
            **kwargs: Additional vLLM initialization parameters
        """
        try:
            from vllm import LLM, SamplingParams
        except ImportError as exc:
            raise exceptions.InferenceConfigError(
                "vLLM library not installed. Please install with: pip install vllm"
            ) from exc

        super().__init__()

        self.model_id = model_id
        self.max_workers = max_workers
        self.gpu_memory_utilization = gpu_memory_utilization
        self.max_model_len = max_model_len

        # Extract the actual model name from the identifier
        if model_id.startswith("vllm:"):
            actual_model_id = model_id.split(":", 1)[1]
        else:
            actual_model_id = model_id

        # Separate vLLM engine kwargs from sampling kwargs
        engine_kwargs = {
            k: v for k, v in kwargs.items()
            if k not in ['temperature', 'top_p', 'max_tokens', 'gpu_memory_utilization', 'max_model_len']
        }

        # Initialize the vLLM engine with memory control.
        # Disable torch.compile to avoid the PY_SSIZE_T_CLEAN error.
        engine_kwargs.setdefault('enforce_eager', True)
        engine_kwargs.setdefault('disable_custom_all_reduce', True)

        self._vllm_engine = LLM(
            model=actual_model_id,
            gpu_memory_utilization=gpu_memory_utilization,
            max_model_len=max_model_len,
            **engine_kwargs
        )

        # Configure sampling parameters
        self._sampling_params = SamplingParams(
            temperature=temperature,
            top_p=top_p,
            max_tokens=max_tokens,
        )

    def _process_batch(self, prompts: List[str]) -> List[types.ScoredOutput]:
        """Process a batch of prompts using the vLLM engine directly."""
        try:
            outputs = self._vllm_engine.generate(
                prompts,
                self._sampling_params,
                use_tqdm=False
            )

            results = []
            for output in outputs:
                result = output.outputs[0].text
                results.append(types.ScoredOutput(score=1.0, output=result))

            return results

        except Exception as e:
            raise exceptions.InferenceRuntimeError(
                f"vLLM engine error: {str(e)}", original=e
            ) from e

    def infer(self, batch_prompts: List[str], **kwargs) -> Generator[List[types.ScoredOutput], None, None]:
        """Run inference using direct vLLM integration.

        Args:
            batch_prompts: List of prompts to process.
            **kwargs: Additional sampling parameters (temperature, top_p, max_tokens).

        Yields:
            Lists of ScoredOutput objects, one per batch.
        """
        # Update sampling parameters if provided
        sampling_params = self._sampling_params
        if kwargs:
            sampling_kwargs = {
                k: kwargs[k] for k in ['temperature', 'top_p', 'max_tokens']
                if k in kwargs
            }
            if sampling_kwargs:
                # Create new sampling params with updated values
                current_params = {
                    'temperature': sampling_params.temperature,
                    'top_p': sampling_params.top_p,
                    'max_tokens': sampling_params.max_tokens,
                }
                current_params.update(sampling_kwargs)

                from vllm import SamplingParams
                sampling_params = SamplingParams(**current_params)

        # Store the original sampling params and swap in the overrides for this call
        original_params = self._sampling_params
        self._sampling_params = sampling_params

        try:
            # Process in chunks for better memory management.
            # The chunk size scales with max_workers.
            batch_size = max(1, self.max_workers * 4)

            for i in range(0, len(batch_prompts), batch_size):
                batch_chunk = batch_prompts[i:i + batch_size]
                chunk_results = self._process_batch(batch_chunk)
                yield chunk_results

        finally:
            # Restore the original sampling parameters
            self._sampling_params = original_params

    @classmethod
    def get_supported_models(cls) -> List[Dict[str, Any]]:
        """Get the list of supported models and their capabilities."""
        return [
            {
                "id": "vllm:*",
                "name": "vLLM Direct",
                "description": "Direct vLLM library integration with PagedAttention support",
                "capabilities": ["chat", "completion", "batch_processing"],
                "max_tokens": 131072,  # 128k context window (model dependent)
                "supports_parallel": True,
                "supports_batching": True,
            }
        ]

    def get_model_info(self) -> Dict[str, Any]:
        """Get information about the current model configuration."""
        return {
            "model_id": self.model_id,
            "max_workers": self.max_workers,
            "gpu_memory_utilization": self.gpu_memory_utilization,
            "max_model_len": self.max_model_len,
            "sampling_params": {
                "temperature": self._sampling_params.temperature,
                "top_p": self._sampling_params.top_p,
                "max_tokens": self._sampling_params.max_tokens,
            },
            "provider_type": "vllm_direct",
        }
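For orientation, here is a minimal usage sketch written directly against the class in the hunk above. It assumes a CUDA-capable host with vLLM installed; the model path is a placeholder rather than anything shipped with this package, and the sampling values are arbitrary illustration.

# Minimal sketch (assumes vLLM and a CUDA-capable GPU are available).
# The model path below is a placeholder; any model vLLM can load works.
from langextract_vllm.provider import VLLMLanguageModel

model = VLLMLanguageModel(
    model_id="vllm:Qwen/Qwen2.5-0.5B-Instruct",  # placeholder model path
    temperature=0.2,
    max_tokens=256,
)

prompts = ["Extract the dosage from: take 250 mg of amoxicillin twice daily."]
# infer() yields one list of ScoredOutput per internal chunk.
for batch in model.infer(prompts, temperature=0.0):
    for scored in batch:
        print(scored.output)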
langextract_vllm-0.1.0/pyproject.toml
@@ -0,0 +1,162 @@
[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"

[project]
name = "langextract-vllm"
version = "0.1.0"
description = "LangExtract provider plugin for VLLM - a high-performance large language model inference plugin"
readme = "README.md"
requires-python = ">=3.10"
license = {text = "MIT"}
authors = [
    {name = "LangExtract Team", email = "contact@langextract.dev"},
]
maintainers = [
    {name = "LangExtract Team", email = "contact@langextract.dev"},
]
keywords = ["langextract", "vllm", "llm", "nlp", "extraction", "inference", "gpu", "pytorch"]
classifiers = [
    "Development Status :: 4 - Beta",
    "Intended Audience :: Developers",
    "Intended Audience :: Science/Research",
    "License :: OSI Approved :: MIT License",
    "Operating System :: OS Independent",
    "Programming Language :: Python :: 3",
    "Programming Language :: Python :: 3.10",
    "Programming Language :: Python :: 3.11",
    "Programming Language :: Python :: 3.12",
    "Topic :: Scientific/Engineering :: Artificial Intelligence",
    "Topic :: Software Development :: Libraries :: Python Modules",
    "Topic :: Text Processing :: Linguistic",
]
dependencies = [
    "langextract>=1.0.0",
    "vllm>=0.2.0",
    "torch>=2.0.0",
    "transformers>=4.30.0",
]
[project.optional-dependencies]
dev = [
    "pytest>=7.0.0",
    "pytest-cov>=4.0.0",
    "black>=23.0.0",
    "isort>=5.12.0",
    "flake8>=6.0.0",
    "mypy>=1.0.0",
]
test = [
    "pytest>=7.0.0",
    "pytest-cov>=4.0.0",
    "pytest-mock>=3.10.0",
]
docs = [
    "sphinx>=5.0.0",
    "sphinx-rtd-theme>=1.2.0",
    "myst-parser>=1.0.0",
]

[project.urls]
Homepage = "https://github.com/langextract/langextract-vllm"
Repository = "https://github.com/langextract/langextract-vllm"
Documentation = "https://langextract-vllm.readthedocs.io"
"Bug Tracker" = "https://github.com/langextract/langextract-vllm/issues"
Changelog = "https://github.com/langextract/langextract-vllm/blob/main/CHANGELOG.md"

[project.scripts]
langextract-vllm-test = "langextract_vllm.test:main"

[project.entry-points."langextract.providers"]
vllm = "langextract_vllm.provider:VLLMLanguageModel"

[tool.hatch.build.targets.wheel]
packages = ["langextract_vllm"]

[tool.hatch.build.targets.sdist]
include = [
    "/langextract_vllm",
    "/README.md",
    "/LICENSE",
    "/pyproject.toml",
]

# uv configuration
[tool.uv]
dev-dependencies = [
    "pytest>=7.0.0",
    "pytest-cov>=4.0.0",
    "black>=23.0.0",
    "isort>=5.12.0",
    "flake8>=6.0.0",
    "mypy>=1.0.0",
    "pre-commit>=3.0.0",
]

# Code formatting configuration
[tool.black]
line-length = 88
target-version = ['py310', 'py311', 'py312']
include = '\.pyi?$'
extend-exclude = '''
/(
  # directories
  \.eggs
  | \.git
  | \.hg
  | \.mypy_cache
  | \.tox
  | \.venv
  | build
  | dist
)/
'''

[tool.isort]
profile = "black"
multi_line_output = 3
line_length = 88
known_first_party = ["langextract_vllm"]

# Type checking configuration
[tool.mypy]
python_version = "3.10"
warn_return_any = true
warn_unused_configs = true
disallow_untyped_defs = true
disallow_incomplete_defs = true
check_untyped_defs = true
disallow_untyped_decorators = true
no_implicit_optional = true
warn_redundant_casts = true
warn_unused_ignores = true
warn_no_return = true
warn_unreachable = true
strict_equality = true

[[tool.mypy.overrides]]
module = [
    "vllm.*",
    "torch.*",
    "transformers.*",
]
ignore_missing_imports = true

# pytest configuration
[tool.pytest.ini_options]
testpaths = ["tests"]
python_files = ["test_*.py", "*_test.py"]
python_classes = ["Test*"]
python_functions = ["test_*"]
addopts = [
    "--strict-markers",
    "--strict-config",
    "--cov=langextract_vllm",
    "--cov-report=term-missing",
    "--cov-report=html",
    "--cov-report=xml",
]
markers = [
    "slow: marks tests as slow (deselect with '-m \"not slow\"')",
    "integration: marks tests as integration tests",
    "unit: marks tests as unit tests",
]
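The [project.entry-points."langextract.providers"] table above is what lets an installed copy of this package register VLLMLanguageModel with langextract, so any model_id beginning with "vllm:" resolves to this provider. Below is a hedged end-to-end sketch; it relies on langextract's documented extract() API (argument and data-class names may differ across langextract versions), and the example text and model path are purely illustrative.

# Hedged sketch: after "pip install langextract-vllm", langextract should
# route "vllm:"-prefixed model IDs to the provider registered above.
# The model path and example data are placeholders, not part of this package.
import langextract as lx

examples = [
    lx.data.ExampleData(
        text="Take 250 mg of amoxicillin twice daily.",
        extractions=[
            lx.data.Extraction(
                extraction_class="medication",
                extraction_text="amoxicillin",
            )
        ],
    )
]

result = lx.extract(
    text_or_documents="The patient received 400 mg of ibuprofen.",
    prompt_description="Extract medication names from the text.",
    examples=examples,
    model_id="vllm:Qwen/Qwen2.5-0.5B-Instruct",  # placeholder model path
)
print(result.extractions)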