langextract_vllm-0.1.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,6 @@
+ """LangExtract provider plugin for VLLM."""
+
+ from langextract_vllm.provider import VLLMLanguageModel
+
+ __all__ = ['VLLMLanguageModel']
+ __version__ = "0.1.0"
@@ -0,0 +1,185 @@
+ """Provider implementation for vLLM (Direct Library Integration Only)."""
+
+ from typing import List, Generator, Dict, Any
+
+ import langextract as lx
+ from langextract.core import base_model, exceptions, types
+
+ VLLM_PATTERNS = (
+     r"^vllm:",
+ )
+
+
+ @lx.providers.registry.register(*VLLM_PATTERNS, priority=20)
+ class VLLMLanguageModel(base_model.BaseLanguageModel):
+     """LangExtract provider for direct vLLM library integration.
+
+     This provider handles model IDs matching: ['^vllm:']
+     Uses vLLM library directly for optimal performance.
+     """
+
+     def __init__(
+         self,
+         model_id: str,
+         max_workers: int = 1,
+         temperature: float = 0.7,
+         top_p: float = 0.9,
+         max_tokens: int = 1024,
+         gpu_memory_utilization: float = 0.7,  # Further reduced GPU memory utilization
+         max_model_len: int = 1024,  # Further reduced maximum sequence length
+         **kwargs,
+     ):
+         """Initialize the direct vLLM provider.
+
+         Args:
+             model_id: Model identifier in format "vllm:model_name_or_path"
+             max_workers: Maximum parallel workers (affects batch size)
+             temperature: Sampling temperature
+             top_p: Top-p sampling parameter
+             max_tokens: Maximum tokens to generate
+             gpu_memory_utilization: GPU memory utilization ratio (0.0-1.0)
+             max_model_len: Maximum sequence length (reduces KV cache memory)
+             **kwargs: Additional vLLM initialization parameters
+         """
+         try:
+             from vllm import LLM, SamplingParams
+         except ImportError as e:
+             raise exceptions.InferenceConfigError(
+                 "vLLM library not installed. Please install with: pip install vllm"
+             ) from e
+
+         super().__init__()
+
+         self.model_id = model_id
+         self.max_workers = max_workers
+         self.gpu_memory_utilization = gpu_memory_utilization
+         self.max_model_len = max_model_len
+
+         # Extract actual model name from identifier
+         if model_id.startswith("vllm:"):
+             actual_model_id = model_id.split(":", 1)[1]
+         else:
+             actual_model_id = model_id
+
+         # Separate vLLM engine kwargs from sampling kwargs
+         engine_kwargs = {
+             k: v for k, v in kwargs.items()
+             if k not in ['temperature', 'top_p', 'max_tokens', 'gpu_memory_utilization', 'max_model_len']
+         }
+
+         # Initialize vLLM engine with memory control
+         # Disable torch.compile to avoid PY_SSIZE_T_CLEAN error
+         engine_kwargs.setdefault('enforce_eager', True)
+         engine_kwargs.setdefault('disable_custom_all_reduce', True)
+
+         self._vllm_engine = LLM(
+             model=actual_model_id,
+             gpu_memory_utilization=gpu_memory_utilization,
+             max_model_len=max_model_len,
+             **engine_kwargs
+         )
+
+         # Configure sampling parameters
+         self._sampling_params = SamplingParams(
+             temperature=temperature,
+             top_p=top_p,
+             max_tokens=max_tokens,
+         )
+
+     def _process_batch(self, prompts: List[str]) -> List[types.ScoredOutput]:
+         """Process batch using direct vLLM engine."""
+         try:
+             outputs = self._vllm_engine.generate(
+                 prompts,
+                 self._sampling_params,
+                 use_tqdm=False
+             )
+
+             results = []
+             for output in outputs:
+                 result = output.outputs[0].text
+                 results.append(types.ScoredOutput(score=1.0, output=result))
+
+             return results
+
+         except Exception as e:
+             raise exceptions.InferenceRuntimeError(
+                 f"vLLM engine error: {str(e)}", original=e
+             ) from e
+
+     def infer(self, batch_prompts: List[str], **kwargs) -> Generator[List[types.ScoredOutput], None, None]:
+         """Run inference using direct vLLM integration.
+
+         Args:
+             batch_prompts: List of prompts to process.
+             **kwargs: Additional sampling parameters (temperature, top_p, max_tokens).
+
+         Yields:
+             Lists of ScoredOutput objects, one per batch.
+         """
+         # Update sampling parameters if provided
+         sampling_params = self._sampling_params
+         if kwargs:
+             sampling_kwargs = {
+                 k: kwargs[k] for k in ['temperature', 'top_p', 'max_tokens']
+                 if k in kwargs
+             }
+             if sampling_kwargs:
+                 # Create new sampling params with updated values
+                 current_params = {
+                     'temperature': sampling_params.temperature,
+                     'top_p': sampling_params.top_p,
+                     'max_tokens': sampling_params.max_tokens,
+                 }
+                 current_params.update(sampling_kwargs)
+
+                 from vllm import SamplingParams
+                 sampling_params = SamplingParams(**current_params)
+
+         # Store original sampling params and update for this inference
+         original_params = self._sampling_params
+         self._sampling_params = sampling_params
+
+         try:
+             # Process in batches for better memory management
+             # Adjust batch size based on max_workers and available resources
+             batch_size = max(1, self.max_workers * 4)
+
+             for i in range(0, len(batch_prompts), batch_size):
+                 batch_chunk = batch_prompts[i:i + batch_size]
+                 chunk_results = self._process_batch(batch_chunk)
+                 yield chunk_results
+
+         finally:
+             # Restore original sampling parameters
+             self._sampling_params = original_params
+
+     @classmethod
+     def get_supported_models(cls) -> List[Dict[str, Any]]:
+         """Get list of supported models and their capabilities."""
+         return [
+             {
+                 "id": "vllm:*",
+                 "name": "vLLM Direct",
+                 "description": "Direct vLLM library integration with PagedAttention support",
+                 "capabilities": ["chat", "completion", "batch_processing"],
+                 "max_tokens": 131072,  # 128k context window (model dependent)
+                 "supports_parallel": True,
+                 "supports_batching": True,
+             }
+         ]
+
+     def get_model_info(self) -> Dict[str, Any]:
+         """Get information about the current model configuration."""
+         return {
+             "model_id": self.model_id,
+             "max_workers": self.max_workers,
+             "gpu_memory_utilization": self.gpu_memory_utilization,
+             "max_model_len": self.max_model_len,
+             "sampling_params": {
+                 "temperature": self._sampling_params.temperature,
+                 "top_p": self._sampling_params.top_p,
+                 "max_tokens": self._sampling_params.max_tokens,
+             },
+             "provider_type": "vllm_direct",
+         }
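
For orientation, a minimal usage sketch of the provider defined above. The model path is a placeholder (any vLLM-compatible checkpoint should work), and the sketch assumes the vllm package and a suitable GPU are available:

    from langextract_vllm.provider import VLLMLanguageModel

    # Placeholder model path; substitute any vLLM-compatible Hugging Face model.
    model = VLLMLanguageModel(
        model_id="vllm:Qwen/Qwen2.5-0.5B-Instruct",
        temperature=0.0,
        max_tokens=256,
    )

    # infer() yields one list of ScoredOutput objects per processed chunk.
    for batch in model.infer(["Extract all person names from: Alice met Bob in Paris."]):
        for scored in batch:
            print(scored.output)
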
@@ -0,0 +1,46 @@
+ Metadata-Version: 2.4
+ Name: langextract-vllm
+ Version: 0.1.0
+ Summary: LangExtract provider plugin for VLLM - high-performance large language model inference plugin
+ Project-URL: Homepage, https://github.com/langextract/langextract-vllm
+ Project-URL: Repository, https://github.com/langextract/langextract-vllm
+ Project-URL: Documentation, https://langextract-vllm.readthedocs.io
+ Project-URL: Bug Tracker, https://github.com/langextract/langextract-vllm/issues
+ Project-URL: Changelog, https://github.com/langextract/langextract-vllm/blob/main/CHANGELOG.md
+ Author-email: LangExtract Team <contact@langextract.dev>
+ Maintainer-email: LangExtract Team <contact@langextract.dev>
+ License: MIT
+ License-File: LICENSE
+ Keywords: extraction,gpu,inference,langextract,llm,nlp,pytorch,vllm
+ Classifier: Development Status :: 4 - Beta
+ Classifier: Intended Audience :: Developers
+ Classifier: Intended Audience :: Science/Research
+ Classifier: License :: OSI Approved :: MIT License
+ Classifier: Operating System :: OS Independent
+ Classifier: Programming Language :: Python :: 3
+ Classifier: Programming Language :: Python :: 3.10
+ Classifier: Programming Language :: Python :: 3.11
+ Classifier: Programming Language :: Python :: 3.12
+ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
+ Classifier: Topic :: Software Development :: Libraries :: Python Modules
+ Classifier: Topic :: Text Processing :: Linguistic
+ Requires-Python: >=3.10
+ Requires-Dist: langextract>=1.0.0
+ Requires-Dist: torch>=2.0.0
+ Requires-Dist: transformers>=4.30.0
+ Requires-Dist: vllm>=0.2.0
+ Provides-Extra: dev
+ Requires-Dist: black>=23.0.0; extra == 'dev'
+ Requires-Dist: flake8>=6.0.0; extra == 'dev'
+ Requires-Dist: isort>=5.12.0; extra == 'dev'
+ Requires-Dist: mypy>=1.0.0; extra == 'dev'
+ Requires-Dist: pytest-cov>=4.0.0; extra == 'dev'
+ Requires-Dist: pytest>=7.0.0; extra == 'dev'
+ Provides-Extra: docs
+ Requires-Dist: myst-parser>=1.0.0; extra == 'docs'
+ Requires-Dist: sphinx-rtd-theme>=1.2.0; extra == 'docs'
+ Requires-Dist: sphinx>=5.0.0; extra == 'docs'
+ Provides-Extra: test
+ Requires-Dist: pytest-cov>=4.0.0; extra == 'test'
+ Requires-Dist: pytest-mock>=3.10.0; extra == 'test'
+ Requires-Dist: pytest>=7.0.0; extra == 'test'
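
Per the requirements above, `pip install langextract-vllm` pulls in langextract, torch, transformers, and vllm at runtime; the dev, docs, and test extras only add the listed tooling and are not needed for inference.
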
@@ -0,0 +1,7 @@
+ langextract_vllm/__init__.py,sha256=z-S-_mwU-g-J_psIkmrCZLmwG76l6KhLNpS071U8KrM,156
+ langextract_vllm/provider.py,sha256=Hqs2vzZYadwmIG-VOnAvodGIthjtOzUV9TpgzULvnPs,6857
+ langextract_vllm-0.1.0.dist-info/METADATA,sha256=Vuf-Rwgn73y4XK27n_As2NBP9zwfnexnyznMeTmdzSQ,2169
+ langextract_vllm-0.1.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+ langextract_vllm-0.1.0.dist-info/entry_points.txt,sha256=3M2Y4ac8Sd2wJaga8rXJT5Lnk8kUmwGmhnsiSQvhc7o,145
+ langextract_vllm-0.1.0.dist-info/licenses/LICENSE,sha256=20V09KLo5sLIeUpiJCTpgvsKqX0uPQrwhaGK1GYb_LM,1069
+ langextract_vllm-0.1.0.dist-info/RECORD,,
@@ -0,0 +1,4 @@
+ Wheel-Version: 1.0
+ Generator: hatchling 1.27.0
+ Root-Is-Purelib: true
+ Tag: py3-none-any
@@ -0,0 +1,5 @@
+ [console_scripts]
+ langextract-vllm-test = langextract_vllm.test:main
+
+ [langextract.providers]
+ vllm = langextract_vllm.provider:VLLMLanguageModel
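
The [langextract.providers] entry point above is how langextract discovers this plugin; combined with the '^vllm:' pattern registered in provider.py, a "vllm:"-prefixed model_id should be routed to VLLMLanguageModel. A rough end-to-end sketch follows; the lx.extract and lx.data names reflect langextract's documented usage but should be treated as assumptions here, and the model path is a placeholder:

    import langextract as lx

    # Assumed langextract API: one few-shot example guiding the extraction.
    examples = [
        lx.data.ExampleData(
            text="Bob visited Berlin.",
            extractions=[
                lx.data.Extraction(extraction_class="person", extraction_text="Bob"),
                lx.data.Extraction(extraction_class="location", extraction_text="Berlin"),
            ],
        )
    ]

    # The "vllm:" prefix is what routes this request to VLLMLanguageModel.
    result = lx.extract(
        text_or_documents="Alice flew from Paris to Tokyo on Monday.",
        prompt_description="Extract people and locations.",
        examples=examples,
        model_id="vllm:Qwen/Qwen2.5-0.5B-Instruct",  # placeholder model path
    )
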
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) 2025 Fajar Garnadi
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.