llm_batch_helper-0.1.6-py3-none-any.whl → llm_batch_helper-0.2.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- llm_batch_helper/__init__.py +1 -1
- llm_batch_helper/config.py +2 -0
- llm_batch_helper/providers.py +65 -1
- {llm_batch_helper-0.1.6.dist-info → llm_batch_helper-0.2.0.dist-info}/METADATA +3 -3
- llm_batch_helper-0.2.0.dist-info/RECORD +10 -0
- llm_batch_helper-0.1.6.dist-info/RECORD +0 -10
- {llm_batch_helper-0.1.6.dist-info → llm_batch_helper-0.2.0.dist-info}/LICENSE +0 -0
- {llm_batch_helper-0.1.6.dist-info → llm_batch_helper-0.2.0.dist-info}/WHEEL +0 -0
llm_batch_helper/__init__.py
CHANGED
llm_batch_helper/config.py
CHANGED
@@ -16,6 +16,7 @@ class LLMConfig:
         verification_callback: Optional[Callable[..., bool]] = None,
         verification_callback_args: Optional[Dict] = None,
         max_completion_tokens: Optional[int] = None,
+        **kwargs
     ):
         self.model_name = model_name
         self.temperature = temperature
@@ -30,3 +31,4 @@ class LLMConfig:
         self.verification_callback_args = (
             verification_callback_args if verification_callback_args is not None else {}
         )
+        self.kwargs = kwargs
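The net effect of these two config.py changes is that any keyword argument `LLMConfig` does not recognize is captured at construction time and later splatted into the provider request. A minimal sketch of the intended usage, assuming `LLMConfig` is exported from the package root (`top_p` and `seed` are illustrative upstream API parameters, not options defined by this package):

```python
from llm_batch_helper import LLMConfig

# Extra keyword arguments (here top_p and seed) are swallowed by **kwargs
# and stored on the config, to be forwarded later via **config.kwargs.
config = LLMConfig(
    model_name="gpt-4o-mini",  # illustrative model name
    temperature=0.7,
    max_completion_tokens=512,
    top_p=0.9,
    seed=42,
)
assert config.kwargs == {"top_p": 0.9, "seed": 42}
```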
llm_batch_helper/providers.py
CHANGED
@@ -46,6 +46,7 @@ async def _get_openai_response_direct(
         messages=messages,
         temperature=config.temperature,
         max_completion_tokens=config.max_completion_tokens,
+        **config.kwargs,
     )
     usage_details = {
         "prompt_token_count": response.usage.prompt_tokens,
@@ -94,6 +95,7 @@ async def _get_together_response_direct(
             "messages": messages,
             "temperature": config.temperature,
             "max_tokens": config.max_completion_tokens,
+            **config.kwargs,
         }

         response = await client.post(
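One subtlety of this pass-through: in the Together payload, `**config.kwargs` is unpacked last inside a dict literal, so a kwarg such as `max_tokens` silently overrides the configured value, whereas in the OpenAI call above the same collision raises a `TypeError` because the keyword would be supplied twice. A standalone illustration of the difference:

```python
# Dict literal: later keys win silently (the Together/OpenRouter payload case).
payload = {"temperature": 0.7, **{"temperature": 0.0}}
assert payload["temperature"] == 0.0

# Function call: a duplicated keyword raises instead (the OpenAI client case).
def create(temperature=None, **rest):
    return temperature

try:
    create(temperature=0.7, **{"temperature": 0.0})
except TypeError as exc:
    print(exc)  # create() got multiple values for keyword argument 'temperature'
```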
@@ -116,6 +118,66 @@ async def _get_together_response_direct(
             "usage_details": usage_details,
         }

+
+@retry(
+    stop=stop_after_attempt(5),
+    wait=wait_exponential(multiplier=1, min=4, max=60),
+    retry=retry_if_exception_type(
+        (
+            ConnectionError,
+            TimeoutError,
+            httpx.HTTPStatusError,
+            httpx.RequestError,
+        )
+    ),
+    reraise=True,
+)
+async def _get_openrouter_response_direct(
+    prompt: str, config: LLMConfig
+) -> Dict[str, Union[str, Dict]]:
+    api_key = os.environ.get("OPENROUTER_API_KEY")
+    if not api_key:
+        raise ValueError("OPENROUTER_API_KEY environment variable not set")
+
+    async with httpx.AsyncClient(timeout=1000.0) as client:
+        messages = [
+            {"role": "system", "content": config.system_instruction},
+            {"role": "user", "content": prompt},
+        ]
+
+        headers = {
+            "Authorization": f"Bearer {api_key}",
+            "Content-Type": "application/json",
+        }
+
+        payload = {
+            "model": config.model_name,
+            "messages": messages,
+            "temperature": config.temperature,
+            "max_tokens": config.max_completion_tokens,
+            **config.kwargs,
+        }
+
+        response = await client.post(
+            "https://openrouter.ai/api/v1/chat/completions",
+            json=payload,
+            headers=headers,
+        )
+        response.raise_for_status()
+
+        response_data = response.json()
+        usage = response_data.get("usage", {})
+        usage_details = {
+            "prompt_token_count": usage.get("prompt_tokens", 0),
+            "completion_token_count": usage.get("completion_tokens", 0),
+            "total_token_count": usage.get("total_tokens", 0),
+        }
+
+        return {
+            "response_text": response_data["choices"][0]["message"]["content"],
+            "usage_details": usage_details,
+        }
+
 async def get_llm_response_with_internal_retry(
     prompt_id: str,
     prompt: str,
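The new `_get_openrouter_response_direct` mirrors the Together implementation: the same tenacity policy (up to 5 attempts, exponential backoff clamped between 4 and 60 seconds, re-raising the final failure), an OpenAI-compatible chat payload, and the same `**config.kwargs` pass-through. Called directly it would look roughly like the sketch below, which assumes `OPENROUTER_API_KEY` is set, that `system_instruction` is an `LLMConfig` constructor parameter (the diff only shows it read as `config.system_instruction`), and an illustrative model id:

```python
import asyncio

from llm_batch_helper.config import LLMConfig
from llm_batch_helper.providers import _get_openrouter_response_direct

async def main():
    config = LLMConfig(
        model_name="meta-llama/llama-3.1-8b-instruct",  # any OpenRouter model id
        system_instruction="You are a concise assistant.",
        temperature=0.2,
        max_completion_tokens=256,
    )
    # Return shape matches the diff above: response text plus token usage.
    result = await _get_openrouter_response_direct("Say hello.", config)
    print(result["response_text"])
    print(result["usage_details"])

asyncio.run(main())
```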
@@ -135,6 +197,8 @@ async def get_llm_response_with_internal_retry(
         response = await _get_openai_response_direct(prompt, config)
     elif provider.lower() == "together":
         response = await _get_together_response_direct(prompt, config)
+    elif provider.lower() == "openrouter":
+        response = await _get_openrouter_response_direct(prompt, config)
     else:
         raise ValueError(f"Unsupported provider: {provider}")

@@ -165,7 +229,7 @@ async def process_prompts_batch(
         prompts: Optional list of prompts in any supported format (string, tuple, or dict)
         input_dir: Optional path to directory containing prompt files
         config: LLM configuration
-        provider: LLM provider to use ("openai", "together", or "
+        provider: LLM provider to use ("openai", "together", or "openrouter")
         desc: Description for progress bar
         cache_dir: Optional directory for caching responses
         force: If True, force regeneration even if cached response exists
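End to end, the new branch means OpenRouter is selected exactly like the existing providers. A batch-run sketch based on the `process_prompts_batch` signature shown in the METADATA below (model id and prompt are illustrative, and both names are assumed to be importable from the package root):

```python
import asyncio

from llm_batch_helper import LLMConfig, process_prompts_batch

async def main():
    config = LLMConfig(model_name="openai/gpt-4o-mini", temperature=0.7)
    results = await process_prompts_batch(
        config=config,
        provider="openrouter",  # new in 0.2.0
        prompts=["Summarize Hamlet in one sentence."],
        cache_dir="llm_cache",  # responses cached here; reruns hit the cache
    )
    print(results)

asyncio.run(main())
```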
{llm_batch_helper-0.1.6.dist-info → llm_batch_helper-0.2.0.dist-info}/METADATA
CHANGED
@@ -1,9 +1,9 @@
 Metadata-Version: 2.3
 Name: llm_batch_helper
-Version: 0.1.6
+Version: 0.2.0
 Summary: A Python package that enables batch submission of prompts to LLM APIs, with built-in async capabilities and response caching.
 License: MIT
-Keywords: llm,openai,together,batch,async,ai,nlp,api
+Keywords: llm,openai,together,openrouter,batch,async,ai,nlp,api
 Author: Tianyi Peng
 Author-email: tianyipeng95@gmail.com
 Requires-Python: >=3.11,<4.0
@@ -245,7 +245,7 @@ Main function for batch processing of prompts.
 ```python
 async def process_prompts_batch(
     config: LLMConfig,
-    provider: str,  # "openai" or "
+    provider: str,  # "openai", "together", or "openrouter"
     prompts: Optional[List[str]] = None,
     input_dir: Optional[str] = None,
     cache_dir: str = "llm_cache",
llm_batch_helper-0.2.0.dist-info/RECORD
ADDED
@@ -0,0 +1,10 @@
+llm_batch_helper/__init__.py,sha256=Cy4p5YE6m3pbpux7GCjpf7GsyUbO_rUtqJsrhTWpPQY,348
+llm_batch_helper/cache.py,sha256=QUODQ1tPCvFThO3yvVOTcorcOrmN2dP5HLF1Y2O1bTQ,1276
+llm_batch_helper/config.py,sha256=RasljP9dzigZpKjm9yW6gU7_e3yjztfokjiDBf77iO4,1372
+llm_batch_helper/exceptions.py,sha256=59_f3jINUhKFble6HTp8pmtLSFE2MYLHWGclwaQKs28,296
+llm_batch_helper/input_handlers.py,sha256=IadA732F1Rw0zcBok5hjZr32RUm8eTUOpvLsRuMvaE4,2877
+llm_batch_helper/providers.py,sha256=lNxAh6hfKjXhHEOMDmoca2dbtcE62UZV-HXlTKemIOE,12230
+llm_batch_helper-0.2.0.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+llm_batch_helper-0.2.0.dist-info/METADATA,sha256=-x5B1uALZBkM2VLf8tB0GI7JcJkiXu7qgPhkPG427rI,11288
+llm_batch_helper-0.2.0.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
+llm_batch_helper-0.2.0.dist-info/RECORD,,
llm_batch_helper-0.1.6.dist-info/RECORD
REMOVED
@@ -1,10 +0,0 @@
-llm_batch_helper/__init__.py,sha256=a9jaWNSp0pqLz3gB_mo9r1aYo9f1i-qtuYhJj8AOPHk,348
-llm_batch_helper/cache.py,sha256=QUODQ1tPCvFThO3yvVOTcorcOrmN2dP5HLF1Y2O1bTQ,1276
-llm_batch_helper/config.py,sha256=WcZKTD-Mtocsx1plS9x3hh6MstVmyxD-tyidGUatkPY,1327
-llm_batch_helper/exceptions.py,sha256=59_f3jINUhKFble6HTp8pmtLSFE2MYLHWGclwaQKs28,296
-llm_batch_helper/input_handlers.py,sha256=IadA732F1Rw0zcBok5hjZr32RUm8eTUOpvLsRuMvaE4,2877
-llm_batch_helper/providers.py,sha256=cgccd_4D7J48ClmkigZ7KXOzTnBmaya8soDYF5IlPJs,10212
-llm_batch_helper-0.1.6.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
-llm_batch_helper-0.1.6.dist-info/METADATA,sha256=Msg55neTu6jvxLKW6hicJOp-k7Q6Edp8qdIs_AKfVkM,11262
-llm_batch_helper-0.1.6.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
-llm_batch_helper-0.1.6.dist-info/RECORD,,
{llm_batch_helper-0.1.6.dist-info → llm_batch_helper-0.2.0.dist-info}/LICENSE
File without changes
{llm_batch_helper-0.1.6.dist-info → llm_batch_helper-0.2.0.dist-info}/WHEEL
File without changes