llm_batch_helper 0.3.0__tar.gz → 0.3.1__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {llm_batch_helper-0.3.0 → llm_batch_helper-0.3.1}/PKG-INFO +8 -3
- {llm_batch_helper-0.3.0 → llm_batch_helper-0.3.1}/README.md +7 -2
- {llm_batch_helper-0.3.0 → llm_batch_helper-0.3.1}/llm_batch_helper/__init__.py +1 -1
- {llm_batch_helper-0.3.0 → llm_batch_helper-0.3.1}/llm_batch_helper/config.py +2 -2
- {llm_batch_helper-0.3.0 → llm_batch_helper-0.3.1}/pyproject.toml +1 -1
- {llm_batch_helper-0.3.0 → llm_batch_helper-0.3.1}/LICENSE +0 -0
- {llm_batch_helper-0.3.0 → llm_batch_helper-0.3.1}/llm_batch_helper/cache.py +0 -0
- {llm_batch_helper-0.3.0 → llm_batch_helper-0.3.1}/llm_batch_helper/exceptions.py +0 -0
- {llm_batch_helper-0.3.0 → llm_batch_helper-0.3.1}/llm_batch_helper/input_handlers.py +0 -0
- {llm_batch_helper-0.3.0 → llm_batch_helper-0.3.1}/llm_batch_helper/providers.py +0 -0
@@ -1,6 +1,6 @@
|
|
1
1
|
Metadata-Version: 2.3
|
2
2
|
Name: llm_batch_helper
|
3
|
-
Version: 0.3.0
|
3
|
+
Version: 0.3.1
|
4
4
|
Summary: A Python package that enables batch submission of prompts to LLM APIs, with simplified interface and built-in async capabilities handled implicitly.
|
5
5
|
License: MIT
|
6
6
|
Keywords: llm,openai,together,openrouter,batch,async,ai,nlp,api
|
@@ -276,8 +276,8 @@ LLMConfig(
|
|
276
276
|
max_completion_tokens: Optional[int] = None, # Preferred parameter
|
277
277
|
max_tokens: Optional[int] = None, # Deprecated, kept for backward compatibility
|
278
278
|
system_instruction: Optional[str] = None,
|
279
|
-
max_retries: int = 10,
|
280
|
-
max_concurrent_requests: int = 5,
|
279
|
+
max_retries: int = 5,
|
280
|
+
max_concurrent_requests: int = 30,
|
281
281
|
verification_callback: Optional[Callable] = None,
|
282
282
|
verification_callback_args: Optional[Dict] = None
|
283
283
|
)
|
@@ -400,6 +400,11 @@ This project is licensed under the MIT License - see the [LICENSE](LICENSE) file
|
|
400
400
|
|
401
401
|
## Changelog
|
402
402
|
|
403
|
+
### v0.3.1
|
404
|
+
- **🔧 Configuration Updates**: Optimized default values for better performance
|
405
|
+
- Updated `max_retries` from 10 to 5 for faster failure detection
|
406
|
+
- Updated `max_concurrent_requests` from 5 to 30 for improved batch processing performance
|
407
|
+
|
403
408
|
### v0.3.0
|
404
409
|
- **🎉 Major Update**: Simplified API - async operations handled implicitly, no async/await required!
|
405
410
|
- **📓 Jupyter Support**: Works seamlessly in notebooks without event loop issues
|
@@ -249,8 +249,8 @@ LLMConfig(
|
|
249
249
|
max_completion_tokens: Optional[int] = None, # Preferred parameter
|
250
250
|
max_tokens: Optional[int] = None, # Deprecated, kept for backward compatibility
|
251
251
|
system_instruction: Optional[str] = None,
|
252
|
-
max_retries: int = 10,
|
253
|
-
max_concurrent_requests: int = 5,
|
252
|
+
max_retries: int = 5,
|
253
|
+
max_concurrent_requests: int = 30,
|
254
254
|
verification_callback: Optional[Callable] = None,
|
255
255
|
verification_callback_args: Optional[Dict] = None
|
256
256
|
)
|
@@ -373,6 +373,11 @@ This project is licensed under the MIT License - see the [LICENSE](LICENSE) file
|
|
373
373
|
|
374
374
|
## Changelog
|
375
375
|
|
376
|
+
### v0.3.1
|
377
|
+
- **🔧 Configuration Updates**: Optimized default values for better performance
|
378
|
+
- Updated `max_retries` from 10 to 5 for faster failure detection
|
379
|
+
- Updated `max_concurrent_requests` from 5 to 30 for improved batch processing performance
|
380
|
+
|
376
381
|
### v0.3.0
|
377
382
|
- **🎉 Major Update**: Simplified API - async operations handled implicitly, no async/await required!
|
378
383
|
- **📓 Jupyter Support**: Works seamlessly in notebooks without event loop issues
|
@@ -11,8 +11,8 @@ class LLMConfig:
|
|
11
11
|
temperature: float = 1.0,
|
12
12
|
max_tokens: Optional[int] = None,
|
13
13
|
system_instruction: Optional[str] = None,
|
14
|
-
max_retries: int = 10,
|
15
|
-
max_concurrent_requests: int = 5,
|
14
|
+
max_retries: int = 5, # Max retries for the combined LLM call + Verification
|
15
|
+
max_concurrent_requests: int = 30,
|
16
16
|
verification_callback: Optional[Callable[..., bool]] = None,
|
17
17
|
verification_callback_args: Optional[Dict] = None,
|
18
18
|
max_completion_tokens: Optional[int] = None,
|
@@ -1,6 +1,6 @@
|
|
1
1
|
[tool.poetry]
|
2
2
|
name = "llm_batch_helper"
|
3
|
-
version = "0.3.0"
|
3
|
+
version = "0.3.1"
|
4
4
|
description = "A Python package that enables batch submission of prompts to LLM APIs, with simplified interface and built-in async capabilities handled implicitly."
|
5
5
|
authors = ["Tianyi Peng <tianyipeng95@gmail.com>"]
|
6
6
|
readme = "README.md"
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|