llm_batch_helper-0.3.0.tar.gz → llm_batch_helper-0.3.1.tar.gz

--- a/PKG-INFO
+++ b/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: llm_batch_helper
-Version: 0.3.0
+Version: 0.3.1
 Summary: A Python package that enables batch submission of prompts to LLM APIs, with simplified interface and built-in async capabilities handled implicitly.
 License: MIT
 Keywords: llm,openai,together,openrouter,batch,async,ai,nlp,api
@@ -276,8 +276,8 @@ LLMConfig(
     max_completion_tokens: Optional[int] = None,  # Preferred parameter
     max_tokens: Optional[int] = None,  # Deprecated, kept for backward compatibility
     system_instruction: Optional[str] = None,
-    max_retries: int = 10,
-    max_concurrent_requests: int = 5,
+    max_retries: int = 5,
+    max_concurrent_requests: int = 30,
     verification_callback: Optional[Callable] = None,
     verification_callback_args: Optional[Dict] = None
 )
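The only behavioral change in 0.3.1 is this pair of defaults. A minimal sketch of pinning the old 0.3.0 values explicitly after upgrading — note that `model_name` is a hypothetical argument here, since the hunk starts partway through the constructor signature and any parameters before `max_completion_tokens` are not visible in this diff:

```python
from llm_batch_helper import LLMConfig

# Under 0.3.1 the new defaults (max_retries=5, max_concurrent_requests=30)
# apply automatically; pass explicit values to keep the 0.3.0 behavior.
config = LLMConfig(
    model_name="gpt-4o-mini",       # hypothetical; not shown in this hunk
    system_instruction="You are a concise assistant.",
    max_completion_tokens=256,      # preferred over the deprecated max_tokens
    max_retries=10,                 # 0.3.0 default; 0.3.1 default is 5
    max_concurrent_requests=5,      # 0.3.0 default; 0.3.1 default is 30
)
```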
@@ -400,6 +400,11 @@ This project is licensed under the MIT License - see the [LICENSE](LICENSE) file
 
 ## Changelog
 
+### v0.3.1
+- **🔧 Configuration Updates**: Optimized default values for better performance
+  - Updated `max_retries` from 10 to 5 for faster failure detection
+  - Updated `max_concurrent_requests` from 5 to 30 for improved batch processing performance
+
 ### v0.3.0
 - **🎉 Major Update**: Simplified API - async operations handled implicitly, no async/await required!
 - **📓 Jupyter Support**: Works seamlessly in notebooks without event loop issues
--- a/README.md
+++ b/README.md
@@ -249,8 +249,8 @@ LLMConfig(
     max_completion_tokens: Optional[int] = None,  # Preferred parameter
     max_tokens: Optional[int] = None,  # Deprecated, kept for backward compatibility
     system_instruction: Optional[str] = None,
-    max_retries: int = 10,
-    max_concurrent_requests: int = 5,
+    max_retries: int = 5,
+    max_concurrent_requests: int = 30,
     verification_callback: Optional[Callable] = None,
     verification_callback_args: Optional[Dict] = None
 )
@@ -373,6 +373,11 @@ This project is licensed under the MIT License - see the [LICENSE](LICENSE) file
 
 ## Changelog
 
+### v0.3.1
+- **🔧 Configuration Updates**: Optimized default values for better performance
+  - Updated `max_retries` from 10 to 5 for faster failure detection
+  - Updated `max_concurrent_requests` from 5 to 30 for improved batch processing performance
+
 ### v0.3.0
 - **🎉 Major Update**: Simplified API - async operations handled implicitly, no async/await required!
 - **📓 Jupyter Support**: Works seamlessly in notebooks without event loop issues
--- a/llm_batch_helper/__init__.py
+++ b/llm_batch_helper/__init__.py
@@ -3,7 +3,7 @@ from .config import LLMConfig
 from .input_handlers import get_prompts, read_prompt_files, read_prompt_list
 from .providers import process_prompts_batch, process_prompts_batch_async
 
-__version__ = "0.3.0"
+__version__ = "0.3.1"
 
 __all__ = [
     "LLMCache",
--- a/llm_batch_helper/config.py
+++ b/llm_batch_helper/config.py
@@ -11,8 +11,8 @@ class LLMConfig:
         temperature: float = 1.0,
         max_tokens: Optional[int] = None,
         system_instruction: Optional[str] = None,
-        max_retries: int = 10,  # Max retries for the combined LLM call + Verification
-        max_concurrent_requests: int = 5,
+        max_retries: int = 5,  # Max retries for the combined LLM call + Verification
+        max_concurrent_requests: int = 30,
         verification_callback: Optional[Callable[..., bool]] = None,
         verification_callback_args: Optional[Dict] = None,
         max_completion_tokens: Optional[int] = None,
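The source comment confirms that `max_retries` bounds the combined LLM call plus verification, and the `Callable[..., bool]` annotation shows the callback must return a boolean. A sketch of wiring one up, assuming the library passes the response text to the callback (the exact arguments it supplies are not visible in this diff):

```python
from llm_batch_helper import LLMConfig

def long_enough(response_text: str, min_chars: int = 20) -> bool:
    """Accept a response only if it has at least min_chars characters."""
    return len(response_text) >= min_chars

config = LLMConfig(
    model_name="gpt-4o-mini",      # hypothetical; not shown in this hunk
    max_retries=5,                 # retries the call + verification loop
    verification_callback=long_enough,
    verification_callback_args={"min_chars": 50},  # extra kwargs forwarded to the callback
)
```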
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "llm_batch_helper"
-version = "0.3.0"
+version = "0.3.1"
 description = "A Python package that enables batch submission of prompts to LLM APIs, with simplified interface and built-in async capabilities handled implicitly."
 authors = ["Tianyi Peng <tianyipeng95@gmail.com>"]
 readme = "README.md"