llm_batch_helper 0.1.3__py3-none-any.whl → 0.1.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
llm_batch_helper/__init__.py

@@ -3,7 +3,7 @@ from .config import LLMConfig
 from .input_handlers import get_prompts, read_prompt_files, read_prompt_list
 from .providers import process_prompts_batch
 
-__version__ = "0.1.1"
+__version__ = "0.1.5"
 
 __all__ = [
     "LLMCache",
llm_batch_helper/providers.py

@@ -60,6 +60,65 @@ async def _get_openai_response_direct(
         "usage_details": usage_details,
     }
 
+
+@retry(
+    stop=stop_after_attempt(5),
+    wait=wait_exponential(multiplier=1, min=4, max=60),
+    retry=retry_if_exception_type(
+        (
+            ConnectionError,
+            TimeoutError,
+            httpx.HTTPStatusError,
+            httpx.RequestError,
+        )
+    ),
+    reraise=True,
+)
+async def _get_together_response_direct(
+    prompt: str, config: LLMConfig
+) -> Dict[str, Union[str, Dict]]:
+    api_key = os.environ.get("TOGETHER_API_KEY")
+    if not api_key:
+        raise ValueError("TOGETHER_API_KEY environment variable not set")
+
+    async with httpx.AsyncClient(timeout=1000.0) as client:
+        messages = [
+            {"role": "system", "content": config.system_instruction},
+            {"role": "user", "content": prompt},
+        ]
+
+        headers = {
+            "Authorization": f"Bearer {api_key}",
+            "Content-Type": "application/json",
+        }
+
+        payload = {
+            "model": config.model_name,
+            "messages": messages,
+            "temperature": config.temperature,
+            "max_tokens": config.max_completion_tokens,
+        }
+
+        response = await client.post(
+            "https://api.together.xyz/chat/completions",
+            json=payload,
+            headers=headers,
+        )
+        response.raise_for_status()
+
+        response_data = response.json()
+        usage = response_data.get("usage", {})
+        usage_details = {
+            "prompt_token_count": usage.get("prompt_tokens", 0),
+            "completion_token_count": usage.get("completion_tokens", 0),
+            "total_token_count": usage.get("total_tokens", 0),
+        }
+
+        return {
+            "response_text": response_data["choices"][0]["message"]["content"],
+            "usage_details": usage_details,
+        }
+
 async def get_llm_response_with_internal_retry(
     prompt_id: str,
     prompt: str,
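The new Together helper reuses the same tenacity retry policy as the existing OpenAI path. For context, a self-contained sketch of what that decorator does in isolation, using a simulated failure (wait times are approximate; the exact schedule depends on how the installed tenacity version indexes the exponent):

```python
from tenacity import (
    retry,
    retry_if_exception_type,
    stop_after_attempt,
    wait_exponential,
)

attempts = 0

@retry(
    stop=stop_after_attempt(5),  # at most 5 total attempts
    wait=wait_exponential(multiplier=1, min=4, max=60),  # ~4s, 4s, 8s, 16s between attempts
    retry=retry_if_exception_type(ConnectionError),  # retry only on the listed exception type
    reraise=True,  # surface the original error instead of tenacity's RetryError
)
def flaky_call():
    global attempts
    attempts += 1
    raise ConnectionError(f"simulated failure on attempt {attempts}")

try:
    flaky_call()
except ConnectionError as exc:
    print(f"gave up after {attempts} attempts: {exc}")
```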
@@ -77,6 +136,8 @@ async def get_llm_response_with_internal_retry(
     try:
         if provider.lower() == "openai":
             response = await _get_openai_response_direct(prompt, config)
+        elif provider.lower() == "together":
+            response = await _get_together_response_direct(prompt, config)
         else:
             raise ValueError(f"Unsupported provider: {provider}")
 
@@ -107,7 +168,7 @@ async def process_prompts_batch(
         prompts: Optional list of prompts in any supported format (string, tuple, or dict)
         input_dir: Optional path to directory containing prompt files
         config: LLM configuration
-        provider: LLM provider to use ("openai" or "gemini")
+        provider: LLM provider to use ("openai", "together", or "gemini")
         desc: Description for progress bar
         cache_dir: Optional directory for caching responses
         force: If True, force regeneration even if cached response exists
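Taken together, the providers.py changes make the new backend reachable through the package's batch entry point. A minimal sketch, assuming the public API documented in the README portion of METADATA below; the structure of the returned results is not visible in this diff, so the final print is illustrative:

```python
import asyncio

from llm_batch_helper import LLMConfig, process_prompts_batch

async def main():
    # TOGETHER_API_KEY must be set in the environment, per the new helper above.
    config = LLMConfig(
        model_name="meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo",
        temperature=0.7,
        max_completion_tokens=100,
    )
    results = await process_prompts_batch(
        config=config,
        provider="together",
        prompts=["Summarize the theory of relativity in one sentence."],
    )
    print(results)  # exact result shape is defined in providers.py, not shown here

asyncio.run(main())
```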
llm_batch_helper-{0.1.3 → 0.1.5}.dist-info/METADATA

@@ -1,9 +1,9 @@
 Metadata-Version: 2.3
 Name: llm_batch_helper
-Version: 0.1.3
+Version: 0.1.5
 Summary: A Python package that enables batch submission of prompts to LLM APIs, with built-in async capabilities and response caching.
 License: MIT
-Keywords: llm,openai,batch,async,ai,nlp,api
+Keywords: llm,openai,together,batch,async,ai,nlp,api
 Author: Tianyi Peng
 Author-email: tianyipeng95@gmail.com
 Requires-Python: >=3.11,<4.0
@@ -17,7 +17,7 @@ Classifier: Programming Language :: Python :: 3.13
 Classifier: Programming Language :: Python :: 3.10
 Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
 Classifier: Topic :: Software Development :: Libraries :: Python Modules
-Requires-Dist: httpx (>=0.24.0,<0.25.0)
+Requires-Dist: httpx (>=0.24.0,<2.0.0)
 Requires-Dist: openai (>=1.0.0,<2.0.0)
 Requires-Dist: python-dotenv (>=1.0.0,<2.0.0)
 Requires-Dist: tenacity (>=8.0.0,<9.0.0)
@@ -35,7 +35,7 @@ A Python package that enables batch submission of prompts to LLM APIs, with buil
 - **Async Processing**: Submit multiple prompts concurrently for faster processing
 - **Response Caching**: Automatically cache responses to avoid redundant API calls
 - **Multiple Input Formats**: Support for both file-based and list-based prompts
-- **Provider Support**: Works with OpenAI API
+- **Provider Support**: Works with OpenAI and Together.ai APIs
 - **Retry Logic**: Built-in retry mechanism with exponential backoff
 - **Verification Callbacks**: Custom verification for response quality
 - **Progress Tracking**: Real-time progress bars for batch operations
@@ -70,6 +70,9 @@ poetry shell
 ```bash
 # For OpenAI
 export OPENAI_API_KEY="your-openai-api-key"
+
+# For Together.ai
+export TOGETHER_API_KEY="your-together-api-key"
 ```
 
 ### 2. Interactive Tutorial (Recommended)
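Since python-dotenv is already a dependency (see Requires-Dist above), the same keys can be loaded from a local .env file rather than exported; a minimal sketch:

```python
import os

from dotenv import load_dotenv  # python-dotenv, already listed in Requires-Dist

load_dotenv()  # picks up OPENAI_API_KEY / TOGETHER_API_KEY from ./.env if present

for key in ("OPENAI_API_KEY", "TOGETHER_API_KEY"):
    print(key, "set" if os.environ.get(key) else "missing")
```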
@@ -89,7 +92,7 @@ async def main():
     config = LLMConfig(
         model_name="gpt-4o-mini",
         temperature=0.7,
-        max_tokens=100,
+        max_completion_tokens=100,  # or use max_tokens for backward compatibility
         max_concurrent_requests=30  # number of concurrent requests with asyncIO
     )
 
@@ -127,7 +130,7 @@ async def process_files():
     config = LLMConfig(
         model_name="gpt-4o-mini",
         temperature=0.7,
-        max_tokens=200
+        max_completion_tokens=200
     )
 
     # Process all .txt files in a directory
@@ -183,7 +186,8 @@ Configuration class for LLM requests.
 LLMConfig(
     model_name: str,
     temperature: float = 0.7,
-    max_tokens: Optional[int] = None,
+    max_completion_tokens: Optional[int] = None,  # Preferred parameter
+    max_tokens: Optional[int] = None,  # Deprecated, kept for backward compatibility
     system_instruction: Optional[str] = None,
     max_retries: int = 10,
     max_concurrent_requests: int = 5,
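Note that config.py's hash is identical in the old and new RECORD listings at the end of this diff, so both parameters already existed in 0.1.3; only the documented preference changes here. A short sketch of the two spellings:

```python
from llm_batch_helper import LLMConfig

# Preferred spelling, per the updated signature above:
config = LLMConfig(
    model_name="gpt-4o-mini",
    temperature=0.7,
    max_completion_tokens=200,
)

# Deprecated spelling, still accepted for backward compatibility; how the two
# fields are reconciled is internal to config.py and not shown in this diff.
legacy = LLMConfig(
    model_name="gpt-4o-mini",
    temperature=0.7,
    max_tokens=200,
)
```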
@@ -199,7 +203,7 @@ Main function for batch processing of prompts.
 ```python
 async def process_prompts_batch(
     config: LLMConfig,
-    provider: str,  # "openai"
+    provider: str,  # "openai" or "together"
     prompts: Optional[List[str]] = None,
     input_dir: Optional[str] = None,
     cache_dir: str = "llm_cache",
@@ -256,6 +260,23 @@ llm_batch_helper/
 - gpt-4
 - gpt-3.5-turbo
 
+### Together.ai
+- meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo
+- meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo
+- mistralai/Mixtral-8x7B-Instruct-v0.1
+- And many other open-source models
+
+## Documentation
+
+📖 **[Complete Documentation](https://llm-batch-helper.readthedocs.io/)** - Comprehensive docs on Read the Docs
+
+### Quick Links:
+- [Quick Start Guide](https://llm-batch-helper.readthedocs.io/en/latest/quickstart.html) - Get started quickly
+- [API Reference](https://llm-batch-helper.readthedocs.io/en/latest/api.html) - Complete API documentation
+- [Examples](https://llm-batch-helper.readthedocs.io/en/latest/examples.html) - Practical usage examples
+- [Tutorials](https://llm-batch-helper.readthedocs.io/en/latest/tutorials.html) - Step-by-step tutorials
+- [Provider Guide](https://llm-batch-helper.readthedocs.io/en/latest/providers.html) - OpenAI & Together.ai setup
+
 ## Contributing
 
 1. Fork the repository
@@ -271,6 +292,12 @@ This project is licensed under the MIT License - see the [LICENSE](LICENSE) file
 
 ## Changelog
 
+### v0.1.5
+- Added Together.ai provider support
+- Support for open-source models (Llama, Mixtral, etc.)
+- Enhanced documentation with Read the Docs
+- Updated examples and tutorials
+
 ### v0.1.0
 - Initial release
 - Support for OpenAI API
llm_batch_helper-0.1.5.dist-info/RECORD (added)

@@ -0,0 +1,10 @@
+llm_batch_helper/__init__.py,sha256=POB4Fodeltq96NbiaLh7YSEPwEu50Giz46V2qyVZZoY,348
+llm_batch_helper/cache.py,sha256=QUODQ1tPCvFThO3yvVOTcorcOrmN2dP5HLF1Y2O1bTQ,1276
+llm_batch_helper/config.py,sha256=WcZKTD-Mtocsx1plS9x3hh6MstVmyxD-tyidGUatkPY,1327
+llm_batch_helper/exceptions.py,sha256=59_f3jINUhKFble6HTp8pmtLSFE2MYLHWGclwaQKs28,296
+llm_batch_helper/input_handlers.py,sha256=IadA732F1Rw0zcBok5hjZr32RUm8eTUOpvLsRuMvaE4,2877
+llm_batch_helper/providers.py,sha256=aV6IbGfRqFoxCQ90yd3UsCqmyeOBMRC9YW8VVq6ghq8,10258
+llm_batch_helper-0.1.5.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+llm_batch_helper-0.1.5.dist-info/METADATA,sha256=58ray3o9P37IjYcgXfPa_SS5YnQhz3M212zrxa0e3L0,8882
+llm_batch_helper-0.1.5.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
+llm_batch_helper-0.1.5.dist-info/RECORD,,
llm_batch_helper-0.1.3.dist-info/RECORD (removed)

@@ -1,10 +0,0 @@
-llm_batch_helper/__init__.py,sha256=WuZSgItWxi-uoND-X-tOmYaxJaxPXAUa9AXZM58bNuc,348
-llm_batch_helper/cache.py,sha256=QUODQ1tPCvFThO3yvVOTcorcOrmN2dP5HLF1Y2O1bTQ,1276
-llm_batch_helper/config.py,sha256=WcZKTD-Mtocsx1plS9x3hh6MstVmyxD-tyidGUatkPY,1327
-llm_batch_helper/exceptions.py,sha256=59_f3jINUhKFble6HTp8pmtLSFE2MYLHWGclwaQKs28,296
-llm_batch_helper/input_handlers.py,sha256=IadA732F1Rw0zcBok5hjZr32RUm8eTUOpvLsRuMvaE4,2877
-llm_batch_helper/providers.py,sha256=ccvDqn136fMWPWaqfSFUToeOJFFhSI86YatLoH1wrQs,8332
-llm_batch_helper-0.1.3.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
-llm_batch_helper-0.1.3.dist-info/METADATA,sha256=oJgWbwZQpPicAv63GX83kF1I6kNxXqPKQBIgCBqHgoc,7525
-llm_batch_helper-0.1.3.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
-llm_batch_helper-0.1.3.dist-info/RECORD,,