llm_batch_helper-0.2.0-py3-none-any.whl → llm_batch_helper-0.3.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
llm_batch_helper/__init__.py
@@ -1,15 +1,16 @@
  from .cache import LLMCache
  from .config import LLMConfig
  from .input_handlers import get_prompts, read_prompt_files, read_prompt_list
- from .providers import process_prompts_batch
+ from .providers import process_prompts_batch, process_prompts_batch_async

- __version__ = "0.2.0"
+ __version__ = "0.3.0"

  __all__ = [
      "LLMCache",
      "LLMConfig",
      "get_prompts",
      "process_prompts_batch",
+     "process_prompts_batch_async",  # For backward compatibility
      "read_prompt_files",
      "read_prompt_list",
  ]
llm_batch_helper/config.py
@@ -8,7 +8,7 @@ class LLMConfig:
      def __init__(
          self,
          model_name: str,
-         temperature: float = 0.7,
+         temperature: float = 1.0,
          max_tokens: Optional[int] = None,
          system_instruction: Optional[str] = None,
          max_retries: int = 10,  # Max retries for the combined LLM call + Verification
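
Note that the default sampling temperature changes from 0.7 to 1.0 in this release. A minimal sketch (using only `LLMConfig` as exported by the package) of pinning the old default explicitly if an application depends on it:

```python
from llm_batch_helper import LLMConfig

# Pin the pre-0.3.0 default explicitly if your application depends on it;
# otherwise the new default of 1.0 applies.
config = LLMConfig(model_name="gpt-4o-mini", temperature=0.7)
```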
llm_batch_helper/providers.py
@@ -1,10 +1,12 @@
  import asyncio
  import os
  from typing import Any, Dict, List, Optional, Tuple, Union
+ from datetime import datetime
+ import warnings

  import httpx
  import openai
- from tenacity import retry, retry_if_exception_type, stop_after_attempt, wait_exponential
+ from tenacity import retry, retry_if_exception_type, stop_after_attempt, wait_exponential, before_sleep_log
  from tqdm.asyncio import tqdm_asyncio

  from .cache import LLMCache
@@ -12,6 +14,55 @@ from .config import LLMConfig
  from .input_handlers import get_prompts


+ def _run_async_function(async_func, *args, **kwargs):
+     """
+     Run an async function in a way that works in both regular Python and Jupyter notebooks.
+
+     This handles the event loop management properly for different environments.
+     """
+     try:
+         # Try to get the current event loop
+         loop = asyncio.get_running_loop()
+         # If we're in a running loop (like Jupyter), we need to use nest_asyncio
+         try:
+             import nest_asyncio
+             nest_asyncio.apply()
+             return asyncio.run(async_func(*args, **kwargs))
+         except ImportError:
+             # If nest_asyncio is not available, try to run in the current loop
+             # This is a fallback that might work in some cases
+             import concurrent.futures
+             with concurrent.futures.ThreadPoolExecutor() as executor:
+                 future = executor.submit(asyncio.run, async_func(*args, **kwargs))
+                 return future.result()
+     except RuntimeError:
+         # No event loop running, we can use asyncio.run directly
+         return asyncio.run(async_func(*args, **kwargs))
+
+
+ def log_retry_attempt(retry_state):
+     """Custom logging function for retry attempts."""
+     attempt_number = retry_state.attempt_number
+     exception = retry_state.outcome.exception()
+     wait_time = retry_state.next_action.sleep if retry_state.next_action else 0
+
+     error_type = type(exception).__name__
+     error_msg = str(exception)
+
+     # Extract status code if available
+     status_code = "unknown"
+     if hasattr(exception, 'status_code'):
+         status_code = exception.status_code
+     elif hasattr(exception, 'response') and hasattr(exception.response, 'status_code'):
+         status_code = exception.response.status_code
+
+     print(f"🔄 [{datetime.now().strftime('%H:%M:%S')}] Retry attempt {attempt_number}/5:")
+     print(f" Error: {error_type} (status: {status_code})")
+     print(f" Message: {error_msg[:100]}{'...' if len(error_msg) > 100 else ''}")
+     print(f" Waiting {wait_time:.1f}s before next attempt...")
+     print()
+
+
  @retry(
      stop=stop_after_attempt(5),
      wait=wait_exponential(multiplier=1, min=4, max=60),
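
The `_run_async_function` helper added above is what lets the new synchronous entry point work both in plain scripts and in environments that already run an event loop (Jupyter/IPython). For illustration only, a simplified standalone sketch of the fallback path (no `nest_asyncio`); the names here are not part of the package:

```python
import asyncio
import concurrent.futures


async def _demo_coro() -> str:
    await asyncio.sleep(0)
    return "done"


def run_coroutine_anywhere(async_func, *args, **kwargs):
    """Run a coroutine from sync code, with or without a running event loop."""
    try:
        asyncio.get_running_loop()  # raises RuntimeError when no loop is running
    except RuntimeError:
        # Plain script: no loop yet, so asyncio.run() can own one.
        return asyncio.run(async_func(*args, **kwargs))
    # A loop is already running (e.g. Jupyter): delegate to a worker thread,
    # which gets its own event loop via asyncio.run().
    with concurrent.futures.ThreadPoolExecutor(max_workers=1) as pool:
        return pool.submit(asyncio.run, async_func(*args, **kwargs)).result()


print(run_coroutine_anywhere(_demo_coro))  # -> done
```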
@@ -25,6 +76,7 @@ from .input_handlers import get_prompts
              openai.APIError,
          )
      ),
+     before_sleep=log_retry_attempt,
      reraise=True,
  )
  async def _get_openai_response_direct(
@@ -130,6 +182,7 @@ async def _get_together_response_direct(
              httpx.RequestError,
          )
      ),
+     before_sleep=log_retry_attempt,
      reraise=True,
  )
  async def _get_openrouter_response_direct(
@@ -214,7 +267,7 @@ async def get_llm_response_with_internal_retry(
      }


- async def process_prompts_batch(
+ async def process_prompts_batch_async(
      prompts: Optional[List[Union[str, Tuple[str, str], Dict[str, Any]]]] = None,
      input_dir: Optional[str] = None,
      config: LLMConfig = None,
@@ -270,6 +323,57 @@ async def process_prompts_batch(
      return results


+ def process_prompts_batch(
+     prompts: Optional[List[Union[str, Tuple[str, str], Dict[str, Any]]]] = None,
+     input_dir: Optional[str] = None,
+     config: LLMConfig = None,
+     provider: str = "openai",
+     desc: str = "Processing prompts",
+     cache_dir: Optional[str] = None,
+     force: bool = False,
+ ) -> Dict[str, Dict[str, Union[str, Dict]]]:
+     """
+     Process a batch of prompts through the LLM (synchronous version).
+
+     This is the main user-facing function that works in both regular Python scripts
+     and Jupyter notebooks without requiring async/await syntax.
+
+     Args:
+         prompts: Optional list of prompts in any supported format (string, tuple, or dict)
+         input_dir: Optional path to directory containing prompt files
+         config: LLM configuration
+         provider: LLM provider to use ("openai", "together", or "openrouter")
+         desc: Description for progress bar
+         cache_dir: Optional directory for caching responses
+         force: If True, force regeneration even if cached response exists
+
+     Returns:
+         Dict mapping prompt IDs to their responses
+
+     Note:
+         Either prompts or input_dir must be provided, but not both.
+
+     Example:
+         >>> from llm_batch_helper import LLMConfig, process_prompts_batch
+         >>> config = LLMConfig(model_name="gpt-4o-mini")
+         >>> results = process_prompts_batch(
+         ...     prompts=["What is 2+2?", "What is the capital of France?"],
+         ...     config=config,
+         ...     provider="openai"
+         ... )
+     """
+     return _run_async_function(
+         process_prompts_batch_async,
+         prompts=prompts,
+         input_dir=input_dir,
+         config=config,
+         provider=provider,
+         desc=desc,
+         cache_dir=cache_dir,
+         force=force,
+     )
+
+
  async def _process_single_prompt_attempt_with_verification(
      prompt_id: str,
      prompt_text: str,
@@ -302,6 +406,9 @@ async def _process_single_prompt_attempt_with_verification(
      # Process the prompt
      last_exception_details = None
      for attempt in range(config.max_retries):
+         if attempt > 0:
+             print(f"🔁 [{datetime.now().strftime('%H:%M:%S')}] Application-level retry {attempt+1}/{config.max_retries} for prompt: {prompt_id}")
+
          try:
              # Get LLM response
              llm_response_data = await get_llm_response_with_internal_retry(
@@ -309,7 +416,12 @@ async def _process_single_prompt_attempt_with_verification(
              )

              if "error" in llm_response_data:
+                 print(f"❌ [{datetime.now().strftime('%H:%M:%S')}] API call failed on attempt {attempt+1}: {llm_response_data.get('error', 'Unknown error')}")
                  last_exception_details = llm_response_data
+                 if attempt < config.max_retries - 1:
+                     wait_time = min(2 * 2**attempt, 30)
+                     print(f" Waiting {wait_time}s before next application retry...")
+                     await asyncio.sleep(wait_time)
                  continue

              # Verify response if callback provided
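
The application-level wait added in this hunk follows `min(2 * 2**attempt, 30)` with a 0-based `attempt`, i.e. exponential backoff capped at 30 seconds. A quick illustration of the resulting schedule:

```python
# Backoff schedule produced by min(2 * 2**attempt, 30):
for attempt in range(6):
    print(attempt, min(2 * 2**attempt, 30))
# 0 2
# 1 4
# 2 8
# 3 16
# 4 30  (32 capped at 30)
# 5 30
```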
@@ -329,7 +441,6 @@ async def _process_single_prompt_attempt_with_verification(
                  }
                  if attempt == config.max_retries - 1:
                      return prompt_id, last_exception_details
-                 await asyncio.sleep(min(2 * 2**attempt, 30))
                  continue

              # Save to cache if cache_dir provided
@@ -346,7 +457,7 @@ async def _process_single_prompt_attempt_with_verification(
                  }
                  if attempt == config.max_retries - 1:
                      return prompt_id, last_exception_details
-                 await asyncio.sleep(min(2 * 2**attempt, 30))
+                 # Sleep is now handled above with logging
                  continue

      return prompt_id, last_exception_details or {
llm_batch_helper-0.2.0.dist-info/METADATA → llm_batch_helper-0.3.0.dist-info/METADATA
@@ -1,7 +1,7 @@
  Metadata-Version: 2.3
  Name: llm_batch_helper
- Version: 0.2.0
- Summary: A Python package that enables batch submission of prompts to LLM APIs, with built-in async capabilities and response caching.
+ Version: 0.3.0
+ Summary: A Python package that enables batch submission of prompts to LLM APIs, with simplified interface and built-in async capabilities handled implicitly.
  License: MIT
  Keywords: llm,openai,together,openrouter,batch,async,ai,nlp,api
  Author: Tianyi Peng
@@ -56,10 +56,12 @@ This package is designed to solve these exact pain points with async processing,
  - **Async Processing**: Submit multiple prompts concurrently for faster processing
  - **Response Caching**: Automatically cache responses to avoid redundant API calls
  - **Multiple Input Formats**: Support for both file-based and list-based prompts
- - **Provider Support**: Works with OpenAI and Together.ai APIs
- - **Retry Logic**: Built-in retry mechanism with exponential backoff
- - **Verification Callbacks**: Custom verification for response quality
+ - **Provider Support**: Works with OpenAI (all models including GPT-5), OpenRouter (100+ models), and Together.ai APIs
+ - **Retry Logic**: Built-in retry mechanism with exponential backoff and detailed logging
+ - **Verification Callbacks**: Custom verification for response quality
  - **Progress Tracking**: Real-time progress bars for batch operations
+ - **Simplified API**: Async operations handled implicitly - no async/await needed (v0.3.0+)
+ - **Detailed Error Logging**: See exactly what happens during retries with timestamps and error details

  ## Installation

@@ -90,9 +92,12 @@ poetry shell

  **Option A: Environment Variables**
  ```bash
- # For OpenAI
+ # For OpenAI (all models including GPT-5)
  export OPENAI_API_KEY="your-openai-api-key"

+ # For OpenRouter (100+ models - Recommended)
+ export OPENROUTER_API_KEY="your-openrouter-api-key"
+
  # For Together.ai
  export TOGETHER_API_KEY="your-together-api-key"
  ```
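
A small, package-independent sketch for confirming that the relevant key is actually visible to the Python process before running a batch (variable names taken from the block above):

```python
import os

# Check which provider keys are available in the current environment.
for var in ("OPENAI_API_KEY", "OPENROUTER_API_KEY", "TOGETHER_API_KEY"):
    print(f"{var}: {'set' if os.getenv(var) else 'missing'}")
```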
@@ -122,71 +127,111 @@ The tutorial covers all features with interactive examples!
  ### 3. Basic usage

  ```python
- import asyncio
  from dotenv import load_dotenv  # Optional: for .env file support
  from llm_batch_helper import LLMConfig, process_prompts_batch

  # Optional: Load environment variables from .env file
  load_dotenv()

+ # Create configuration
+ config = LLMConfig(
+     model_name="gpt-4o-mini",
+     temperature=1.0,
+     max_completion_tokens=100,
+     max_concurrent_requests=30  # number of concurrent requests with asyncIO
+ )
+
+ # Process prompts - no async/await needed!
+ prompts = [
+     "What is the capital of France?",
+     "What is 2+2?",
+     "Who wrote 'Hamlet'?"
+ ]
+
+ results = process_prompts_batch(
+     config=config,
+     provider="openai",
+     prompts=prompts,
+     cache_dir="cache"
+ )
+
+ # Print results
+ for prompt_id, response in results.items():
+     print(f"{prompt_id}: {response['response_text']}")
+ ```
+
+ **🎉 New in v0.3.0**: `process_prompts_batch` now handles async operations **implicitly** - no more async/await syntax needed! Works seamlessly in Jupyter notebooks.
+
+ ### 🔄 Backward Compatibility
+
+ For users who prefer the async version or have existing code, the async API is still available:
+
+ ```python
+ import asyncio
+ from llm_batch_helper import process_prompts_batch_async
+
  async def main():
-     # Create configuration
-     config = LLMConfig(
-         model_name="gpt-4o-mini",
-         temperature=0.7,
-         max_completion_tokens=100,  # or use max_tokens for backward compatibility
-         max_concurrent_requests=30  # number of concurrent requests with asyncIO
-     )
-
-     # Process prompts
-     prompts = [
-         "What is the capital of France?",
-         "What is 2+2?",
-         "Who wrote 'Hamlet'?"
-     ]
-
-     results = await process_prompts_batch(
+     results = await process_prompts_batch_async(
+         prompts=["Hello world!"],
          config=config,
-         provider="openai",
-         prompts=prompts,
-         cache_dir="cache"
+         provider="openai"
      )
-
-     # Print results
-     for prompt_id, response in results.items():
-         print(f"{prompt_id}: {response['response_text']}")
+     return results

- if __name__ == "__main__":
-     asyncio.run(main())
+ results = asyncio.run(main())
  ```

  ## Usage Examples

+ ### OpenRouter (Recommended - 100+ Models)
+
+ ```python
+ from llm_batch_helper import LLMConfig, process_prompts_batch
+
+ # Access 100+ models through OpenRouter
+ config = LLMConfig(
+     model_name="deepseek/deepseek-v3.1-base",  # or openai/gpt-4o, anthropic/claude-3-5-sonnet
+     temperature=1.0,
+     max_completion_tokens=500
+ )
+
+ prompts = [
+     "Explain quantum computing briefly.",
+     "What are the benefits of renewable energy?",
+     "How does machine learning work?"
+ ]
+
+ results = process_prompts_batch(
+     prompts=prompts,
+     config=config,
+     provider="openrouter"  # Access to 100+ models!
+ )
+
+ for prompt_id, result in results.items():
+     print(f"Response: {result['response_text']}")
+ ```
+
  ### File-based Prompts

  ```python
- import asyncio
  from llm_batch_helper import LLMConfig, process_prompts_batch

- async def process_files():
-     config = LLMConfig(
-         model_name="gpt-4o-mini",
-         temperature=0.7,
-         max_completion_tokens=200
-     )
-
-     # Process all .txt files in a directory
-     results = await process_prompts_batch(
-         config=config,
-         provider="openai",
-         input_dir="prompts",  # Directory containing .txt files
-         cache_dir="cache",
-         force=False  # Use cached responses if available
-     )
-
-     return results
+ config = LLMConfig(
+     model_name="gpt-4o-mini",
+     temperature=1.0,
+     max_completion_tokens=200
+ )
+
+ # Process all .txt files in a directory
+ results = process_prompts_batch(
+     config=config,
+     provider="openai",
+     input_dir="prompts",  # Directory containing .txt files
+     cache_dir="cache",
+     force=False  # Use cached responses if available
+ )

- asyncio.run(process_files())
+ print(f"Processed {len(results)} prompts from files")
  ```

  ### Custom Verification
@@ -210,7 +255,7 @@ def verify_response(prompt_id, llm_response_data, original_prompt_text, **kwargs

  config = LLMConfig(
      model_name="gpt-4o-mini",
-     temperature=0.7,
+     temperature=1.0,
      verification_callback=verify_response,
      verification_callback_args={"min_length": 20}
  )
@@ -227,7 +272,7 @@ Configuration class for LLM requests.
  ```python
  LLMConfig(
      model_name: str,
-     temperature: float = 0.7,
+     temperature: float = 1.0,
      max_completion_tokens: Optional[int] = None,  # Preferred parameter
      max_tokens: Optional[int] = None,  # Deprecated, kept for backward compatibility
      system_instruction: Optional[str] = None,
@@ -240,12 +285,28 @@ LLMConfig(

  ### process_prompts_batch

- Main function for batch processing of prompts.
+ Main function for batch processing of prompts (async operations handled implicitly).

  ```python
- async def process_prompts_batch(
+ def process_prompts_batch(
      config: LLMConfig,
-     provider: str,  # "openai", "together", or "openrouter"
+     provider: str,  # "openai", "openrouter" (recommended), or "together"
+     prompts: Optional[List[str]] = None,
+     input_dir: Optional[str] = None,
+     cache_dir: str = "llm_cache",
+     force: bool = False,
+     desc: str = "Processing prompts"
+ ) -> Dict[str, Dict[str, Any]]
+ ```
+
+ ### process_prompts_batch_async
+
+ Async version for backward compatibility and advanced use cases.
+
+ ```python
+ async def process_prompts_batch_async(
+     config: LLMConfig,
+     provider: str,  # "openai", "openrouter" (recommended), or "together"
      prompts: Optional[List[str]] = None,
      input_dir: Optional[str] = None,
      cache_dir: str = "llm_cache",
@@ -297,10 +358,15 @@ llm_batch_helper/
  ## Supported Models

  ### OpenAI
- - gpt-4o-mini
- - gpt-4o
- - gpt-4
- - gpt-3.5-turbo
+ - **All OpenAI models**
+
+ ### OpenRouter (Recommended - 100+ Models)
+ - **OpenAI models**: `openai/gpt-4o`, `openai/gpt-4o-mini`
+ - **Anthropic models**: `anthropic/claude-3-5-sonnet`, `anthropic/claude-3-haiku`
+ - **DeepSeek models**: `deepseek/deepseek-v3.1-base`, `deepseek/deepseek-chat`
+ - **Meta models**: `meta-llama/llama-3.1-405b-instruct`
+ - **Google models**: `google/gemini-pro-1.5`
+ - **And 90+ more models** from all major providers

  ### Together.ai
  - meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo
@@ -317,7 +383,7 @@ llm_batch_helper/
  - [API Reference](https://llm-batch-helper.readthedocs.io/en/latest/api.html) - Complete API documentation
  - [Examples](https://llm-batch-helper.readthedocs.io/en/latest/examples.html) - Practical usage examples
  - [Tutorials](https://llm-batch-helper.readthedocs.io/en/latest/tutorials.html) - Step-by-step tutorials
- - [Provider Guide](https://llm-batch-helper.readthedocs.io/en/latest/providers.html) - OpenAI & Together.ai setup
+ - [Provider Guide](https://llm-batch-helper.readthedocs.io/en/latest/providers.html) - OpenAI, OpenRouter & Together.ai setup

  ## Contributing

@@ -334,6 +400,19 @@ This project is licensed under the MIT License - see the [LICENSE](LICENSE) file

  ## Changelog

+ ### v0.3.0
+ - **🎉 Major Update**: Simplified API - async operations handled implicitly, no async/await required!
+ - **📓 Jupyter Support**: Works seamlessly in notebooks without event loop issues
+ - **🔍 Detailed Retry Logging**: See exactly what happens during retries with timestamps
+ - **🔄 Backward Compatibility**: Original async API still available as `process_prompts_batch_async`
+ - **📚 Updated Examples**: All documentation updated to show simplified usage
+ - **⚡ Smart Event Loop Handling**: Automatically detects and handles different Python environments
+
+ ### v0.2.0
+ - Enhanced API stability
+ - Improved error handling
+ - Better documentation
+
  ### v0.1.5
  - Added Together.ai provider support
  - Support for open-source models (Llama, Mixtral, etc.)
llm_batch_helper-0.3.0.dist-info/RECORD
@@ -0,0 +1,10 @@
+ llm_batch_helper/__init__.py,sha256=IFBwr-pFiSeJdC2LzRFbVj0sF07r7i9Wy1Dcs5bjmVQ,442
+ llm_batch_helper/cache.py,sha256=QUODQ1tPCvFThO3yvVOTcorcOrmN2dP5HLF1Y2O1bTQ,1276
+ llm_batch_helper/config.py,sha256=wOI-JmxzJIM-w1-rrLqyqfPa_RyuI6hQlBWSCb44Z4U,1372
+ llm_batch_helper/exceptions.py,sha256=59_f3jINUhKFble6HTp8pmtLSFE2MYLHWGclwaQKs28,296
+ llm_batch_helper/input_handlers.py,sha256=IadA732F1Rw0zcBok5hjZr32RUm8eTUOpvLsRuMvaE4,2877
+ llm_batch_helper/providers.py,sha256=zv7dCiKZtSOcdV-4kvd3WKhClOv1jio9neZqGcYskm8,16794
+ llm_batch_helper-0.3.0.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+ llm_batch_helper-0.3.0.dist-info/METADATA,sha256=ndbraa5EA5D9-WoWN7o0aLt6NThLTv6zFuZxDsVy3VQ,14254
+ llm_batch_helper-0.3.0.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
+ llm_batch_helper-0.3.0.dist-info/RECORD,,
llm_batch_helper-0.2.0.dist-info/RECORD
@@ -1,10 +0,0 @@
- llm_batch_helper/__init__.py,sha256=Cy4p5YE6m3pbpux7GCjpf7GsyUbO_rUtqJsrhTWpPQY,348
- llm_batch_helper/cache.py,sha256=QUODQ1tPCvFThO3yvVOTcorcOrmN2dP5HLF1Y2O1bTQ,1276
- llm_batch_helper/config.py,sha256=RasljP9dzigZpKjm9yW6gU7_e3yjztfokjiDBf77iO4,1372
- llm_batch_helper/exceptions.py,sha256=59_f3jINUhKFble6HTp8pmtLSFE2MYLHWGclwaQKs28,296
- llm_batch_helper/input_handlers.py,sha256=IadA732F1Rw0zcBok5hjZr32RUm8eTUOpvLsRuMvaE4,2877
- llm_batch_helper/providers.py,sha256=lNxAh6hfKjXhHEOMDmoca2dbtcE62UZV-HXlTKemIOE,12230
- llm_batch_helper-0.2.0.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
- llm_batch_helper-0.2.0.dist-info/METADATA,sha256=-x5B1uALZBkM2VLf8tB0GI7JcJkiXu7qgPhkPG427rI,11288
- llm_batch_helper-0.2.0.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
- llm_batch_helper-0.2.0.dist-info/RECORD,,