llm_batch_helper 0.3.1__tar.gz → 0.3.2__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {llm_batch_helper-0.3.1 → llm_batch_helper-0.3.2}/PKG-INFO +3 -3
- {llm_batch_helper-0.3.1 → llm_batch_helper-0.3.2}/llm_batch_helper/providers.py +16 -3
- {llm_batch_helper-0.3.1 → llm_batch_helper-0.3.2}/pyproject.toml +2 -2
- {llm_batch_helper-0.3.1 → llm_batch_helper-0.3.2}/LICENSE +0 -0
- {llm_batch_helper-0.3.1 → llm_batch_helper-0.3.2}/README.md +0 -0
- {llm_batch_helper-0.3.1 → llm_batch_helper-0.3.2}/llm_batch_helper/__init__.py +0 -0
- {llm_batch_helper-0.3.1 → llm_batch_helper-0.3.2}/llm_batch_helper/cache.py +0 -0
- {llm_batch_helper-0.3.1 → llm_batch_helper-0.3.2}/llm_batch_helper/config.py +0 -0
- {llm_batch_helper-0.3.1 → llm_batch_helper-0.3.2}/llm_batch_helper/exceptions.py +0 -0
- {llm_batch_helper-0.3.1 → llm_batch_helper-0.3.2}/llm_batch_helper/input_handlers.py +0 -0
{llm_batch_helper-0.3.1 → llm_batch_helper-0.3.2}/PKG-INFO

@@ -1,20 +1,20 @@
 Metadata-Version: 2.3
 Name: llm_batch_helper
-Version: 0.3.1
+Version: 0.3.2
 Summary: A Python package that enables batch submission of prompts to LLM APIs, with simplified interface and built-in async capabilities handled implicitly.
 License: MIT
 Keywords: llm,openai,together,openrouter,batch,async,ai,nlp,api
 Author: Tianyi Peng
 Author-email: tianyipeng95@gmail.com
-Requires-Python: >=3.
+Requires-Python: >=3.10,<4.0
 Classifier: Development Status :: 4 - Beta
 Classifier: Intended Audience :: Developers
 Classifier: License :: OSI Approved :: MIT License
 Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.10
 Classifier: Programming Language :: Python :: 3.11
 Classifier: Programming Language :: Python :: 3.12
 Classifier: Programming Language :: Python :: 3.13
-Classifier: Programming Language :: Python :: 3.10
 Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
 Classifier: Topic :: Software Development :: Libraries :: Python Modules
 Requires-Dist: httpx (>=0.24.0,<2.0.0)
{llm_batch_helper-0.3.1 → llm_batch_helper-0.3.2}/llm_batch_helper/providers.py

@@ -288,10 +288,11 @@ async def process_prompts_batch_async(
         force: If True, force regeneration even if cached response exists

     Returns:
-        Dict mapping prompt IDs to their responses
+        Dict mapping prompt IDs to their responses, ordered by input sequence

     Note:
         Either prompts or input_dir must be provided, but not both.
+        Results are returned in the same order as the input prompts.
     """
     if prompts is None and input_dir is None:
         raise ValueError("Either prompts or input_dir must be provided")
@@ -309,6 +310,9 @@ async def process_prompts_batch_async(

     # Process prompts
     results = {}
+    # Keep track of original order for sorting results
+    prompt_order = {prompt_id: idx for idx, (prompt_id, _) in enumerate(prompts)}
+
     tasks = [
         _process_single_prompt_attempt_with_verification(
             prompt_id, prompt_text, config, provider, semaphore, cache_dir, force
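To make the new bookkeeping concrete, here is a tiny standalone illustration of the `prompt_order` mapping built in the hunk above; the sample prompt IDs and texts are invented for the example and are not part of the package.

```python
# Standalone illustration of the prompt_order mapping from the hunk above.
# The prompt IDs and texts are made-up examples.
prompts = [("greet", "Say hello."), ("farewell", "Say goodbye."), ("joke", "Tell a joke.")]

# Same dict comprehension as in providers.py: prompt ID -> position in the input list.
prompt_order = {prompt_id: idx for idx, (prompt_id, _) in enumerate(prompts)}
print(prompt_order)  # {'greet': 0, 'farewell': 1, 'joke': 2}
```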
@@ -320,7 +324,14 @@ async def process_prompts_batch_async(
         prompt_id, response_data = await future
         results[prompt_id] = response_data

-    return results
+    # Sort results by original input order to maintain input sequence
+    # Note: Python 3.7+ guarantees dict insertion order, we explicitly sort
+    # to ensure results match the original prompt order regardless of completion order
+    ordered_results = {}
+    for prompt_id in sorted(results.keys(), key=lambda pid: prompt_order[pid]):
+        ordered_results[prompt_id] = results[prompt_id]
+
+    return ordered_results


 def process_prompts_batch(
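Seen in isolation, the change above implements a common gather-then-reorder pattern: collect results as tasks finish in arbitrary order, then rebuild the dict in input order. The sketch below is a minimal self-contained version of that pattern; the `fake_task` coroutine, the random delays, and the use of `asyncio.as_completed` are illustrative assumptions, not the package's actual internals.

```python
import asyncio
import random

async def fake_task(prompt_id: str, text: str) -> tuple[str, str]:
    # Simulate an LLM call that finishes after a random delay.
    await asyncio.sleep(random.random() * 0.1)
    return prompt_id, f"response to {text!r}"

async def main() -> dict[str, str]:
    prompts = [("a", "first"), ("b", "second"), ("c", "third")]
    # Remember each prompt's position in the input list.
    prompt_order = {prompt_id: idx for idx, (prompt_id, _) in enumerate(prompts)}

    results: dict[str, str] = {}
    # Collect results as tasks complete (completion order is arbitrary).
    for future in asyncio.as_completed([fake_task(pid, text) for pid, text in prompts]):
        prompt_id, response = await future
        results[prompt_id] = response

    # Rebuild the dict in input order, mirroring the 0.3.2 change above.
    ordered_results = {}
    for prompt_id in sorted(results.keys(), key=lambda pid: prompt_order[pid]):
        ordered_results[prompt_id] = results[prompt_id]
    return ordered_results

print(list(asyncio.run(main()).keys()))  # ['a', 'b', 'c'] regardless of completion order
```

Without the final reordering step, the keys of `results` would reflect completion order, which can vary from run to run.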
@@ -348,10 +359,11 @@ def process_prompts_batch(
         force: If True, force regeneration even if cached response exists

     Returns:
-        Dict mapping prompt IDs to their responses
+        Dict mapping prompt IDs to their responses, ordered by input sequence

     Note:
         Either prompts or input_dir must be provided, but not both.
+        Results are returned in the same order as the input prompts.

     Example:
         >>> from llm_batch_helper import LLMConfig, process_prompts_batch
@@ -361,6 +373,7 @@ def process_prompts_batch(
         ...     config=config,
         ...     provider="openai"
         ... )
+        >>> # Results will be in the same order as input prompts
     """
     return _run_async_function(
         process_prompts_batch_async,
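Putting the docstring example together, a call would look roughly like the sketch below. The `LLMConfig` arguments are placeholders (the exact constructor signature is not shown in this diff), and the prompt IDs are invented; only the import line and the `prompts`/`config`/`provider` keyword arguments come from the docstring itself.

```python
from llm_batch_helper import LLMConfig, process_prompts_batch

# Placeholder config: the exact LLMConfig fields are assumptions, not shown in this diff.
config = LLMConfig(model_name="gpt-4o-mini")

# Prompts as (id, text) pairs, matching the (prompt_id, prompt_text) unpacking in providers.py.
prompts = [("q1", "What is 2 + 2?"), ("q2", "Name a prime number.")]

results = process_prompts_batch(
    prompts=prompts,
    config=config,
    provider="openai",
)

# As of 0.3.2 the returned dict preserves the input order.
assert list(results.keys()) == ["q1", "q2"]
```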
{llm_batch_helper-0.3.1 → llm_batch_helper-0.3.2}/pyproject.toml

@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "llm_batch_helper"
-version = "0.3.1"
+version = "0.3.2"
 description = "A Python package that enables batch submission of prompts to LLM APIs, with simplified interface and built-in async capabilities handled implicitly."
 authors = ["Tianyi Peng <tianyipeng95@gmail.com>"]
 readme = "README.md"
@@ -22,7 +22,7 @@ classifiers = [
 packages = [{include = "llm_batch_helper"}]

 [tool.poetry.dependencies]
-python = "^3.
+python = "^3.10"
 httpx = ">=0.24.0,<2.0.0"
 openai = "^1.0.0"
 tenacity = "^8.0.0"
File without changes: LICENSE, README.md, llm_batch_helper/__init__.py, llm_batch_helper/cache.py, llm_batch_helper/config.py, llm_batch_helper/exceptions.py, llm_batch_helper/input_handlers.py