lm-deluge 0.0.39__tar.gz → 0.0.40__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {lm_deluge-0.0.39/src/lm_deluge.egg-info → lm_deluge-0.0.40}/PKG-INFO +7 -4
- {lm_deluge-0.0.39 → lm_deluge-0.0.40}/README.md +6 -3
- {lm_deluge-0.0.39 → lm_deluge-0.0.40}/pyproject.toml +1 -1
- {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/client.py +37 -1
- {lm_deluge-0.0.39 → lm_deluge-0.0.40/src/lm_deluge.egg-info}/PKG-INFO +7 -4
- {lm_deluge-0.0.39 → lm_deluge-0.0.40}/LICENSE +0 -0
- {lm_deluge-0.0.39 → lm_deluge-0.0.40}/setup.cfg +0 -0
- {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/__init__.py +0 -0
- {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/agent.py +0 -0
- {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/api_requests/__init__.py +0 -0
- {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/api_requests/anthropic.py +0 -0
- {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/api_requests/base.py +0 -0
- {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/api_requests/bedrock.py +0 -0
- {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/api_requests/common.py +0 -0
- {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/api_requests/deprecated/bedrock.py +0 -0
- {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/api_requests/deprecated/cohere.py +0 -0
- {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/api_requests/deprecated/deepseek.py +0 -0
- {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/api_requests/deprecated/mistral.py +0 -0
- {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/api_requests/deprecated/vertex.py +0 -0
- {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/api_requests/gemini.py +0 -0
- {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/api_requests/mistral.py +0 -0
- {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/api_requests/openai.py +0 -0
- {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/api_requests/response.py +0 -0
- {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/batches.py +0 -0
- {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/built_in_tools/anthropic/__init__.py +0 -0
- {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/built_in_tools/anthropic/bash.py +0 -0
- {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/built_in_tools/anthropic/computer_use.py +0 -0
- {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/built_in_tools/anthropic/editor.py +0 -0
- {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/built_in_tools/base.py +0 -0
- {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/built_in_tools/openai.py +0 -0
- {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/cache.py +0 -0
- {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/cli.py +0 -0
- {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/config.py +0 -0
- {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/embed.py +0 -0
- {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/errors.py +0 -0
- {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/file.py +0 -0
- {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/gemini_limits.py +0 -0
- {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/image.py +0 -0
- {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/llm_tools/__init__.py +0 -0
- {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/llm_tools/classify.py +0 -0
- {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/llm_tools/extract.py +0 -0
- {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/llm_tools/locate.py +0 -0
- {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/llm_tools/ocr.py +0 -0
- {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/llm_tools/score.py +0 -0
- {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/llm_tools/translate.py +0 -0
- {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/models/__init__.py +0 -0
- {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/models/anthropic.py +0 -0
- {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/models/bedrock.py +0 -0
- {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/models/cerebras.py +0 -0
- {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/models/cohere.py +0 -0
- {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/models/deepseek.py +0 -0
- {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/models/fireworks.py +0 -0
- {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/models/google.py +0 -0
- {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/models/grok.py +0 -0
- {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/models/groq.py +0 -0
- {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/models/meta.py +0 -0
- {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/models/mistral.py +0 -0
- {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/models/openai.py +0 -0
- {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/models/openrouter.py +0 -0
- {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/models/together.py +0 -0
- {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/prompt.py +0 -0
- {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/request_context.py +0 -0
- {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/rerank.py +0 -0
- {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/tool.py +0 -0
- {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/tracker.py +0 -0
- {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/usage.py +0 -0
- {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/util/harmony.py +0 -0
- {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/util/json.py +0 -0
- {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/util/logprobs.py +0 -0
- {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/util/spatial.py +0 -0
- {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/util/validation.py +0 -0
- {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/util/xml.py +0 -0
- {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge.egg-info/SOURCES.txt +0 -0
- {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge.egg-info/dependency_links.txt +0 -0
- {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge.egg-info/requires.txt +0 -0
- {lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge.egg-info/top_level.txt +0 -0
- {lm_deluge-0.0.39 → lm_deluge-0.0.40}/tests/test_builtin_tools.py +0 -0
- {lm_deluge-0.0.39 → lm_deluge-0.0.40}/tests/test_native_mcp_server.py +0 -0
{lm_deluge-0.0.39/src/lm_deluge.egg-info → lm_deluge-0.0.40}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: lm_deluge
-Version: 0.0.39
+Version: 0.0.40
 Summary: Python utility for using LLM API models.
 Author-email: Benjamin Anderson <ben@trytaylor.ai>
 Requires-Python: >=3.10

@@ -111,14 +111,17 @@ await client.process_prompts_async(
 
 ### Queueing individual prompts
 
-You can queue prompts one at a time and track progress explicitly
+You can queue prompts one at a time and track progress explicitly. Iterate over
+results as they finish with `as_completed` (or gather them all at once with
+`wait_for_all`):
 
 ```python
 client = LLMClient("gpt-4.1-mini", progress="tqdm")
 client.open()
-
+client.start_nowait("hello there")
 # ... queue more tasks ...
-
+async for task_id, result in client.as_completed():
+    print(task_id, result.completion)
 client.close()
 ```
 
{lm_deluge-0.0.39 → lm_deluge-0.0.40}/README.md

@@ -84,14 +84,17 @@ await client.process_prompts_async(
 
 ### Queueing individual prompts
 
-You can queue prompts one at a time and track progress explicitly
+You can queue prompts one at a time and track progress explicitly. Iterate over
+results as they finish with `as_completed` (or gather them all at once with
+`wait_for_all`):
 
 ```python
 client = LLMClient("gpt-4.1-mini", progress="tqdm")
 client.open()
-
+client.start_nowait("hello there")
 # ... queue more tasks ...
-
+async for task_id, result in client.as_completed():
+    print(task_id, result.completion)
 client.close()
 ```
 
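The added README text names `wait_for_all` as the batch-style alternative to `as_completed`. A minimal sketch of that variant, assuming the same client setup as the snippet above; pairing IDs with results by position is an illustrative assumption, based on `wait_for_all` returning one result per queued task:

```python
# Hypothetical sketch: gather every queued result at once with wait_for_all
# instead of iterating with as_completed. The positional id/result pairing
# below is an assumption for illustration.
client = LLMClient("gpt-4.1-mini", progress="tqdm")
client.open()
ids = [client.start_nowait(f"prompt {i}") for i in range(3)]
results = await client.wait_for_all()  # one result per queued task
for task_id, result in zip(ids, results):
    print(task_id, result.completion if result else None)
client.close()
```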
{lm_deluge-0.0.39 → lm_deluge-0.0.40}/src/lm_deluge/client.py

@@ -1,6 +1,6 @@
 import asyncio
 import random
-from typing import Any, Callable, Literal, Self, Sequence, overload
+from typing import Any, AsyncGenerator, Callable, Literal, Self, Sequence, overload
 
 import numpy as np
 import yaml

@@ -557,6 +557,42 @@ class _LLMClient(BaseModel):
         task_ids = list(self._tasks.keys())
         return [await self.wait_for(tid) for tid in task_ids]
 
+    async def as_completed(
+        self, task_ids: Sequence[int] | None = None
+    ) -> AsyncGenerator[tuple[int, APIResponse | None], None]:
+        """Yield ``(task_id, result)`` pairs as tasks complete.
+
+        Args:
+            task_ids: Optional sequence of task IDs to wait on. If ``None``,
+                all queued tasks are watched.
+
+        Yields:
+            Tuples of task ID and ``APIResponse`` as each task finishes.
+        """
+
+        if task_ids is None:
+            tasks_map: dict[asyncio.Task, int] = {
+                task: tid for tid, task in self._tasks.items()
+            }
+        else:
+            tasks_map = {
+                self._tasks[tid]: tid for tid in task_ids if tid in self._tasks
+            }
+
+        # Yield any tasks that have already completed
+        for task in list(tasks_map.keys()):
+            if task.done():
+                tid = tasks_map.pop(task)
+                yield tid, self._results.get(tid, await task)
+
+        while tasks_map:
+            done, _ = await asyncio.wait(
+                set(tasks_map.keys()), return_when=asyncio.FIRST_COMPLETED
+            )
+            for task in done:
+                tid = tasks_map.pop(task)
+                yield tid, self._results.get(tid, await task)
+
     async def stream(
         self,
         prompt: str | Conversation,
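The new method follows a two-phase completion-order pattern: first drain tasks that are already done, then loop on `asyncio.wait(..., return_when=FIRST_COMPLETED)` until the task map empties. A self-contained sketch of that same pattern with toy tasks (illustrative names only, not lm_deluge's internals):

```python
import asyncio
import random


async def job(i: int) -> str:
    # Stand-in for an API request with variable latency.
    await asyncio.sleep(random.random())
    return f"result-{i}"


async def as_completed_pairs(tasks_map: dict[asyncio.Task, int]):
    # Phase 1: yield anything that finished before we started watching.
    for task in list(tasks_map):
        if task.done():
            yield tasks_map.pop(task), await task
    # Phase 2: wait on the rest, yielding each batch as it completes.
    while tasks_map:
        done, _ = await asyncio.wait(
            set(tasks_map.keys()), return_when=asyncio.FIRST_COMPLETED
        )
        for task in done:
            yield tasks_map.pop(task), await task


async def main() -> None:
    tasks = {asyncio.create_task(job(i)): i for i in range(5)}
    async for tid, result in as_completed_pairs(tasks):
        print(tid, result)  # printed in arrival order, not submission order


asyncio.run(main())
```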
{lm_deluge-0.0.39 → lm_deluge-0.0.40/src/lm_deluge.egg-info}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: lm_deluge
-Version: 0.0.39
+Version: 0.0.40
 Summary: Python utility for using LLM API models.
 Author-email: Benjamin Anderson <ben@trytaylor.ai>
 Requires-Python: >=3.10

@@ -111,14 +111,17 @@ await client.process_prompts_async(
 
 ### Queueing individual prompts
 
-You can queue prompts one at a time and track progress explicitly
+You can queue prompts one at a time and track progress explicitly. Iterate over
+results as they finish with `as_completed` (or gather them all at once with
+`wait_for_all`):
 
 ```python
 client = LLMClient("gpt-4.1-mini", progress="tqdm")
 client.open()
-
+client.start_nowait("hello there")
 # ... queue more tasks ...
-
+async for task_id, result in client.as_completed():
+    print(task_id, result.completion)
 client.close()
 ```
 