lm-deluge 0.0.38__tar.gz → 0.0.40__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of lm-deluge has been flagged as possibly problematic by the registry.
- {lm_deluge-0.0.38/src/lm_deluge.egg-info → lm_deluge-0.0.40}/PKG-INFO +7 -4
- {lm_deluge-0.0.38 → lm_deluge-0.0.40}/README.md +6 -3
- {lm_deluge-0.0.38 → lm_deluge-0.0.40}/pyproject.toml +1 -1
- {lm_deluge-0.0.38 → lm_deluge-0.0.40}/src/lm_deluge/client.py +49 -11
- {lm_deluge-0.0.38 → lm_deluge-0.0.40}/src/lm_deluge/prompt.py +5 -0
- {lm_deluge-0.0.38 → lm_deluge-0.0.40}/src/lm_deluge/tracker.py +3 -4
- {lm_deluge-0.0.38 → lm_deluge-0.0.40/src/lm_deluge.egg-info}/PKG-INFO +7 -4
- {lm_deluge-0.0.38 → lm_deluge-0.0.40}/LICENSE +0 -0
- {lm_deluge-0.0.38 → lm_deluge-0.0.40}/setup.cfg +0 -0
- {lm_deluge-0.0.38 → lm_deluge-0.0.40}/src/lm_deluge/__init__.py +0 -0
- {lm_deluge-0.0.38 → lm_deluge-0.0.40}/src/lm_deluge/agent.py +0 -0
- {lm_deluge-0.0.38 → lm_deluge-0.0.40}/src/lm_deluge/api_requests/__init__.py +0 -0
- {lm_deluge-0.0.38 → lm_deluge-0.0.40}/src/lm_deluge/api_requests/anthropic.py +0 -0
- {lm_deluge-0.0.38 → lm_deluge-0.0.40}/src/lm_deluge/api_requests/base.py +0 -0
- {lm_deluge-0.0.38 → lm_deluge-0.0.40}/src/lm_deluge/api_requests/bedrock.py +0 -0
- {lm_deluge-0.0.38 → lm_deluge-0.0.40}/src/lm_deluge/api_requests/common.py +0 -0
- {lm_deluge-0.0.38 → lm_deluge-0.0.40}/src/lm_deluge/api_requests/deprecated/bedrock.py +0 -0
- {lm_deluge-0.0.38 → lm_deluge-0.0.40}/src/lm_deluge/api_requests/deprecated/cohere.py +0 -0
- {lm_deluge-0.0.38 → lm_deluge-0.0.40}/src/lm_deluge/api_requests/deprecated/deepseek.py +0 -0
- {lm_deluge-0.0.38 → lm_deluge-0.0.40}/src/lm_deluge/api_requests/deprecated/mistral.py +0 -0
- {lm_deluge-0.0.38 → lm_deluge-0.0.40}/src/lm_deluge/api_requests/deprecated/vertex.py +0 -0
- {lm_deluge-0.0.38 → lm_deluge-0.0.40}/src/lm_deluge/api_requests/gemini.py +0 -0
- {lm_deluge-0.0.38 → lm_deluge-0.0.40}/src/lm_deluge/api_requests/mistral.py +0 -0
- {lm_deluge-0.0.38 → lm_deluge-0.0.40}/src/lm_deluge/api_requests/openai.py +0 -0
- {lm_deluge-0.0.38 → lm_deluge-0.0.40}/src/lm_deluge/api_requests/response.py +0 -0
- {lm_deluge-0.0.38 → lm_deluge-0.0.40}/src/lm_deluge/batches.py +0 -0
- {lm_deluge-0.0.38 → lm_deluge-0.0.40}/src/lm_deluge/built_in_tools/anthropic/__init__.py +0 -0
- {lm_deluge-0.0.38 → lm_deluge-0.0.40}/src/lm_deluge/built_in_tools/anthropic/bash.py +0 -0
- {lm_deluge-0.0.38 → lm_deluge-0.0.40}/src/lm_deluge/built_in_tools/anthropic/computer_use.py +0 -0
- {lm_deluge-0.0.38 → lm_deluge-0.0.40}/src/lm_deluge/built_in_tools/anthropic/editor.py +0 -0
- {lm_deluge-0.0.38 → lm_deluge-0.0.40}/src/lm_deluge/built_in_tools/base.py +0 -0
- {lm_deluge-0.0.38 → lm_deluge-0.0.40}/src/lm_deluge/built_in_tools/openai.py +0 -0
- {lm_deluge-0.0.38 → lm_deluge-0.0.40}/src/lm_deluge/cache.py +0 -0
- {lm_deluge-0.0.38 → lm_deluge-0.0.40}/src/lm_deluge/cli.py +0 -0
- {lm_deluge-0.0.38 → lm_deluge-0.0.40}/src/lm_deluge/config.py +0 -0
- {lm_deluge-0.0.38 → lm_deluge-0.0.40}/src/lm_deluge/embed.py +0 -0
- {lm_deluge-0.0.38 → lm_deluge-0.0.40}/src/lm_deluge/errors.py +0 -0
- {lm_deluge-0.0.38 → lm_deluge-0.0.40}/src/lm_deluge/file.py +0 -0
- {lm_deluge-0.0.38 → lm_deluge-0.0.40}/src/lm_deluge/gemini_limits.py +0 -0
- {lm_deluge-0.0.38 → lm_deluge-0.0.40}/src/lm_deluge/image.py +0 -0
- {lm_deluge-0.0.38 → lm_deluge-0.0.40}/src/lm_deluge/llm_tools/__init__.py +0 -0
- {lm_deluge-0.0.38 → lm_deluge-0.0.40}/src/lm_deluge/llm_tools/classify.py +0 -0
- {lm_deluge-0.0.38 → lm_deluge-0.0.40}/src/lm_deluge/llm_tools/extract.py +0 -0
- {lm_deluge-0.0.38 → lm_deluge-0.0.40}/src/lm_deluge/llm_tools/locate.py +0 -0
- {lm_deluge-0.0.38 → lm_deluge-0.0.40}/src/lm_deluge/llm_tools/ocr.py +0 -0
- {lm_deluge-0.0.38 → lm_deluge-0.0.40}/src/lm_deluge/llm_tools/score.py +0 -0
- {lm_deluge-0.0.38 → lm_deluge-0.0.40}/src/lm_deluge/llm_tools/translate.py +0 -0
- {lm_deluge-0.0.38 → lm_deluge-0.0.40}/src/lm_deluge/models/__init__.py +0 -0
- {lm_deluge-0.0.38 → lm_deluge-0.0.40}/src/lm_deluge/models/anthropic.py +0 -0
- {lm_deluge-0.0.38 → lm_deluge-0.0.40}/src/lm_deluge/models/bedrock.py +0 -0
- {lm_deluge-0.0.38 → lm_deluge-0.0.40}/src/lm_deluge/models/cerebras.py +0 -0
- {lm_deluge-0.0.38 → lm_deluge-0.0.40}/src/lm_deluge/models/cohere.py +0 -0
- {lm_deluge-0.0.38 → lm_deluge-0.0.40}/src/lm_deluge/models/deepseek.py +0 -0
- {lm_deluge-0.0.38 → lm_deluge-0.0.40}/src/lm_deluge/models/fireworks.py +0 -0
- {lm_deluge-0.0.38 → lm_deluge-0.0.40}/src/lm_deluge/models/google.py +0 -0
- {lm_deluge-0.0.38 → lm_deluge-0.0.40}/src/lm_deluge/models/grok.py +0 -0
- {lm_deluge-0.0.38 → lm_deluge-0.0.40}/src/lm_deluge/models/groq.py +0 -0
- {lm_deluge-0.0.38 → lm_deluge-0.0.40}/src/lm_deluge/models/meta.py +0 -0
- {lm_deluge-0.0.38 → lm_deluge-0.0.40}/src/lm_deluge/models/mistral.py +0 -0
- {lm_deluge-0.0.38 → lm_deluge-0.0.40}/src/lm_deluge/models/openai.py +0 -0
- {lm_deluge-0.0.38 → lm_deluge-0.0.40}/src/lm_deluge/models/openrouter.py +0 -0
- {lm_deluge-0.0.38 → lm_deluge-0.0.40}/src/lm_deluge/models/together.py +0 -0
- {lm_deluge-0.0.38 → lm_deluge-0.0.40}/src/lm_deluge/request_context.py +0 -0
- {lm_deluge-0.0.38 → lm_deluge-0.0.40}/src/lm_deluge/rerank.py +0 -0
- {lm_deluge-0.0.38 → lm_deluge-0.0.40}/src/lm_deluge/tool.py +0 -0
- {lm_deluge-0.0.38 → lm_deluge-0.0.40}/src/lm_deluge/usage.py +0 -0
- {lm_deluge-0.0.38 → lm_deluge-0.0.40}/src/lm_deluge/util/harmony.py +0 -0
- {lm_deluge-0.0.38 → lm_deluge-0.0.40}/src/lm_deluge/util/json.py +0 -0
- {lm_deluge-0.0.38 → lm_deluge-0.0.40}/src/lm_deluge/util/logprobs.py +0 -0
- {lm_deluge-0.0.38 → lm_deluge-0.0.40}/src/lm_deluge/util/spatial.py +0 -0
- {lm_deluge-0.0.38 → lm_deluge-0.0.40}/src/lm_deluge/util/validation.py +0 -0
- {lm_deluge-0.0.38 → lm_deluge-0.0.40}/src/lm_deluge/util/xml.py +0 -0
- {lm_deluge-0.0.38 → lm_deluge-0.0.40}/src/lm_deluge.egg-info/SOURCES.txt +0 -0
- {lm_deluge-0.0.38 → lm_deluge-0.0.40}/src/lm_deluge.egg-info/dependency_links.txt +0 -0
- {lm_deluge-0.0.38 → lm_deluge-0.0.40}/src/lm_deluge.egg-info/requires.txt +0 -0
- {lm_deluge-0.0.38 → lm_deluge-0.0.40}/src/lm_deluge.egg-info/top_level.txt +0 -0
- {lm_deluge-0.0.38 → lm_deluge-0.0.40}/tests/test_builtin_tools.py +0 -0
- {lm_deluge-0.0.38 → lm_deluge-0.0.40}/tests/test_native_mcp_server.py +0 -0
{lm_deluge-0.0.38/src/lm_deluge.egg-info → lm_deluge-0.0.40}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: lm_deluge
-Version: 0.0.38
+Version: 0.0.40
 Summary: Python utility for using LLM API models.
 Author-email: Benjamin Anderson <ben@trytaylor.ai>
 Requires-Python: >=3.10

@@ -111,14 +111,17 @@ await client.process_prompts_async(
 
 ### Queueing individual prompts
 
-You can queue prompts one at a time and track progress explicitly
+You can queue prompts one at a time and track progress explicitly. Iterate over
+results as they finish with `as_completed` (or gather them all at once with
+`wait_for_all`):
 
 ```python
 client = LLMClient("gpt-4.1-mini", progress="tqdm")
 client.open()
-
+client.start_nowait("hello there")
 # ... queue more tasks ...
-
+async for task_id, result in client.as_completed():
+    print(task_id, result.completion)
 client.close()
 ```
 
{lm_deluge-0.0.38 → lm_deluge-0.0.40}/README.md

@@ -84,14 +84,17 @@ await client.process_prompts_async(
 
 ### Queueing individual prompts
 
-You can queue prompts one at a time and track progress explicitly
+You can queue prompts one at a time and track progress explicitly. Iterate over
+results as they finish with `as_completed` (or gather them all at once with
+`wait_for_all`):
 
 ```python
 client = LLMClient("gpt-4.1-mini", progress="tqdm")
 client.open()
-
+client.start_nowait("hello there")
 # ... queue more tasks ...
-
+async for task_id, result in client.as_completed():
+    print(task_id, result.completion)
 client.close()
 ```
 
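The README snippet above drains results with `as_completed`; the other option mentioned in the new prose is `wait_for_all`. A minimal sketch of the same queueing flow collected in one shot, assuming the top-level `from lm_deluge import LLMClient` import and a no-argument `wait_for_all()` (suggested by the client.py context lines further down, which await every queued task); the prompt strings are illustrative:

```python
import asyncio

from lm_deluge import LLMClient  # assumed top-level export


async def main() -> None:
    client = LLMClient("gpt-4.1-mini", progress="tqdm")
    client.open()
    # Queue prompts without awaiting them individually.
    for prompt in ["hello there", "name three primes", "what is 2 + 2?"]:
        client.start_nowait(prompt)
    # Gather every queued result at once instead of iterating with as_completed().
    results = await client.wait_for_all()
    for result in results:
        print(result.completion if result else None)
    client.close()


# asyncio.run(main())  # requires API credentials to actually run
```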
{lm_deluge-0.0.38 → lm_deluge-0.0.40}/src/lm_deluge/client.py

@@ -1,6 +1,6 @@
 import asyncio
 import random
-from typing import Any, Literal, Self, Sequence,
+from typing import Any, AsyncGenerator, Callable, Literal, Self, Sequence, overload
 
 import numpy as np
 import yaml
@@ -375,16 +375,16 @@ class _LLMClient(BaseModel):
 
         # Main dispatch loop - using original pattern but with all prompts
         next_context = None  # Persist across iterations like original
+        next_is_retry = False  # Track whether next_context is a retry
         prompts_not_finished = True
         prompts_iter = iter(zip(ids, prompts))
 
         while True:
             # Get next context (retry or new) - only if we don't already have one waiting
-            retry_request = False
             if next_context is None:
                 if not retry_queue.empty():
                     next_context = retry_queue.get_nowait()
-
+                    next_is_retry = True
                     print(f"Retrying request {next_context.task_id}.")
                 elif prompts_not_finished:
                     try:
@@ -405,6 +405,8 @@ class _LLMClient(BaseModel):
                             extra_headers=self.extra_headers,
                             force_local_mcp=self.force_local_mcp,
                         )
+
+                        next_is_retry = False
                     except StopIteration:
                         prompts_not_finished = False
 
@@ -413,7 +415,7 @@ class _LLMClient(BaseModel):
 
             # Dispatch if capacity available - original logic
             if next_context:
-                if tracker.check_capacity(next_context.num_tokens, retry=
+                if tracker.check_capacity(next_context.num_tokens, retry=next_is_retry):
                     tracker.set_limiting_factor(None)
 
                     # Launch simplified request processing
@@ -441,16 +443,16 @@ class _LLMClient(BaseModel):
 
                     asyncio.create_task(process_and_store(next_context))
                     next_context = None  # Reset after successful dispatch
+                    next_is_retry = False
 
             # Update progress - original logic
             tracker.update_pbar()
 
-            # Check completion -
-            if (
-
-
-
-            ):
+            # Check completion: consider final outcomes, not in-progress count
+            # This avoids rare hangs if in-progress is miscounted (e.g., double-increment).
+            if (tracker.num_tasks_succeeded + tracker.num_tasks_failed) >= len(
+                prompts
+            ) and retry_queue.empty():
                 break
 
             # Sleep - original logic
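Two behavioral changes land in the dispatch loop above: the per-iteration `retry_request = False` reset is replaced by `next_is_retry`, which stays attached to a pending context until it is actually dispatched (so a retry that has to wait for capacity is still treated as a retry), and the completion check now counts terminal outcomes plus an empty retry queue instead of an in-progress counter. A minimal, self-contained model of that shape; the failure simulation and capacity handling here are stand-ins, not lm_deluge internals:

```python
import asyncio
import random


async def dispatch(prompts: list[str]) -> None:
    retry_queue: asyncio.Queue[str] = asyncio.Queue()
    succeeded = failed = 0
    pending = iter(prompts)
    in_flight: set[asyncio.Task] = set()
    next_item: str | None = None
    next_is_retry = False  # persists until the item is actually dispatched

    async def attempt(item: str, is_retry: bool) -> None:
        nonlocal succeeded, failed
        await asyncio.sleep(0.01)
        if random.random() < 0.3:
            if is_retry:
                failed += 1                  # give up after one retry
            else:
                await retry_queue.put(item)  # transient failure: queue a retry
        else:
            succeeded += 1

    while True:
        if next_item is None:
            if not retry_queue.empty():
                next_item = retry_queue.get_nowait()
                next_is_retry = True
            else:
                next_item = next(pending, None)
                next_is_retry = False

        if next_item is not None:
            # A real client would gate this on rate limits, passing retry=next_is_retry.
            task = asyncio.create_task(attempt(next_item, next_is_retry))
            in_flight.add(task)
            task.add_done_callback(in_flight.discard)
            next_item, next_is_retry = None, False

        # Completion is judged on terminal outcomes, not an in-flight counter.
        if succeeded + failed >= len(prompts) and retry_queue.empty():
            break
        await asyncio.sleep(0.005)

    print(f"{succeeded} succeeded, {failed} failed")


asyncio.run(dispatch([f"prompt-{i}" for i in range(5)]))
```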
@@ -555,6 +557,42 @@ class _LLMClient(BaseModel):
         task_ids = list(self._tasks.keys())
         return [await self.wait_for(tid) for tid in task_ids]
 
+    async def as_completed(
+        self, task_ids: Sequence[int] | None = None
+    ) -> AsyncGenerator[tuple[int, APIResponse | None], None]:
+        """Yield ``(task_id, result)`` pairs as tasks complete.
+
+        Args:
+            task_ids: Optional sequence of task IDs to wait on. If ``None``,
+                all queued tasks are watched.
+
+        Yields:
+            Tuples of task ID and ``APIResponse`` as each task finishes.
+        """
+
+        if task_ids is None:
+            tasks_map: dict[asyncio.Task, int] = {
+                task: tid for tid, task in self._tasks.items()
+            }
+        else:
+            tasks_map = {
+                self._tasks[tid]: tid for tid in task_ids if tid in self._tasks
+            }
+
+        # Yield any tasks that have already completed
+        for task in list(tasks_map.keys()):
+            if task.done():
+                tid = tasks_map.pop(task)
+                yield tid, self._results.get(tid, await task)
+
+        while tasks_map:
+            done, _ = await asyncio.wait(
+                set(tasks_map.keys()), return_when=asyncio.FIRST_COMPLETED
+            )
+            for task in done:
+                tid = tasks_map.pop(task)
+                yield tid, self._results.get(tid, await task)
+
     async def stream(
         self,
         prompt: str | Conversation,
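The new `as_completed` helper is a thin wrapper around `asyncio.wait` with `FIRST_COMPLETED`: it keeps a task-to-id map, first drains anything already finished, then repeatedly waits for the earliest completion. The same pattern in isolation, with a placeholder `work` coroutine that is not an lm_deluge API:

```python
import asyncio
import random


async def work(task_id: int) -> str:
    await asyncio.sleep(random.uniform(0.0, 0.2))
    return f"result-{task_id}"


async def main() -> None:
    # Map each task back to the id it was queued under.
    tasks_map: dict[asyncio.Task, int] = {
        asyncio.create_task(work(tid)): tid for tid in range(5)
    }
    while tasks_map:
        done, _ = await asyncio.wait(
            set(tasks_map.keys()), return_when=asyncio.FIRST_COMPLETED
        )
        for task in done:
            tid = tasks_map.pop(task)
            print(tid, task.result())  # completion order, not submission order


asyncio.run(main())
```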
@@ -616,7 +654,7 @@ class _LLMClient(BaseModel):
             if last_response is None or last_response.content is None:
                 break
 
-            conversation.
+            conversation = conversation.with_message(last_response.content)
 
             tool_calls = last_response.content.tool_calls
             if not tool_calls:
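The one-line fix above matters because an agent loop has to carry the assistant's reply forward: the reply is appended to the conversation before its tool calls are inspected, otherwise the next model call never sees it. The generic shape of that loop, using hypothetical stand-in types rather than lm_deluge's `Conversation`/`Message`:

```python
from dataclasses import dataclass, field


@dataclass
class Turn:
    role: str
    text: str
    tool_calls: list[str] = field(default_factory=list)


def call_model(history: list[Turn]) -> Turn:
    # Placeholder "model": answers once and requests no tools.
    return Turn(role="assistant", text=f"echo: {history[-1].text}")


def run_agent(user_text: str, max_rounds: int = 5) -> list[Turn]:
    conversation = [Turn(role="user", text=user_text)]
    for _ in range(max_rounds):
        reply = call_model(conversation)
        conversation.append(reply)  # the step the diff above fixes: keep the assistant turn
        if not reply.tool_calls:    # no tools requested -> done
            break
        # ... execute tools and append their results here ...
    return conversation


print([t.text for t in run_agent("hi")])
```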
{lm_deluge-0.0.38 → lm_deluge-0.0.40}/src/lm_deluge/prompt.py

@@ -658,6 +658,11 @@ class Conversation:
         self.messages.append(msg)
         return self
 
+    # another way of doing the same thing
+    def add(self, msg: Message) -> "Conversation":
+        self.messages.append(msg)
+        return self
+
     def with_tool_result(
         self, tool_call_id: str, result: str | list[ToolResultPart]
     ) -> "Conversation":
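Like the method directly above it, the new `add` appends and then returns `self`, so message-building calls chain. A self-contained mimic of that pattern (these classes are illustrative, not the package's real `Message`/`Conversation`):

```python
class Msg:
    def __init__(self, role: str, text: str) -> None:
        self.role, self.text = role, text


class Convo:
    def __init__(self) -> None:
        self.messages: list[Msg] = []

    def add(self, msg: Msg) -> "Convo":
        self.messages.append(msg)
        return self  # returning self is what enables fluent chaining


convo = Convo().add(Msg("system", "be brief")).add(Msg("user", "hi"))
print([m.role for m in convo.messages])  # ['system', 'user']
```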
{lm_deluge-0.0.38 → lm_deluge-0.0.40}/src/lm_deluge/tracker.py

@@ -10,6 +10,7 @@ from rich.progress import (
     MofNCompleteColumn,
     Progress,
     SpinnerColumn,
+    TaskID,
     TextColumn,
 )
 from rich.text import Text
@@ -44,7 +45,7 @@ class StatusTracker:
     _rich_console: Console | None = None
     _rich_live: object | None = None
     _rich_progress: Progress | None = None
-    _rich_task_id:
+    _rich_task_id: TaskID | None = None
     _rich_display_task: asyncio.Task | None = None
     _rich_stop_event: asyncio.Event | None = None
 
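`TaskID` is the type that `rich.progress.Progress.add_task` returns, so annotating the stored handle as `TaskID | None` matches rich's own API. A minimal rich-only example, independent of lm_deluge:

```python
import time

from rich.progress import Progress, TaskID

with Progress() as progress:
    # add_task hands back a TaskID; later updates address the bar through it.
    task_id: TaskID = progress.add_task("processing", total=10)
    for _ in range(10):
        time.sleep(0.05)
        progress.update(task_id, advance=1)
```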
@@ -244,9 +245,7 @@ class StatusTracker:
         """Initialize manual progress printer."""
         self.progress_bar_total = total
         self._manual_stop_event = asyncio.Event()
-        self._manual_display_task = asyncio.create_task(
-            self._manual_display_updater()
-        )
+        self._manual_display_task = asyncio.create_task(self._manual_display_updater())
 
     async def _manual_display_updater(self):
         if self._manual_stop_event is None:
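The tracker change above only reflows the `asyncio.create_task(...)` call onto one line, but the surrounding pattern is worth spelling out: spawn a background display task, let it poll until an `asyncio.Event` is set, then await it on shutdown. An illustrative stand-in, not `StatusTracker`'s real code:

```python
import asyncio


class ManualPrinter:
    def __init__(self, total: int) -> None:
        self.total = total
        self.done = 0
        self._stop_event = asyncio.Event()
        # Requires a running event loop, so construct inside async code.
        self._display_task = asyncio.create_task(self._updater())

    async def _updater(self) -> None:
        while not self._stop_event.is_set():
            print(f"{self.done}/{self.total} complete", end="\r")
            await asyncio.sleep(0.1)

    async def close(self) -> None:
        self._stop_event.set()
        await self._display_task  # let the updater exit cleanly


async def main() -> None:
    printer = ManualPrinter(total=3)
    for _ in range(3):
        await asyncio.sleep(0.2)
        printer.done += 1
    await printer.close()


asyncio.run(main())
```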
{lm_deluge-0.0.38 → lm_deluge-0.0.40/src/lm_deluge.egg-info}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: lm_deluge
-Version: 0.0.38
+Version: 0.0.40
 Summary: Python utility for using LLM API models.
 Author-email: Benjamin Anderson <ben@trytaylor.ai>
 Requires-Python: >=3.10

@@ -111,14 +111,17 @@ await client.process_prompts_async(
 
 ### Queueing individual prompts
 
-You can queue prompts one at a time and track progress explicitly
+You can queue prompts one at a time and track progress explicitly. Iterate over
+results as they finish with `as_completed` (or gather them all at once with
+`wait_for_all`):
 
 ```python
 client = LLMClient("gpt-4.1-mini", progress="tqdm")
 client.open()
-
+client.start_nowait("hello there")
 # ... queue more tasks ...
-
+async for task_id, result in client.as_completed():
+    print(task_id, result.completion)
 client.close()
 ```
 
All remaining files (listed above with +0 -0) were renamed from the lm_deluge-0.0.38 prefix to the lm_deluge-0.0.40 prefix with no content changes.