lm-deluge 0.0.75__tar.gz → 0.0.78__tar.gz
This diff compares publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the packages exactly as they appear in their public registry.
- {lm_deluge-0.0.75/src/lm_deluge.egg-info → lm_deluge-0.0.78}/PKG-INFO +2 -1
- {lm_deluge-0.0.75 → lm_deluge-0.0.78}/README.md +1 -0
- {lm_deluge-0.0.75 → lm_deluge-0.0.78}/pyproject.toml +1 -1
- {lm_deluge-0.0.75 → lm_deluge-0.0.78}/src/lm_deluge/client.py +101 -20
- {lm_deluge-0.0.75 → lm_deluge-0.0.78}/src/lm_deluge/llm_tools/__init__.py +8 -1
- lm_deluge-0.0.78/src/lm_deluge/llm_tools/filesystem.py +821 -0
- lm_deluge-0.0.78/src/lm_deluge/llm_tools/subagents.py +233 -0
- lm_deluge-0.0.78/src/lm_deluge/llm_tools/todos.py +342 -0
- {lm_deluge-0.0.75 → lm_deluge-0.0.78/src/lm_deluge.egg-info}/PKG-INFO +2 -1
- {lm_deluge-0.0.75 → lm_deluge-0.0.78}/src/lm_deluge.egg-info/SOURCES.txt +5 -0
- lm_deluge-0.0.78/tests/test_filesystem.py +119 -0
- lm_deluge-0.0.78/tests/test_filesystem_live.py +82 -0
- {lm_deluge-0.0.75 → lm_deluge-0.0.78}/LICENSE +0 -0
- {lm_deluge-0.0.75 → lm_deluge-0.0.78}/setup.cfg +0 -0
- {lm_deluge-0.0.75 → lm_deluge-0.0.78}/src/lm_deluge/__init__.py +0 -0
- {lm_deluge-0.0.75 → lm_deluge-0.0.78}/src/lm_deluge/api_requests/__init__.py +0 -0
- {lm_deluge-0.0.75 → lm_deluge-0.0.78}/src/lm_deluge/api_requests/anthropic.py +0 -0
- {lm_deluge-0.0.75 → lm_deluge-0.0.78}/src/lm_deluge/api_requests/base.py +0 -0
- {lm_deluge-0.0.75 → lm_deluge-0.0.78}/src/lm_deluge/api_requests/bedrock.py +0 -0
- {lm_deluge-0.0.75 → lm_deluge-0.0.78}/src/lm_deluge/api_requests/chat_reasoning.py +0 -0
- {lm_deluge-0.0.75 → lm_deluge-0.0.78}/src/lm_deluge/api_requests/common.py +0 -0
- {lm_deluge-0.0.75 → lm_deluge-0.0.78}/src/lm_deluge/api_requests/deprecated/bedrock.py +0 -0
- {lm_deluge-0.0.75 → lm_deluge-0.0.78}/src/lm_deluge/api_requests/deprecated/cohere.py +0 -0
- {lm_deluge-0.0.75 → lm_deluge-0.0.78}/src/lm_deluge/api_requests/deprecated/deepseek.py +0 -0
- {lm_deluge-0.0.75 → lm_deluge-0.0.78}/src/lm_deluge/api_requests/deprecated/mistral.py +0 -0
- {lm_deluge-0.0.75 → lm_deluge-0.0.78}/src/lm_deluge/api_requests/deprecated/vertex.py +0 -0
- {lm_deluge-0.0.75 → lm_deluge-0.0.78}/src/lm_deluge/api_requests/gemini.py +0 -0
- {lm_deluge-0.0.75 → lm_deluge-0.0.78}/src/lm_deluge/api_requests/mistral.py +0 -0
- {lm_deluge-0.0.75 → lm_deluge-0.0.78}/src/lm_deluge/api_requests/openai.py +0 -0
- {lm_deluge-0.0.75 → lm_deluge-0.0.78}/src/lm_deluge/api_requests/response.py +0 -0
- {lm_deluge-0.0.75 → lm_deluge-0.0.78}/src/lm_deluge/batches.py +0 -0
- {lm_deluge-0.0.75 → lm_deluge-0.0.78}/src/lm_deluge/built_in_tools/anthropic/__init__.py +0 -0
- {lm_deluge-0.0.75 → lm_deluge-0.0.78}/src/lm_deluge/built_in_tools/anthropic/bash.py +0 -0
- {lm_deluge-0.0.75 → lm_deluge-0.0.78}/src/lm_deluge/built_in_tools/anthropic/computer_use.py +0 -0
- {lm_deluge-0.0.75 → lm_deluge-0.0.78}/src/lm_deluge/built_in_tools/anthropic/editor.py +0 -0
- {lm_deluge-0.0.75 → lm_deluge-0.0.78}/src/lm_deluge/built_in_tools/base.py +0 -0
- {lm_deluge-0.0.75 → lm_deluge-0.0.78}/src/lm_deluge/built_in_tools/openai.py +0 -0
- {lm_deluge-0.0.75 → lm_deluge-0.0.78}/src/lm_deluge/cache.py +0 -0
- {lm_deluge-0.0.75 → lm_deluge-0.0.78}/src/lm_deluge/cli.py +0 -0
- {lm_deluge-0.0.75 → lm_deluge-0.0.78}/src/lm_deluge/config.py +0 -0
- {lm_deluge-0.0.75 → lm_deluge-0.0.78}/src/lm_deluge/embed.py +0 -0
- {lm_deluge-0.0.75 → lm_deluge-0.0.78}/src/lm_deluge/errors.py +0 -0
- {lm_deluge-0.0.75 → lm_deluge-0.0.78}/src/lm_deluge/file.py +0 -0
- {lm_deluge-0.0.75 → lm_deluge-0.0.78}/src/lm_deluge/image.py +0 -0
- {lm_deluge-0.0.75 → lm_deluge-0.0.78}/src/lm_deluge/llm_tools/classify.py +0 -0
- {lm_deluge-0.0.75 → lm_deluge-0.0.78}/src/lm_deluge/llm_tools/extract.py +0 -0
- {lm_deluge-0.0.75 → lm_deluge-0.0.78}/src/lm_deluge/llm_tools/locate.py +0 -0
- {lm_deluge-0.0.75 → lm_deluge-0.0.78}/src/lm_deluge/llm_tools/ocr.py +0 -0
- {lm_deluge-0.0.75 → lm_deluge-0.0.78}/src/lm_deluge/llm_tools/score.py +0 -0
- {lm_deluge-0.0.75 → lm_deluge-0.0.78}/src/lm_deluge/llm_tools/translate.py +0 -0
- {lm_deluge-0.0.75 → lm_deluge-0.0.78}/src/lm_deluge/mock_openai.py +0 -0
- {lm_deluge-0.0.75 → lm_deluge-0.0.78}/src/lm_deluge/models/__init__.py +0 -0
- {lm_deluge-0.0.75 → lm_deluge-0.0.78}/src/lm_deluge/models/anthropic.py +0 -0
- {lm_deluge-0.0.75 → lm_deluge-0.0.78}/src/lm_deluge/models/bedrock.py +0 -0
- {lm_deluge-0.0.75 → lm_deluge-0.0.78}/src/lm_deluge/models/cerebras.py +0 -0
- {lm_deluge-0.0.75 → lm_deluge-0.0.78}/src/lm_deluge/models/cohere.py +0 -0
- {lm_deluge-0.0.75 → lm_deluge-0.0.78}/src/lm_deluge/models/deepseek.py +0 -0
- {lm_deluge-0.0.75 → lm_deluge-0.0.78}/src/lm_deluge/models/fireworks.py +0 -0
- {lm_deluge-0.0.75 → lm_deluge-0.0.78}/src/lm_deluge/models/google.py +0 -0
- {lm_deluge-0.0.75 → lm_deluge-0.0.78}/src/lm_deluge/models/grok.py +0 -0
- {lm_deluge-0.0.75 → lm_deluge-0.0.78}/src/lm_deluge/models/groq.py +0 -0
- {lm_deluge-0.0.75 → lm_deluge-0.0.78}/src/lm_deluge/models/kimi.py +0 -0
- {lm_deluge-0.0.75 → lm_deluge-0.0.78}/src/lm_deluge/models/meta.py +0 -0
- {lm_deluge-0.0.75 → lm_deluge-0.0.78}/src/lm_deluge/models/minimax.py +0 -0
- {lm_deluge-0.0.75 → lm_deluge-0.0.78}/src/lm_deluge/models/mistral.py +0 -0
- {lm_deluge-0.0.75 → lm_deluge-0.0.78}/src/lm_deluge/models/openai.py +0 -0
- {lm_deluge-0.0.75 → lm_deluge-0.0.78}/src/lm_deluge/models/openrouter.py +0 -0
- {lm_deluge-0.0.75 → lm_deluge-0.0.78}/src/lm_deluge/models/together.py +0 -0
- {lm_deluge-0.0.75 → lm_deluge-0.0.78}/src/lm_deluge/presets/cerebras.py +0 -0
- {lm_deluge-0.0.75 → lm_deluge-0.0.78}/src/lm_deluge/presets/meta.py +0 -0
- {lm_deluge-0.0.75 → lm_deluge-0.0.78}/src/lm_deluge/prompt.py +0 -0
- {lm_deluge-0.0.75 → lm_deluge-0.0.78}/src/lm_deluge/request_context.py +0 -0
- {lm_deluge-0.0.75 → lm_deluge-0.0.78}/src/lm_deluge/rerank.py +0 -0
- {lm_deluge-0.0.75 → lm_deluge-0.0.78}/src/lm_deluge/tool.py +0 -0
- {lm_deluge-0.0.75 → lm_deluge-0.0.78}/src/lm_deluge/tracker.py +0 -0
- {lm_deluge-0.0.75 → lm_deluge-0.0.78}/src/lm_deluge/usage.py +0 -0
- {lm_deluge-0.0.75 → lm_deluge-0.0.78}/src/lm_deluge/util/harmony.py +0 -0
- {lm_deluge-0.0.75 → lm_deluge-0.0.78}/src/lm_deluge/util/json.py +0 -0
- {lm_deluge-0.0.75 → lm_deluge-0.0.78}/src/lm_deluge/util/logprobs.py +0 -0
- {lm_deluge-0.0.75 → lm_deluge-0.0.78}/src/lm_deluge/util/schema.py +0 -0
- {lm_deluge-0.0.75 → lm_deluge-0.0.78}/src/lm_deluge/util/spatial.py +0 -0
- {lm_deluge-0.0.75 → lm_deluge-0.0.78}/src/lm_deluge/util/validation.py +0 -0
- {lm_deluge-0.0.75 → lm_deluge-0.0.78}/src/lm_deluge/util/xml.py +0 -0
- {lm_deluge-0.0.75 → lm_deluge-0.0.78}/src/lm_deluge/warnings.py +0 -0
- {lm_deluge-0.0.75 → lm_deluge-0.0.78}/src/lm_deluge.egg-info/dependency_links.txt +0 -0
- {lm_deluge-0.0.75 → lm_deluge-0.0.78}/src/lm_deluge.egg-info/requires.txt +0 -0
- {lm_deluge-0.0.75 → lm_deluge-0.0.78}/src/lm_deluge.egg-info/top_level.txt +0 -0
- {lm_deluge-0.0.75 → lm_deluge-0.0.78}/tests/test_builtin_tools.py +0 -0
- {lm_deluge-0.0.75 → lm_deluge-0.0.78}/tests/test_file_upload.py +0 -0
- {lm_deluge-0.0.75 → lm_deluge-0.0.78}/tests/test_mock_openai.py +0 -0
- {lm_deluge-0.0.75 → lm_deluge-0.0.78}/tests/test_native_mcp_server.py +0 -0
- {lm_deluge-0.0.75 → lm_deluge-0.0.78}/tests/test_openrouter_generic.py +0 -0
{lm_deluge-0.0.75/src/lm_deluge.egg-info → lm_deluge-0.0.78}/PKG-INFO

```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: lm_deluge
-Version: 0.0.75
+Version: 0.0.78
 Summary: Python utility for using LLM API models.
 Author-email: Benjamin Anderson <ben@trytaylor.ai>
 Requires-Python: >=3.10
```
```diff
@@ -301,5 +301,6 @@ The `lm_deluge.llm_tools` package exposes a few helper functions:
 - `extract` – structure text or images into a Pydantic model based on a schema.
 - `translate` – translate a list of strings to English.
 - `score_llm` – simple yes/no style scoring with optional log probability output.
+- `FilesystemManager` – expose a sandboxed read/write filesystem tool (with optional regex search and `apply_patch` support) that agents can call without touching the host machine.
 
 Experimental embeddings (`embed.embed_parallel_async`) and document reranking (`rerank.rerank_parallel_async`) clients are also provided.
```
{lm_deluge-0.0.75 → lm_deluge-0.0.78}/README.md

```diff
@@ -272,5 +272,6 @@ The `lm_deluge.llm_tools` package exposes a few helper functions:
 - `extract` – structure text or images into a Pydantic model based on a schema.
 - `translate` – translate a list of strings to English.
 - `score_llm` – simple yes/no style scoring with optional log probability output.
+- `FilesystemManager` – expose a sandboxed read/write filesystem tool (with optional regex search and `apply_patch` support) that agents can call without touching the host machine.
 
 Experimental embeddings (`embed.embed_parallel_async`) and document reranking (`rerank.rerank_parallel_async`) clients are also provided.
```
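The new `FilesystemManager` is designed to slot into the agent-loop API reworked in `client.py` below. A minimal sketch of that pairing, assuming default construction and a hypothetical `.tools` accessor; only the class name, its sandboxed read/write/search/`apply_patch` role, and the `tools=` parameter of `run_agent_loop` come from this diff:

```python
# Hypothetical sketch: the constructor call and the `.tools` accessor are
# assumptions; FilesystemManager itself is new in 0.0.78.
from lm_deluge.llm_tools import FilesystemManager

async def summarize_repo(client):  # `client` is an LLMClient-style instance (assumed)
    fs = FilesystemManager()       # real kwargs (e.g. a sandbox root) may differ
    conversation, response = await client.run_agent_loop(
        "Read pyproject.toml and summarize the project.",
        tools=list(fs.tools),      # hypothetical: the manager's Tool objects
    )
    return response
```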
{lm_deluge-0.0.75 → lm_deluge-0.0.78}/src/lm_deluge/client.py

```diff
@@ -1,4 +1,5 @@
 import asyncio
+from dataclasses import dataclass
 from typing import (
     Any,
     AsyncGenerator,
```
```diff
@@ -37,6 +38,14 @@ from .request_context import RequestContext
 from .tracker import StatusTracker
 
 
+@dataclass
+class AgentLoopResponse:
+    """Wrapper for agent loop results to distinguish from single request results."""
+
+    conversation: Conversation
+    final_response: APIResponse
+
+
 # TODO: add optional max_input_tokens to client so we can reject long prompts to prevent abuse
 class _LLMClient(BaseModel):
     """
```
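`AgentLoopResponse` is a type tag more than a container: single requests and agent loops share the client's `_results` mapping (retyped as a union in the next hunk), and `isinstance` picks the unpacking. A minimal sketch of that dispatch; the `unpack_result` helper is illustrative, not part of the library:

```python
# Illustrative only: mirrors the isinstance checks added later in this diff.
from lm_deluge.client import AgentLoopResponse

def unpack_result(client, task_id: int):
    result = client._results[task_id]  # APIResponse | AgentLoopResponse (private state)
    if isinstance(result, AgentLoopResponse):
        # Agent loop: carry both the full conversation and the final response.
        return result.conversation, result.final_response
    # Single request: the stored value is already the APIResponse.
    return None, result
```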
```diff
@@ -88,7 +97,9 @@ class _LLMClient(BaseModel):
     # Internal state for async task handling
     _next_task_id: int = PrivateAttr(default=0)
     _tasks: dict[int, asyncio.Task] = PrivateAttr(default_factory=dict)
-    _results: dict[int, APIResponse] = PrivateAttr(default_factory=dict)
+    _results: dict[int, APIResponse | AgentLoopResponse] = PrivateAttr(
+        default_factory=dict
+    )
     _tracker: StatusTracker | None = PrivateAttr(default=None)
     _capacity_lock: asyncio.Lock = PrivateAttr(default_factory=asyncio.Lock)
 
```
```diff
@@ -747,11 +758,11 @@ class _LLMClient(BaseModel):
     async def wait_for(self, task_id: int) -> APIResponse:
         task = self._tasks.get(task_id)
         if task:
-            await task
-        res = self._results.get(task_id)
-        if res:
-            return res
+            result = await task
         else:
+            result = self._results.get(task_id)
+
+        if result is None:
             return APIResponse(
                 id=-1,
                 model_internal="",
```
```diff
@@ -762,6 +773,11 @@ class _LLMClient(BaseModel):
                 error_message="Task not found",
             )
 
+        assert isinstance(
+            result, APIResponse
+        ), f"Expected APIResponse, got {type(result)}. Use wait_for_agent_loop for agent loop tasks."
+        return result
+
     async def wait_for_all(
         self, task_ids: Sequence[int] | None = None
     ) -> list[APIResponse]:
```
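The guard makes misuse fail loudly: a task id created by `start_agent_loop_nowait` (added below) stores an `AgentLoopResponse`, so handing it to plain `wait_for` now raises with a pointer to the right method instead of returning the wrong type. A sketch of the intended pairing, with `client`, `prompt`, and `my_tools` assumed:

```python
# Sketch: each kind of task id goes to its matching wait method.
async def run_one_loop(client, prompt, my_tools):
    loop_id = client.start_agent_loop_nowait(prompt, tools=my_tools)
    conversation, final = await client.wait_for_agent_loop(loop_id)
    return conversation, final

# Handing loop_id to plain wait_for instead would trip the new assertion:
#   AssertionError: Expected APIResponse, got <class '...AgentLoopResponse'>.
#   Use wait_for_agent_loop for agent loop tasks.
```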
```diff
@@ -797,6 +813,9 @@ class _LLMClient(BaseModel):
             tid = tasks_map.pop(task)
             task_result = self._results.get(tid, await task)
             assert task_result
+            assert isinstance(
+                task_result, APIResponse
+            ), f"Expected APIResponse, got {type(task_result)}. as_completed() only works with single requests, not agent loops."
             yield tid, task_result
 
         while tasks_map:
```
```diff
@@ -807,6 +826,9 @@ class _LLMClient(BaseModel):
             tid = tasks_map.pop(task)
             task_result = self._results.get(tid, await task)
             assert task_result
+            assert isinstance(
+                task_result, APIResponse
+            ), f"Expected APIResponse, got {type(task_result)}. as_completed() only works with single requests, not agent loops."
             yield tid, task_result
 
     async def stream(
```
```diff
@@ -828,24 +850,15 @@ class _LLMClient(BaseModel):
             return self.postprocess(item)
         return item
 
-    async def run_agent_loop(
+    async def _run_agent_loop_internal(
         self,
-        conversation: Prompt,
+        task_id: int,
+        conversation: Conversation,
         *,
         tools: list[Tool | dict | MCPServer] | None = None,
         max_rounds: int = 5,
-        show_progress: bool = False,
-    ) -> tuple[Conversation, APIResponse]:
-        """Run a simple agent loop until no more tool calls are returned.
-
-        The provided ``conversation`` will be mutated and returned alongside the
-        final ``APIResponse`` from the model. ``tools`` may include ``Tool``
-        instances or built‑in tool dictionaries.
-        """
-
-        if not isinstance(conversation, Conversation):
-            conversation = prompts_to_conversations([conversation])[0]
-        assert isinstance(conversation, Conversation)
+    ) -> AgentLoopResponse:
+        """Internal method to run agent loop and return wrapped result."""
 
         # Expand MCPServer objects to their constituent tools for tool execution
         expanded_tools: list[Tool] = []
```
```diff
@@ -898,7 +911,75 @@ class _LLMClient(BaseModel):
         if response is None:
             raise RuntimeError("model did not return a response")
 
-        return conversation, response
+        result = AgentLoopResponse(conversation=conversation, final_response=response)
+        self._results[task_id] = result
+        return result
+
+    def start_agent_loop_nowait(
+        self,
+        conversation: Prompt,
+        *,
+        tools: list[Tool | dict | MCPServer] | None = None,
+        max_rounds: int = 5,
+    ) -> int:
+        """Start an agent loop without waiting for it to complete.
+
+        Returns a task_id that can be used with wait_for_agent_loop().
+        """
+        if not isinstance(conversation, Conversation):
+            conversation = prompts_to_conversations([conversation])[0]
+        assert isinstance(conversation, Conversation)
+
+        task_id = self._next_task_id
+        self._next_task_id += 1
+
+        task = asyncio.create_task(
+            self._run_agent_loop_internal(
+                task_id, conversation, tools=tools, max_rounds=max_rounds
+            )
+        )
+        self._tasks[task_id] = task
+        return task_id
+
+    async def wait_for_agent_loop(
+        self, task_id: int
+    ) -> tuple[Conversation, APIResponse]:
+        """Wait for an agent loop task to complete.
+
+        Returns the conversation and final response from the agent loop.
+        """
+        task = self._tasks.get(task_id)
+        if task:
+            result = await task
+        else:
+            result = self._results.get(task_id)
+
+        if result is None:
+            raise RuntimeError(f"Agent loop task {task_id} not found")
+
+        assert isinstance(
+            result, AgentLoopResponse
+        ), f"Expected AgentLoopResponse, got {type(result)}"
+        return result.conversation, result.final_response
+
+    async def run_agent_loop(
+        self,
+        conversation: Prompt,
+        *,
+        tools: list[Tool | dict | MCPServer] | None = None,
+        max_rounds: int = 5,
+        show_progress: bool = False,
+    ) -> tuple[Conversation, APIResponse]:
+        """Run a simple agent loop until no more tool calls are returned.
+
+        The provided ``conversation`` will be mutated and returned alongside the
+        final ``APIResponse`` from the model. ``tools`` may include ``Tool``
+        instances or built‑in tool dictionaries.
+        """
+        task_id = self.start_agent_loop_nowait(
+            conversation, tools=tools, max_rounds=max_rounds
+        )
+        return await self.wait_for_agent_loop(task_id)
 
     def run_agent_loop_sync(
         self,
```
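Net effect of the `client.py` changes: agent loops become tracked tasks like single requests, so several can run concurrently on one client while `run_agent_loop` keeps its old blocking signature. A usage sketch, assuming `LLMClient` is the public client class and treating the model spec and prompts as placeholders:

```python
import asyncio

from lm_deluge import LLMClient  # assumed public entry point

async def main() -> None:
    client = LLMClient("gpt-4.1-mini")  # placeholder model spec
    prompts = ["Audit the TODO comments", "Draft release notes"]

    # Start every loop without blocking on any of them...
    ids = [client.start_agent_loop_nowait(p, max_rounds=5) for p in prompts]

    # ...then collect each (Conversation, APIResponse) pair.
    for tid in ids:
        conversation, response = await client.wait_for_agent_loop(tid)
        print(tid, response)

asyncio.run(main())
```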
{lm_deluge-0.0.75 → lm_deluge-0.0.78}/src/lm_deluge/llm_tools/__init__.py

```diff
@@ -1,11 +1,18 @@
 from .extract import extract, extract_async
-from .translate import translate, translate_async
 from .score import score_llm
+from .subagents import SubAgentManager
+from .todos import TodoItem, TodoManager, TodoPriority, TodoStatus
+from .translate import translate, translate_async
 
 __all__ = [
     "extract",
     "extract_async",
+    "TodoItem",
+    "TodoManager",
+    "TodoPriority",
+    "TodoStatus",
     "translate",
     "translate_async",
     "score_llm",
+    "SubAgentManager",
 ]
```