lm-deluge 0.0.80__py3-none-any.whl → 0.0.82__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (36)
  1. lm_deluge/__init__.py +1 -2
  2. lm_deluge/api_requests/anthropic.py +2 -1
  3. lm_deluge/api_requests/base.py +13 -0
  4. lm_deluge/api_requests/gemini.py +1 -1
  5. lm_deluge/api_requests/openai.py +3 -2
  6. lm_deluge/client.py +16 -11
  7. lm_deluge/llm_tools/__init__.py +12 -5
  8. lm_deluge/pipelines/__init__.py +11 -0
  9. lm_deluge/{llm_tools → pipelines}/score.py +2 -2
  10. lm_deluge/{llm_tools → pipelines}/translate.py +5 -3
  11. lm_deluge/prompt.py +105 -0
  12. lm_deluge/request_context.py +2 -2
  13. lm_deluge/{tool.py → tool/__init__.py} +531 -314
  14. lm_deluge/tool/prefab/__init__.py +29 -0
  15. lm_deluge/tool/prefab/batch_tool.py +156 -0
  16. lm_deluge/{llm_tools → tool/prefab}/filesystem.py +1 -1
  17. lm_deluge/tool/prefab/memory.py +190 -0
  18. lm_deluge/tool/prefab/otc/__init__.py +165 -0
  19. lm_deluge/tool/prefab/otc/executor.py +281 -0
  20. lm_deluge/tool/prefab/otc/parse.py +188 -0
  21. lm_deluge/{llm_tools → tool/prefab}/sandbox.py +251 -61
  22. lm_deluge/{llm_tools → tool/prefab}/todos.py +1 -1
  23. lm_deluge/tool/prefab/tool_search.py +169 -0
  24. lm_deluge/tracker.py +16 -13
  25. {lm_deluge-0.0.80.dist-info → lm_deluge-0.0.82.dist-info}/METADATA +2 -3
  26. {lm_deluge-0.0.80.dist-info → lm_deluge-0.0.82.dist-info}/RECORD +34 -28
  27. lm_deluge/presets/cerebras.py +0 -17
  28. lm_deluge/presets/meta.py +0 -13
  29. /lm_deluge/{llm_tools → pipelines}/classify.py +0 -0
  30. /lm_deluge/{llm_tools → pipelines}/extract.py +0 -0
  31. /lm_deluge/{llm_tools → pipelines}/locate.py +0 -0
  32. /lm_deluge/{llm_tools → pipelines}/ocr.py +0 -0
  33. /lm_deluge/{llm_tools → tool/prefab}/subagents.py +0 -0
  34. {lm_deluge-0.0.80.dist-info → lm_deluge-0.0.82.dist-info}/WHEEL +0 -0
  35. {lm_deluge-0.0.80.dist-info → lm_deluge-0.0.82.dist-info}/licenses/LICENSE +0 -0
  36. {lm_deluge-0.0.80.dist-info → lm_deluge-0.0.82.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,29 @@
1
# Public surface of lm_deluge.tool.prefab: re-export every prefab manager.
# Note: memory.py is part of this release and declares MemoryManager public,
# so it is re-exported here alongside the other prefab modules.
from .batch_tool import BatchTool
from .filesystem import (
    FilesystemManager,
    FilesystemParams,
    InMemoryWorkspaceBackend,
    WorkspaceBackend,
)
from .memory import MemoryManager
from .otc import ToolComposer
from .sandbox import DaytonaSandbox, ModalSandbox
from .subagents import SubAgentManager
from .todos import TodoItem, TodoManager, TodoPriority, TodoStatus
from .tool_search import ToolSearchTool

__all__ = [
    "BatchTool",
    "ToolSearchTool",
    "ToolComposer",
    "MemoryManager",
    "TodoItem",
    "TodoManager",
    "TodoPriority",
    "TodoStatus",
    "SubAgentManager",
    "FilesystemManager",
    "FilesystemParams",
    "InMemoryWorkspaceBackend",
    "WorkspaceBackend",
    "ModalSandbox",
    "DaytonaSandbox",
]
@@ -0,0 +1,156 @@
1
+ """Batch tool for issuing multiple tool calls in a single roundtrip."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import json
6
+ from typing import Any
7
+
8
+ from .. import Tool
9
+
10
+
11
class BatchTool:
    """Wrap a set of tools behind one tool that executes many calls per request.

    The wrapped tools are exposed through a single `batch` tool whose `calls`
    argument is an array of ``{tool, arguments}`` objects; calls run
    sequentially and a JSON array of per-call results is returned.
    """

    def __init__(
        self,
        tools: list[Tool],
        *,
        batch_tool_name: str = "batch",
        include_tools_in_prompt: bool = True,
    ):
        self.tools = tools
        self.batch_tool_name = batch_tool_name
        self.include_tools_in_prompt = include_tools_in_prompt
        # Name -> Tool lookup used when dispatching batched calls.
        self._tool_index = {t.name: t for t in tools}

    def _arguments_schema(self, tool: Tool) -> dict[str, Any]:
        """JSON Schema object describing one wrapped tool's arguments."""
        arg_schema: dict[str, Any] = {
            "type": "object",
            "properties": tool.parameters or {},
            "required": tool.required or [],
        }
        # Only forward additionalProperties when the tool sets it explicitly.
        extra = tool.additionalProperties
        if extra is not None:
            arg_schema["additionalProperties"] = extra
        return arg_schema

    def _build_definitions(self) -> dict[str, Any]:
        """Return a $defs mapping with one `<name>_call` entry per tool."""
        return {
            f"{tool.name}_call": {
                "type": "object",
                "description": tool.description or "",
                "properties": {
                    "tool": {"type": "string", "enum": [tool.name]},
                    "arguments": self._arguments_schema(tool),
                },
                "required": ["tool", "arguments"],
                "additionalProperties": False,
            }
            for tool in self.tools
        }

    def _build_parameters(self) -> tuple[dict[str, Any], dict[str, Any]]:
        """Return ``(parameters, $defs)`` for the batch tool's schema."""
        definitions = self._build_definitions()
        if not definitions:
            # No wrapped tools: accept any {tool, arguments} pair.
            items_schema: dict[str, Any] = {
                "type": "object",
                "properties": {
                    "tool": {"type": "string"},
                    "arguments": {"type": "object"},
                },
                "required": ["tool", "arguments"],
                "additionalProperties": False,
            }
        else:
            # Each array item must match exactly one wrapped tool's call shape.
            refs = [{"$ref": f"#/$defs/{name}"} for name in sorted(definitions)]
            items_schema = {"anyOf": refs}

        parameters: dict[str, Any] = {
            "calls": {
                "type": "array",
                "description": (
                    "List of tool calls to execute in order. "
                    "Each item selects a tool and provides its arguments."
                ),
                "items": items_schema,
                "minItems": 1,
            }
        }
        return parameters, definitions

    def _tool_summary(self, tool: Tool) -> str:
        """One-line ``- name(arg: type, ...): description`` summary."""
        rendered = [
            f"{param}: {schema.get('type', 'any')}"
            for param, schema in (tool.parameters or {}).items()
        ]
        if rendered:
            signature = f"{tool.name}({', '.join(rendered)})"
        else:
            signature = f"{tool.name}()"
        return f"- {signature}: {tool.description or 'No description provided.'}"

    def _build_description(self) -> str:
        """Description for the batch tool, optionally listing wrapped tools."""
        header = (
            "Submit several tool calls at once to reduce roundtrips. "
            "Provide `calls` as an array of objects with `tool` and `arguments`. "
            "Calls run sequentially and results are returned in order."
        )
        if not self.include_tools_in_prompt:
            return header
        listing = "\n".join(self._tool_summary(t) for t in self.tools)
        return f"{header}\n\nAvailable tools:\n{listing}"

    async def _run(self, calls: list[dict[str, Any]]) -> str:
        """Run each call in order; return a JSON array of per-call results."""
        outcomes: list[dict[str, Any]] = []

        for entry in calls:
            requested = entry.get("tool", "")
            kwargs = entry.get("arguments") or {}
            target = self._tool_index.get(requested)
            if target is None:
                outcomes.append(
                    {
                        "tool": requested or "",
                        "status": "error",
                        "error": f"Unknown tool '{requested}'",
                    }
                )
                continue

            try:
                value = await target.acall(**kwargs)
            except Exception as exc:  # pragma: no cover - defensive
                outcomes.append(
                    {
                        "tool": target.name,
                        "status": "error",
                        "error": f"{type(exc).__name__}: {exc}",
                    }
                )
            else:
                outcomes.append({"tool": target.name, "status": "ok", "result": value})

        return json.dumps(outcomes)

    def get_tool(self) -> Tool:
        """Build and return the batch Tool definition."""
        parameters, definitions = self._build_parameters()
        return Tool(
            name=self.batch_tool_name,
            description=self._build_description(),
            run=self._run,
            parameters=parameters,
            required=["calls"],
            definitions=definitions or None,
        )

    def get_tools(self) -> list[Tool]:
        """List form of ``get_tool``, matching other prefab managers."""
        return [self.get_tool()]
@@ -11,7 +11,7 @@ from typing import Any, Dict, Literal, Optional, Protocol
11
11
 
12
12
  from pydantic import BaseModel, Field
13
13
 
14
- from ..tool import Tool
14
+ from .. import Tool
15
15
 
16
16
  FS_DESCRIPTION = """Interact with an isolated virtual filesystem that belongs to this session.
17
17
 
@@ -0,0 +1,190 @@
1
+ from __future__ import annotations
2
+
3
+ import json
4
+ from collections import Counter
5
+ from typing import Any, Sequence, TypeAlias
6
+
7
+ import yaml
8
+ from pydantic import BaseModel, Field
9
+ from typing_extensions import TypedDict
10
+
11
+ # from rapidfuzz import fuzz, process
12
+ from .. import Tool
13
+
14
# Tool description surfaced to the model for the memory toolset.
MEMORY_DESCRIPTION = """Use this tool to search, read, and update a long-term "memory" that can be used across sessions, when previous messages are cleared. Whether and when to use memory depends on the situation—for complex tasks, it can store information about work so far, what needs to be done next, why you're doing what you're doing, etc. For personal conversations, it can be used to save "memories" that can be referenced later."""

# NOTE(review): empty placeholders, unreferenced anywhere in this module —
# presumably intended as per-tool write/read prompts. TODO confirm or remove.
MEMORY_WRITE = """

"""

MEMORY_READ = """

"""
23
+
24
+
25
class MemoryItem(BaseModel):
    """Structured representation of a single memory."""

    # Stable integer key used by the read/update/delete tools.
    id: int
    # One-sentence preview; search results show only id + description.
    description: str = Field(
        description='Short description ("preview") of the memory (1 sentence)'
    )
    # Full body of the memory; included only when reading, not when searching.
    content: str = Field(
        description="Full content of the memory. May use Markdown for formatting."
    )
35
+
36
+
37
class MemoryItemDict(TypedDict):
    """Plain-dict mirror of MemoryItem, accepted anywhere MemoryLike is."""

    id: int
    description: str
    content: str
41
+
42
+
43
# Accepted input forms for a memory: the model itself or its plain-dict shape.
MemoryLike: TypeAlias = MemoryItem | MemoryItemDict
44
+
45
+
46
class MemoryManager:
    """Stateful memory store exposing search/read/write/update/delete tools.

    Memories live in an in-memory ``{id: MemoryItem}`` mapping. They can be
    loaded from / persisted to JSON via ``from_file``/``to_file``, and
    ``get_tools()`` returns Tool instances bound to this manager's state.
    (Docstring fixed: previously said "todo scratchpad", copied from todos.py.)
    """

    def __init__(
        self,
        memories: Sequence[MemoryLike] | dict[int, MemoryLike] | None = None,
        *,
        write_tool_name: str = "memwrite",
        read_tool_name: str = "memread",
        search_tool_name: str = "memsearch",
        update_tool_name: str = "memupdate",
        delete_tool_name: str = "memdelete",
    ):
        """Create a manager, optionally seeded with existing memories.

        Args:
            memories: Initial memories, either a sequence (ids taken from the
                items themselves) or a ``{id: memory}`` mapping.
            write_tool_name / read_tool_name / search_tool_name /
            update_tool_name / delete_tool_name: names under which each tool
                is exposed to the model.
        """
        self.write_tool_name = write_tool_name
        self.read_tool_name = read_tool_name
        self.search_tool_name = search_tool_name
        self.update_tool_name = update_tool_name
        self.delete_tool_name = delete_tool_name
        self._memories: dict[int, MemoryItem] = {}
        # Cache for get_tools() so tool identity is stable across calls.
        self._tools: list[Tool] | None = None

        if memories:
            if isinstance(memories, dict):
                self._memories = {k: self._coerce(v) for k, v in memories.items()}
            else:
                coerced = [self._coerce(mem) for mem in memories]
                self._memories = {x.id: x for x in coerced}

    @classmethod
    def from_file(
        cls,
        file: str,
    ) -> MemoryManager:
        """Load memories from a JSON file previously written by ``to_file``."""
        with open(file) as f:
            memories = json.load(f)
        return cls(memories)

    def _coerce(self, mem: MemoryLike) -> MemoryItem:
        """Normalize a MemoryItem-or-dict into a MemoryItem.

        Raises:
            TypeError: if ``mem`` is neither a MemoryItem nor a dict.
        """
        if isinstance(mem, MemoryItem):
            return mem
        if isinstance(mem, dict):
            return MemoryItem(**mem)
        raise TypeError("Memories must be MemoryItem instances or dicts")

    def _serialize(self) -> list[dict[str, Any]]:
        """Dump all memories as plain dicts (JSON-ready)."""
        return [mem.model_dump() for mem in self._memories.values()]

    def to_file(self, file: str):
        """Persist all memories to *file* as JSON."""
        mems = self._serialize()
        with open(file, "w") as f:
            f.write(json.dumps(mems))

    @staticmethod
    def _format_memory(mem: MemoryItem, include_content: bool = True) -> str:
        """Render one memory as YAML; omit `content` for search previews."""
        dumped = mem.model_dump()
        if not include_content:
            dumped.pop("content")
        return yaml.safe_dump(dumped)

    # helpers
    def _search(self, queries: list[str], limit: int = 5) -> list[MemoryItem]:
        """Keyword search: rank memories by count of matching keywords."""
        hits = Counter()
        for q in queries:
            keywords = q.lower().split()
            for k in keywords:
                for mem_id, mem in self._memories.items():
                    if k in mem.description.lower() or k in mem.content.lower():
                        hits[mem_id] += 1

        top_k = hits.most_common(limit)

        # Drop zero-hit entries so unrelated memories never surface.
        return self._read([hit[0] for hit in top_k if hit[1] > 0])

    def _read(self, memory_ids: list[int]) -> list[MemoryItem]:
        """Resolve ids to memories, silently skipping unknown ids."""
        return [
            mem
            for mem_id in memory_ids
            if mem_id is not None and (mem := self._memories.get(mem_id)) is not None
        ]

    def _add(self, description: str, content: str):
        """Insert a new memory; ids increase monotonically starting at 1."""
        new_id = max(self._memories) + 1 if self._memories else 1
        self._memories[new_id] = self._coerce(
            {"id": new_id, "description": description, "content": content}
        )
        return new_id

    def _update(self, mem_id: int, description: str, content: str):
        """Overwrite both fields of an existing memory (KeyError if absent)."""
        self._memories[mem_id].description = description
        self._memories[mem_id].content = content

    def _delete(self, mem_id: int):
        """Remove a memory (KeyError if absent)."""
        self._memories.pop(mem_id)

    def get_tools(self) -> list[Tool]:
        """Return Tool instances bound to this manager's state."""
        if self._tools is not None:
            return self._tools

        def search_tool(queries: list[str], limit: int = 5) -> str:
            """Search for memories using keyword search. Use as many queries as you want, the top results will be fused into one list. Search results include just id and description."""
            mems = self._search(queries, limit=limit)
            return "\n---\n".join(
                [self._format_memory(mem, include_content=False) for mem in mems]
            )

        def read_tool(mem_ids: list[int]) -> str:
            """Read the full contents of one or more memories."""
            mems = self._read(mem_ids)
            return "\n---\n".join(
                [self._format_memory(mem, include_content=True) for mem in mems]
            )

        def add_tool(description: str, content: str):
            """Add a new memory."""
            return self._add(description, content)

        def update_tool(mem_id: int, description: str, content: str) -> str:
            """Update a memory by ID. Must provide content and description, even if only changing one of them."""
            # Report unknown ids to the model instead of letting KeyError
            # escape the tool call.
            if mem_id not in self._memories:
                return f"Memory {mem_id} not found."
            self._update(mem_id, description, content)

            return f"Memory {mem_id} updated successfully."

        def delete_tool(mem_id: int) -> str:
            """Delete a memory by ID."""
            # Same guard as update_tool: a bad id should not raise.
            if mem_id not in self._memories:
                return f"Memory {mem_id} not found."
            self._delete(mem_id)
            return f"Memory {mem_id} deleted successfully."

        def _rename(tool: Tool, name: str) -> Tool:
            # Tools are pydantic models, so rename via copy-with-update.
            if tool.name == name:
                return tool
            return tool.model_copy(update={"name": name})

        self._tools = [
            _rename(Tool.from_function(search_tool), self.search_tool_name),
            _rename(Tool.from_function(read_tool), self.read_tool_name),
            _rename(Tool.from_function(add_tool), self.write_tool_name),
            _rename(Tool.from_function(update_tool), self.update_tool_name),
            _rename(Tool.from_function(delete_tool), self.delete_tool_name),
        ]
        return self._tools
188
+
189
+
190
+ __all__ = ["MemoryManager"]
@@ -0,0 +1,165 @@
1
+ """
2
+ Open Tool Composition (OTC) for lm-deluge.
3
+
4
+ Allows LLMs to write Python code that orchestrates multiple tool calls,
5
+ with only the final result entering the model's context.
6
+ """
7
+
8
+ from lm_deluge.tool import Tool
9
+
10
+ from .executor import OTCExecutor
11
+ from .parse import OTCExecutionError, OTCSecurityError
12
+
13
# Prompt for the `compose` tool. `<<AVAILABLE_TOOLS>>` is substituted when the
# tool description is built. Fix: the first two fragments concatenated to
# "write codethat orchestrates" — a trailing space was missing.
OTC_PROMPT = (
    """The "compose" tool allows you to write code """
    "that orchestrates multiple tool calls. "
    "The purpose is to compose tool calls to get a "
    "final result, without wasting network roundtrips "
    "or input tokens on intermediate results. Use this to:\n"
    " - Call multiple tools and combine their results\n"
    " - Filter or aggregate data from tool results\n"
    " - Implement conditional logic based on tool outputs\n"
    " - Process large amounts of data without polluting your context\n"
    "The code you write is a restricted subset of Python that runs in a "
    "sandboxed environment, with access to each of your tools as a function. "
    "Only the final output (via print() or a 'result' variable) will be returned to you.\n\n"
    "IMPORTANT:\n"
    " - Tools are called synchronously (no await needed)\n"
    " - Use print() or set a 'result' variable for output\n"
    " - You can use `json` and standard builtins (list, dict, sum, len, etc.) without importing anything\n"
    " - Imports, file I/O, and network access are disabled\n\n"
    "<<AVAILABLE_TOOLS>>\n\n"
    "Example:\n"
    "```python\n"
    "# Get team members and their expenses\n"
    'team = get_team_members(department="engineering")\n'
    'expenses = [get_expenses(user_id=m["id"], quarter="Q3") for m in team]\n\n'
    "# Find who exceeded budget\n"
    "over_budget = []\n"
    "for member, exp in zip(team, expenses):\n"
    '\ttotal = sum(e["amount"] for e in exp)\n'
    "\tif total > 10000:\n"
    '\t\tover_budget.append({"name": member["name"], "total": total})\n\n'
    "print(json.dumps(over_budget))\n"
    "```"
)
46
+
47
+
48
class ToolComposer:
    """Expose a `compose` tool that lets the model script calls to other tools.

    Mirrors SubAgentManager, but for tool composition: the model writes
    restricted Python that invokes the wrapped tools, and only the final
    output enters its context.

    Example:
        >>> composer = ToolComposer(tools=[search_tool, fetch_tool, calculator_tool])
        >>> all_tools = composer.get_all_tools()  # Original tools + compose tool
        >>> # LLM can now call compose() to orchestrate the other tools
    """

    def __init__(
        self,
        tools: list[Tool],
        compose_tool_name: str = "compose",
        include_tools_in_prompt: bool = False,
    ):
        """Set up the composer.

        Args:
            tools: Tools available for composition.
            compose_tool_name: Name for the composition tool.
            include_tools_in_prompt: Whether to embed tool signatures in the
                compose tool's description.
        """
        self.tools = tools
        self.compose_tool_name = compose_tool_name
        self.include_tools_in_prompt = include_tools_in_prompt
        self.executor = OTCExecutor(tools)

    def _generate_tool_signatures(self) -> str:
        """Render a Python-style signature block for every wrapped tool."""
        # JSON Schema primitive -> Python type name for readable signatures.
        json_to_py = {
            "string": "str",
            "integer": "int",
            "number": "float",
            "boolean": "bool",
            "array": "list",
            "object": "dict",
        }
        lines = []
        for tool in self.tools:
            rendered = []
            for pname, schema in (tool.parameters or {}).items():
                raw = schema.get("type", "any")
                py_type = json_to_py.get(raw, raw)
                if tool.required and pname in tool.required:
                    rendered.append(f"{pname}: {py_type}")
                else:
                    # Optional parameters are shown with a None default.
                    rendered.append(f"{pname}: {py_type} = None")

            sig = f"{tool.name}({', '.join(rendered)})"
            desc = tool.description or "No description"
            # Keep the prompt compact: clip very long descriptions.
            if len(desc) > 500:
                desc = desc[:497] + "..."
            lines.append(f" {sig}\n {desc}")

        return "\n".join(lines)

    def _build_compose_description(self) -> str:
        """Fill the AVAILABLE_TOOLS placeholder in OTC_PROMPT."""
        if self.include_tools_in_prompt:
            replacement = f"# Available Tools\n{self._generate_tool_signatures()}"
        else:
            replacement = "# Available Tools\nYou can use any tool available to you, but you must use it as a Python function."
        return OTC_PROMPT.replace("<<AVAILABLE_TOOLS>>", replacement)

    async def _compose(self, code: str) -> str:
        """Run composition code, mapping known failure modes to error strings."""
        try:
            return await self.executor.execute(code)
        except OTCSecurityError as e:
            return f"Security error: {e}"
        except OTCExecutionError as e:
            return f"Execution error: {e}"
        except Exception as e:
            return f"Unexpected error: {type(e).__name__}: {e}"

    def get_compose_tool(self) -> Tool:
        """Construct the Tool wrapping ``_compose``."""
        code_schema = {
            "type": "string",
            "description": "Python code to execute. Use available tools as functions.",
        }
        return Tool(
            name=self.compose_tool_name,
            description=self._build_compose_description(),
            run=self._compose,
            parameters={"code": code_schema},
            required=["code"],
        )

    def get_all_tools(self) -> list[Tool]:
        """All tools: compose first (to nudge the model toward composition),
        then the originals."""
        return [self.get_compose_tool(), *self.tools]

    def get_tools_without_compose(self) -> list[Tool]:
        """The wrapped tools only, without the compose tool."""
        return self.tools