copex 0.8.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- copex/__init__.py +69 -0
- copex/checkpoint.py +445 -0
- copex/cli.py +1106 -0
- copex/client.py +725 -0
- copex/config.py +311 -0
- copex/mcp.py +561 -0
- copex/metrics.py +383 -0
- copex/models.py +50 -0
- copex/persistence.py +324 -0
- copex/plan.py +358 -0
- copex/ralph.py +247 -0
- copex/tools.py +404 -0
- copex/ui.py +971 -0
- copex-0.8.4.dist-info/METADATA +511 -0
- copex-0.8.4.dist-info/RECORD +18 -0
- copex-0.8.4.dist-info/WHEEL +4 -0
- copex-0.8.4.dist-info/entry_points.txt +2 -0
- copex-0.8.4.dist-info/licenses/LICENSE +21 -0
copex/ralph.py
ADDED
|
@@ -0,0 +1,247 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Ralph Wiggum - Iterative Development Loops for Copex.
|
|
3
|
+
|
|
4
|
+
Implements the Ralph Wiggum technique pioneered by Geoffrey Huntley:
|
|
5
|
+
https://ghuntley.com/ralph/
|
|
6
|
+
|
|
7
|
+
Core concept: The same prompt is fed to the AI repeatedly. The AI sees its
|
|
8
|
+
own previous work in the conversation history, allowing it to iteratively
|
|
9
|
+
improve until the task is complete.
|
|
10
|
+
|
|
11
|
+
"Me fail English? That's unpossible!" - Ralph Wiggum
|
|
12
|
+
"""
|
|
13
|
+
|
|
14
|
+
from __future__ import annotations
|
|
15
|
+
|
|
16
|
+
import asyncio
|
|
17
|
+
import re
|
|
18
|
+
from dataclasses import dataclass, field
|
|
19
|
+
from datetime import datetime
|
|
20
|
+
from typing import Any, Callable
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
@dataclass
class RalphState:
    """State of a Ralph loop.

    Tracks the progress of a single :meth:`RalphWiggum.loop` invocation;
    exposed to callers via ``RalphWiggum.state`` and returned when the
    loop finishes.
    """

    prompt: str  # The task prompt that is re-sent on every iteration.
    iteration: int = 0  # 1-based count of iterations started so far.
    max_iterations: int | None = None  # Iteration budget; None = unlimited.
    completion_promise: str | None = None  # Promise text that ends the loop when emitted.
    started_at: datetime = field(default_factory=datetime.now)  # Wall-clock start time.
    completed: bool = False  # True once the loop has exited (any reason).
    completion_reason: str | None = None  # Why the loop ended, e.g. "cancelled".
    history: list[str] = field(default_factory=list)  # Response content, one entry per successful iteration.
|
|
35
|
+
|
|
36
|
+
|
|
37
|
+
@dataclass
class RalphConfig:
    """Configuration for Ralph loops.

    Per-call arguments to :meth:`RalphWiggum.loop` take precedence over
    the values configured here.
    """

    max_iterations: int | None = None  # None = unlimited
    completion_promise: str | None = None  # Text that signals completion
    delay_between_iterations: float = 1.0  # Seconds between iterations
    show_iteration_header: bool = True  # Prepend the Ralph instruction header to each prompt
    continue_on_error: bool = True  # Keep looping after a client error instead of re-raising
    max_consecutive_errors: int = 3  # Abort after this many back-to-back errors
|
|
47
|
+
|
|
48
|
+
|
|
49
|
+
class RalphWiggum:
    """
    Ralph Wiggum - Iterative AI development loops.

    The same prompt is sent to the client repeatedly; the model sees its
    previous work in the conversation history and can keep improving until
    it emits the completion promise, the iteration budget runs out, or the
    loop is cancelled.

    Usage:
        ralph = RalphWiggum(copex_client)
        result = await ralph.loop(
            prompt="Build a REST API with CRUD operations",
            completion_promise="API COMPLETE",
            max_iterations=30,
        )
    """

    def __init__(self, client: Any, config: RalphConfig | None = None):
        """
        Initialize Ralph.

        Args:
            client: A Copex client instance. Must provide an async ``send(prompt)``
                method returning an object with a ``.content`` string attribute.
            config: Optional configuration; defaults to ``RalphConfig()``.
        """
        self.client = client
        self.config = config or RalphConfig()
        self._state: RalphState | None = None
        self._cancelled = False

    @property
    def state(self) -> RalphState | None:
        """Get current loop state (``None`` before the first ``loop()`` call)."""
        return self._state

    @property
    def is_running(self) -> bool:
        """Check if a loop is currently running."""
        return self._state is not None and not self._state.completed

    def cancel(self) -> None:
        """Cancel the current loop.

        The loop exits before starting its next iteration; an in-flight
        client call is not interrupted.
        """
        self._cancelled = True
        if self._state:
            self._state.completed = True
            self._state.completion_reason = "cancelled"

    async def loop(
        self,
        prompt: str,
        *,
        max_iterations: int | None = None,
        completion_promise: str | None = None,
        on_iteration: Callable[[int, str], None] | None = None,
        on_complete: Callable[[RalphState], None] | None = None,
    ) -> RalphState:
        """
        Run a Ralph loop.

        The same prompt is sent repeatedly. The AI sees the conversation
        history and can iterate on its previous work.

        Args:
            prompt: The task prompt to repeat
            max_iterations: Maximum iterations (None = unlimited)
            completion_promise: Text that signals completion (e.g., "TASK COMPLETE")
            on_iteration: Callback called after each iteration with (iteration, response)
            on_complete: Callback called when loop completes

        Returns:
            RalphState with final state and history
        """
        self._cancelled = False
        self._state = RalphState(
            prompt=prompt,
            max_iterations=max_iterations or self.config.max_iterations,
            completion_promise=completion_promise or self.config.completion_promise,
        )

        consecutive_errors = 0

        try:
            while not self._cancelled:
                self._state.iteration += 1

                # Stop once the iteration budget is exhausted.
                if self._state.max_iterations and self._state.iteration > self._state.max_iterations:
                    self._state.completed = True
                    self._state.completion_reason = f"max_iterations ({self._state.max_iterations})"
                    break

                # Build the per-iteration prompt. The instruction header is
                # constructed only when it will actually be used (previously
                # it was built unconditionally, even when the header was
                # disabled and even on the iteration that breaks above).
                if self.config.show_iteration_header:
                    iteration_prompt = f"{self._build_ralph_instructions()}\n\n---\n\n{prompt}"
                else:
                    iteration_prompt = prompt

                try:
                    # Send to Copex
                    response = await self.client.send(iteration_prompt)
                    content = response.content
                    self._state.history.append(content)
                    consecutive_errors = 0

                    # Check for completion promise
                    if self._state.completion_promise:
                        if self._check_promise(content, self._state.completion_promise):
                            self._state.completed = True
                            self._state.completion_reason = f"promise: {self._state.completion_promise}"
                            break

                    # Callback
                    if on_iteration:
                        on_iteration(self._state.iteration, content)

                except Exception:
                    consecutive_errors += 1
                    if consecutive_errors >= self.config.max_consecutive_errors:
                        self._state.completed = True
                        self._state.completion_reason = f"errors: {consecutive_errors} consecutive failures"
                        break

                    if not self.config.continue_on_error:
                        raise

                # Delay between iterations
                if self.config.delay_between_iterations > 0:
                    await asyncio.sleep(self.config.delay_between_iterations)

        finally:
            # Whatever the exit path (promise, budget, cancel, exception),
            # mark the state complete and fire the completion callback.
            if not self._state.completed:
                self._state.completed = True
                self._state.completion_reason = "unknown"

            if on_complete:
                on_complete(self._state)

        return self._state

    def _build_ralph_instructions(self) -> str:
        """Build the iteration header prepended to the prompt each turn."""
        parts = [f"[Ralph Wiggum Loop - Iteration {self._state.iteration}]"]

        if self._state.max_iterations:
            parts.append(f"Max iterations: {self._state.max_iterations}")

        if self._state.completion_promise:
            parts.append(
                f"\nTo complete this loop, output: <promise>{self._state.completion_promise}</promise>\n"
                f"ONLY output this when the statement is genuinely TRUE."
            )
        else:
            parts.append("\nNo completion promise set - loop runs until max iterations or cancelled.")

        parts.append("\nYou can see your previous work in the conversation. Continue improving.")

        return "\n".join(parts)

    def _check_promise(self, content: str, promise: str) -> bool:
        """Check if content contains the completion promise.

        Only a ``<promise>TEXT</promise>`` tag counts; the tag text must
        equal *promise* after case-folding and whitespace normalization.
        """
        # Look for <promise>TEXT</promise> pattern
        pattern = r"<promise>(.*?)</promise>"
        matches = re.findall(pattern, content, re.DOTALL | re.IGNORECASE)

        for match in matches:
            # Normalize whitespace so multi-line promises still match.
            normalized = " ".join(match.strip().split())
            if normalized.lower() == promise.lower():
                return True

        return False
|
|
217
|
+
|
|
218
|
+
|
|
219
|
+
async def ralph_loop(
    client: Any,
    prompt: str,
    *,
    max_iterations: int | None = None,
    completion_promise: str | None = None,
    on_iteration: Callable[[int, str], None] | None = None,
) -> RalphState:
    """
    Run a one-shot Ralph loop without keeping the runner around.

    Thin wrapper that constructs a :class:`RalphWiggum` with default
    configuration and delegates to its ``loop`` method.

    Example:
        async with Copex() as client:
            result = await ralph_loop(
                client,
                "Build a calculator app with tests",
                completion_promise="ALL TESTS PASSING",
                max_iterations=20,
            )
            print(f"Completed in {result.iteration} iterations")
    """
    runner = RalphWiggum(client)
    return await runner.loop(
        prompt,
        max_iterations=max_iterations,
        completion_promise=completion_promise,
        on_iteration=on_iteration,
    )
|
|
247
|
+
|
copex/tools.py
ADDED
|
@@ -0,0 +1,404 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Parallel Tools - Execute multiple tool calls concurrently.
|
|
3
|
+
|
|
4
|
+
Enables:
|
|
5
|
+
- Concurrent execution of independent tools
|
|
6
|
+
- Batching of tool results
|
|
7
|
+
- Timeout handling for slow tools
|
|
8
|
+
"""
|
|
9
|
+
|
|
10
|
+
from __future__ import annotations
|
|
11
|
+
|
|
12
|
+
import asyncio
|
|
13
|
+
from dataclasses import dataclass
|
|
14
|
+
from typing import Any, Awaitable, Callable
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
@dataclass
class ToolResult:
    """Result from a single tool execution."""

    name: str  # Name of the tool that was executed.
    success: bool  # True if the tool returned without raising or timing out.
    result: Any = None  # The tool's return value (None on failure).
    error: str | None = None  # Error description when success is False.
    duration_ms: float = 0  # Wall-clock execution time in milliseconds.
|
|
26
|
+
|
|
27
|
+
|
|
28
|
+
@dataclass
class ParallelToolConfig:
    """Configuration for parallel tool execution."""

    max_concurrent: int = 5  # Maximum concurrent tool calls
    timeout: float = 30.0  # Timeout per tool in seconds
    fail_fast: bool = False  # Cancel remaining tools on first error
    retry_on_error: bool = True  # Retry failed tools (with backoff)
    max_retries: int = 2  # Max retries per tool (so up to max_retries + 1 attempts)
|
|
37
|
+
|
|
38
|
+
|
|
39
|
+
class ToolRegistry:
    """
    Registry for tools that can be executed in parallel.

    Usage:
        registry = ToolRegistry()

        @registry.register("get_weather")
        async def get_weather(city: str) -> str:
            return f"Weather in {city}: Sunny"

        @registry.register("get_time")
        async def get_time(timezone: str) -> str:
            return f"Time in {timezone}: 12:00"

        # Execute multiple tools in parallel
        results = await registry.execute_parallel([
            ("get_weather", {"city": "Seattle"}),
            ("get_time", {"timezone": "PST"}),
        ])
    """

    def __init__(self, config: ParallelToolConfig | None = None):
        """Create a registry with optional execution configuration."""
        self.config = config or ParallelToolConfig()
        self._tools: dict[str, Callable[..., Awaitable[Any]]] = {}
        self._descriptions: dict[str, str] = {}

    def register(
        self,
        name: str,
        description: str = "",
    ) -> Callable[[Callable], Callable]:
        """
        Decorator to register a tool.

        Args:
            name: Tool name
            description: Tool description (falls back to the function docstring)

        Example:
            @registry.register("search", "Search the web")
            async def search(query: str) -> str:
                ...
        """
        def decorator(func: Callable[..., Awaitable[Any]]) -> Callable:
            self._tools[name] = func
            self._descriptions[name] = description or func.__doc__ or ""
            return func

        return decorator

    def add_tool(
        self,
        name: str,
        func: Callable[..., Awaitable[Any]],
        description: str = "",
    ) -> None:
        """Add a tool directly (not as decorator)."""
        self._tools[name] = func
        self._descriptions[name] = description or func.__doc__ or ""

    def get_tool(self, name: str) -> Callable[..., Awaitable[Any]] | None:
        """Get a tool by name, or None if not registered."""
        return self._tools.get(name)

    def list_tools(self) -> list[dict[str, str]]:
        """List all registered tools as ``{"name", "description"}`` dicts."""
        return [
            {"name": name, "description": self._descriptions.get(name, "")}
            for name in self._tools
        ]

    async def execute(
        self,
        name: str,
        params: dict[str, Any],
        timeout: float | None = None,
    ) -> ToolResult:
        """
        Execute a single tool.

        Args:
            name: Tool name
            params: Tool parameters (passed as keyword arguments)
            timeout: Optional timeout override in seconds

        Returns:
            ToolResult with success/failure info and timing
        """
        import time

        tool = self._tools.get(name)
        if not tool:
            return ToolResult(
                name=name,
                success=False,
                error=f"Tool not found: {name}",
            )

        start = time.time()
        # Compare against None, not truthiness: `timeout or default` would
        # silently replace an explicit timeout of 0 with the config value.
        if timeout is None:
            timeout = self.config.timeout

        try:
            result = await asyncio.wait_for(
                tool(**params),
                timeout=timeout,
            )
            duration = (time.time() - start) * 1000

            return ToolResult(
                name=name,
                success=True,
                result=result,
                duration_ms=duration,
            )

        except asyncio.TimeoutError:
            duration = (time.time() - start) * 1000
            return ToolResult(
                name=name,
                success=False,
                error=f"Timeout after {timeout}s",
                duration_ms=duration,
            )

        except Exception as e:
            duration = (time.time() - start) * 1000
            return ToolResult(
                name=name,
                success=False,
                error=str(e),
                duration_ms=duration,
            )

    async def execute_parallel(
        self,
        calls: list[tuple[str, dict[str, Any]]],
        max_concurrent: int | None = None,
    ) -> list[ToolResult]:
        """
        Execute multiple tools in parallel.

        Args:
            calls: List of (tool_name, params) tuples
            max_concurrent: Override max concurrent limit

        Returns:
            List of ToolResult in same order as calls
        """
        max_concurrent = max_concurrent or self.config.max_concurrent
        semaphore = asyncio.Semaphore(max_concurrent)

        async def limited_execute(name: str, params: dict) -> ToolResult:
            # Semaphore bounds concurrency; retry behavior follows config.
            async with semaphore:
                if self.config.retry_on_error:
                    return await self.execute_with_retry(name, params)
                return await self.execute(name, params)

        tasks: list[asyncio.Task] = []
        task_map: dict[asyncio.Task, int] = {}
        for idx, (name, params) in enumerate(calls):
            task = asyncio.create_task(limited_execute(name, params))
            tasks.append(task)
            task_map[task] = idx

        results: list[ToolResult | None] = [None] * len(calls)
        pending = set(tasks)
        aborted = False

        try:
            while pending and not aborted:
                done, pending = await asyncio.wait(
                    pending,
                    return_when=asyncio.FIRST_COMPLETED,
                )
                for task in done:
                    idx = task_map[task]
                    try:
                        result = await task
                    except Exception as exc:
                        result = ToolResult(
                            name=calls[idx][0],
                            success=False,
                            error=str(exc),
                        )

                    results[idx] = result

                    if self.config.fail_fast and not result.success:
                        # Cancel the rest but do NOT drop them here: they
                        # are awaited in `finally` so cancellation is fully
                        # processed (previously pending was cleared first,
                        # leaving cancelled tasks never awaited).
                        for pending_task in pending:
                            pending_task.cancel()
                        aborted = True
                        break
        finally:
            if pending:
                await asyncio.gather(*pending, return_exceptions=True)

        # Fill in placeholders for calls cancelled by fail_fast.
        if self.config.fail_fast and any(r is None for r in results):
            for idx, result in enumerate(results):
                if result is None:
                    results[idx] = ToolResult(
                        name=calls[idx][0],
                        success=False,
                        error="Cancelled due to fail_fast",
                    )

        return [result for result in results if result is not None]

    async def execute_with_retry(
        self,
        name: str,
        params: dict[str, Any],
        max_retries: int | None = None,
    ) -> ToolResult:
        """
        Execute a tool with retries on failure.

        Args:
            name: Tool name
            params: Tool parameters
            max_retries: Override max retries (0 means a single attempt)

        Returns:
            ToolResult from last attempt
        """
        # `is None` check: `max_retries or default` would turn an explicit
        # request for zero retries into the configured default.
        if max_retries is None:
            max_retries = self.config.max_retries

        # An unregistered tool fails deterministically; retrying it (and
        # sleeping between attempts) is pointless.
        if name not in self._tools:
            return await self.execute(name, params)

        for attempt in range(max_retries + 1):
            result = await self.execute(name, params)

            if result.success:
                return result

            if attempt < max_retries:
                # Exponential backoff: 0.5s, 1s, 2s, ...
                await asyncio.sleep(2 ** attempt * 0.5)

        return result
|
|
276
|
+
|
|
277
|
+
|
|
278
|
+
class ParallelToolExecutor:
    """
    High-level executor for parallel tool calls from Copex responses.

    Integrates with Copex to automatically handle tool calls in parallel.

    Usage:
        executor = ParallelToolExecutor()

        @executor.tool("fetch_data")
        async def fetch_data(url: str) -> str:
            async with aiohttp.ClientSession() as session:
                async with session.get(url) as response:
                    return await response.text()

        # In Copex callback
        async def handle_tools(tool_calls: list[dict]) -> list[dict]:
            return await executor.handle_batch(tool_calls)
    """

    def __init__(self, config: ParallelToolConfig | None = None):
        """Create an executor backed by a fresh :class:`ToolRegistry`."""
        self.registry = ToolRegistry(config)
        self.config = config or ParallelToolConfig()

    def tool(
        self,
        name: str,
        description: str = "",
    ) -> Callable[[Callable], Callable]:
        """Decorator to register a tool (delegates to the registry)."""
        return self.registry.register(name, description)

    async def handle_batch(
        self,
        tool_calls: list[dict[str, Any]],
    ) -> list[dict[str, Any]]:
        """
        Handle a batch of tool calls from Copex.

        Args:
            tool_calls: List of tool call dicts with "name" and "arguments"

        Returns:
            List of result dicts for Copex (one per call, same order)
        """
        calls = [
            (call["name"], call.get("arguments", {}))
            for call in tool_calls
        ]

        results = await self.registry.execute_parallel(calls)

        return [
            {
                "tool_call_id": tool_calls[i].get("id", f"call_{i}"),
                "name": result.name,
                "result": result.result if result.success else None,
                "error": result.error,
                "success": result.success,
            }
            for i, result in enumerate(results)
        ]

    def get_tool_definitions(self) -> list[dict[str, Any]]:
        """
        Get tool definitions for Copex session.

        Builds a JSON-schema-style parameter spec per tool by introspecting
        the registered function's signature.

        Returns:
            List of tool definitions for create_session
        """
        # Hoisted out of the loop (was re-imported per tool).
        import inspect

        # Map annotations to JSON-schema type names. The string keys handle
        # tools defined in modules using `from __future__ import annotations`,
        # where `param.annotation` is the unevaluated string, not the type.
        type_names: dict[Any, str] = {
            int: "integer", "int": "integer",
            float: "number", "float": "number",
            bool: "boolean", "bool": "boolean",
        }

        definitions = []
        for tool_info in self.registry.list_tools():
            # Get the actual function to introspect
            func = self.registry.get_tool(tool_info["name"])
            if not func:
                continue

            sig = inspect.signature(func)
            properties: dict[str, Any] = {}
            required: list[str] = []

            for param_name, param in sig.parameters.items():
                if param_name in ("self", "cls"):
                    continue

                # Anything not in the map (including no annotation at all)
                # defaults to "string", matching the original behavior.
                try:
                    param_type = type_names.get(param.annotation, "string")
                except TypeError:
                    # Unhashable annotation (exotic generics): treat as string.
                    param_type = "string"

                properties[param_name] = {"type": param_type}

                # Parameters with no default are required (identity check:
                # Parameter.empty is a sentinel, `==` could be overridden).
                if param.default is inspect.Parameter.empty:
                    required.append(param_name)

            definitions.append({
                "name": tool_info["name"],
                "description": tool_info["description"],
                "parameters": {
                    "type": "object",
                    "properties": properties,
                    "required": required,
                },
            })

        return definitions
|
|
390
|
+
|
|
391
|
+
|
|
392
|
+
def parallel_tools(*tools: Callable) -> list[Callable]:
    """
    Convenience wrapper to mark tools for parallel execution.

    Collects the given callables into a plain list suitable for passing
    to ``Copex.send(..., tools=...)``.

    Usage:
        from copex.tools import parallel_tools

        tools = parallel_tools(get_weather, get_time, fetch_data)

        async with Copex() as copex:
            response = await copex.send("...", tools=tools)
    """
    return [*tools]
|