jaf_py-2.5.10-py3-none-any.whl → jaf_py-2.5.11-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- jaf/__init__.py +154 -57
- jaf/a2a/__init__.py +42 -21
- jaf/a2a/agent.py +79 -126
- jaf/a2a/agent_card.py +87 -78
- jaf/a2a/client.py +30 -66
- jaf/a2a/examples/client_example.py +12 -12
- jaf/a2a/examples/integration_example.py +38 -47
- jaf/a2a/examples/server_example.py +56 -53
- jaf/a2a/memory/__init__.py +0 -4
- jaf/a2a/memory/cleanup.py +28 -21
- jaf/a2a/memory/factory.py +155 -133
- jaf/a2a/memory/providers/composite.py +21 -26
- jaf/a2a/memory/providers/in_memory.py +89 -83
- jaf/a2a/memory/providers/postgres.py +117 -115
- jaf/a2a/memory/providers/redis.py +128 -121
- jaf/a2a/memory/serialization.py +77 -87
- jaf/a2a/memory/tests/run_comprehensive_tests.py +112 -83
- jaf/a2a/memory/tests/test_cleanup.py +211 -94
- jaf/a2a/memory/tests/test_serialization.py +73 -68
- jaf/a2a/memory/tests/test_stress_concurrency.py +186 -133
- jaf/a2a/memory/tests/test_task_lifecycle.py +138 -120
- jaf/a2a/memory/types.py +91 -53
- jaf/a2a/protocol.py +95 -125
- jaf/a2a/server.py +90 -118
- jaf/a2a/standalone_client.py +30 -43
- jaf/a2a/tests/__init__.py +16 -33
- jaf/a2a/tests/run_tests.py +17 -53
- jaf/a2a/tests/test_agent.py +40 -140
- jaf/a2a/tests/test_client.py +54 -117
- jaf/a2a/tests/test_integration.py +28 -82
- jaf/a2a/tests/test_protocol.py +54 -139
- jaf/a2a/tests/test_types.py +50 -136
- jaf/a2a/types.py +58 -34
- jaf/cli.py +21 -41
- jaf/core/__init__.py +7 -1
- jaf/core/agent_tool.py +93 -72
- jaf/core/analytics.py +257 -207
- jaf/core/checkpoint.py +223 -0
- jaf/core/composition.py +249 -235
- jaf/core/engine.py +817 -519
- jaf/core/errors.py +55 -42
- jaf/core/guardrails.py +276 -202
- jaf/core/handoff.py +47 -31
- jaf/core/parallel_agents.py +69 -75
- jaf/core/performance.py +75 -73
- jaf/core/proxy.py +43 -44
- jaf/core/proxy_helpers.py +24 -27
- jaf/core/regeneration.py +220 -129
- jaf/core/state.py +68 -66
- jaf/core/streaming.py +115 -108
- jaf/core/tool_results.py +111 -101
- jaf/core/tools.py +114 -116
- jaf/core/tracing.py +269 -210
- jaf/core/types.py +371 -151
- jaf/core/workflows.py +209 -168
- jaf/exceptions.py +46 -38
- jaf/memory/__init__.py +1 -6
- jaf/memory/approval_storage.py +54 -77
- jaf/memory/factory.py +4 -4
- jaf/memory/providers/in_memory.py +216 -180
- jaf/memory/providers/postgres.py +216 -146
- jaf/memory/providers/redis.py +173 -116
- jaf/memory/types.py +70 -51
- jaf/memory/utils.py +36 -34
- jaf/plugins/__init__.py +12 -12
- jaf/plugins/base.py +105 -96
- jaf/policies/__init__.py +0 -1
- jaf/policies/handoff.py +37 -46
- jaf/policies/validation.py +76 -52
- jaf/providers/__init__.py +6 -3
- jaf/providers/mcp.py +97 -51
- jaf/providers/model.py +360 -279
- jaf/server/__init__.py +1 -1
- jaf/server/main.py +7 -11
- jaf/server/server.py +514 -359
- jaf/server/types.py +208 -52
- jaf/utils/__init__.py +17 -18
- jaf/utils/attachments.py +111 -116
- jaf/utils/document_processor.py +175 -174
- jaf/visualization/__init__.py +1 -1
- jaf/visualization/example.py +111 -110
- jaf/visualization/functional_core.py +46 -71
- jaf/visualization/graphviz.py +154 -189
- jaf/visualization/imperative_shell.py +7 -16
- jaf/visualization/types.py +8 -4
- {jaf_py-2.5.10.dist-info → jaf_py-2.5.11.dist-info}/METADATA +2 -2
- jaf_py-2.5.11.dist-info/RECORD +97 -0
- jaf_py-2.5.10.dist-info/RECORD +0 -96
- {jaf_py-2.5.10.dist-info → jaf_py-2.5.11.dist-info}/WHEEL +0 -0
- {jaf_py-2.5.10.dist-info → jaf_py-2.5.11.dist-info}/entry_points.txt +0 -0
- {jaf_py-2.5.10.dist-info → jaf_py-2.5.11.dist-info}/licenses/LICENSE +0 -0
- {jaf_py-2.5.10.dist-info → jaf_py-2.5.11.dist-info}/top_level.txt +0 -0
jaf/core/composition.py
CHANGED
@@ -10,8 +10,17 @@ import asyncio
 import json
 from dataclasses import dataclass, field
 from typing import (
-    Any,
-
+    Any,
+    Callable,
+    Dict,
+    List,
+    Optional,
+    Union,
+    TypeVar,
+    Generic,
+    Awaitable,
+    Protocol,
+    runtime_checkable,
 )
 from functools import wraps
 from enum import Enum
@@ -20,35 +29,37 @@ from .types import Tool, ToolSchema, ToolSource, ToolExecuteFunction
 from .tool_results import ToolResult, ToolResponse


-T = TypeVar('T')
-Args = TypeVar('Args')
-Ctx = TypeVar('Ctx')
+T = TypeVar("T")
+Args = TypeVar("Args")
+Ctx = TypeVar("Ctx")


 class CompositionStrategy(str, Enum):
     """Strategies for tool composition."""
-    PIPELINE = 'pipeline'
-    PARALLEL = 'parallel'
-    CONDITIONAL = 'conditional'
-    RETRY = 'retry'
-    CACHE = 'cache'
-    RATE_LIMIT = 'rate_limit'
-    FALLBACK = 'fallback'
+
+    PIPELINE = "pipeline"
+    PARALLEL = "parallel"
+    CONDITIONAL = "conditional"
+    RETRY = "retry"
+    CACHE = "cache"
+    RATE_LIMIT = "rate_limit"
+    FALLBACK = "fallback"


 @dataclass(frozen=True)
 class CompositionMetadata:
     """Metadata for composed tools."""
+
     strategy: CompositionStrategy
     component_tools: List[str]
     composition_id: str
-    created_at: float = field(default_factory=lambda: __import__('time').time())
+    created_at: float = field(default_factory=lambda: __import__("time").time())


 @runtime_checkable
 class ToolTransformer(Protocol):
     """Protocol for tool transformers."""
-
+
     def transform(self, tool: Tool[Any, Any]) -> Tool[Any, Any]:
         """Transform a tool into an enhanced version."""
         ...
@@ -57,22 +68,22 @@ class ToolTransformer:
 class ToolPipeline:
     """
     Creates a pipeline of tools that execute in sequence.
-
+
     Each tool's output becomes the input to the next tool in the pipeline.
     """
-
+
     def __init__(self, *tools: Tool[Any, Any], name: str = "pipeline"):
         self.tools = list(tools)
         self.name = name
         self.pipeline_id = f"pipeline_{id(self)}"
-
+
     def create_tool(self) -> Tool[Any, Any]:
         """Create a single tool that represents the entire pipeline."""
-
+
         class PipelineTool:
             def __init__(self, pipeline: ToolPipeline):
                 self.pipeline = pipeline
-
+
             @property
             def schema(self) -> ToolSchema[Any]:
                 # Use the first tool's schema as the base
@@ -80,49 +91,45 @@ class ToolPipeline:
                 return ToolSchema(
                     name=f"{self.pipeline.name}_pipeline",
                     description=f"Pipeline of {len(self.pipeline.tools)} tools: {', '.join(t.schema.name for t in self.pipeline.tools)}",
-                    parameters=first_tool.schema.parameters
+                    parameters=first_tool.schema.parameters,
                 )
-
+
            async def execute(self, args: Any, context: Any) -> Union[str, ToolResult]:
                 """Execute the pipeline sequentially."""
                 current_input = args
                 results = []
-
+
                 for i, tool in enumerate(self.pipeline.tools):
                     try:
                         result = await tool.execute(current_input, context)
-                        results.append({
-                            'tool': tool.schema.name,
-                            'step': i + 1,
-                            'result': result
-                        })
-
+                        results.append({"tool": tool.schema.name, "step": i + 1, "result": result})
+
                         # Parse result for next step
                         if isinstance(result, str):
                             try:
                                 parsed = json.loads(result)
                                 current_input = parsed
                             except json.JSONDecodeError:
-                                current_input = {'input': result}
+                                current_input = {"input": result}
                         else:
-                            current_input = result.data if hasattr(result, 'data') else result
-
+                            current_input = result.data if hasattr(result, "data") else result
+
                     except Exception as e:
                         return ToolResponse.error(
-                            code='pipeline_error',
+                            code="pipeline_error",
                             message=f"Pipeline failed at step {i + 1} ({tool.schema.name}): {str(e)}",
-                            details={'step': i + 1, 'tool': tool.schema.name, 'results': results}
+                            details={"step": i + 1, "tool": tool.schema.name, "results": results},
                         )
-
+
                 return ToolResponse.success(
                     data=current_input,
                     metadata={
-                        'pipeline_id': self.pipeline.pipeline_id,
-                        'steps_executed': len(results),
-                        'step_results': results
-                    }
+                        "pipeline_id": self.pipeline.pipeline_id,
+                        "steps_executed": len(results),
+                        "step_results": results,
+                    },
                 )
-
+
         return PipelineTool(self)


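The pipeline's chaining rule is visible in `execute` above: a string result is parsed as JSON to become the next step's input, with a `{"input": result}` fallback for non-JSON strings, while an object result contributes its `.data`. Below is a minimal usage sketch; the `UppercaseTool` class, its argument shape, and the bare `context=None` are illustrative assumptions, not part of the package:

    import asyncio
    import json

    from jaf.core.composition import create_tool_pipeline
    from jaf.core.types import ToolSchema  # per "from .types import ..." above

    class UppercaseTool:
        # Hypothetical stand-in: any object with a `schema` property and an
        # async `execute(args, context)` coroutine satisfies the Tool protocol.
        def __init__(self, name: str):
            self._name = name

        @property
        def schema(self):
            return ToolSchema(name=self._name, description="Uppercase the input", parameters=None)

        async def execute(self, args, context):
            # Emit JSON so the pipeline can parse it into the next step's input.
            return json.dumps({"input": str(args.get("input", args)).upper()})

    pipeline = create_tool_pipeline(UppercaseTool("first"), UppercaseTool("second"), name="demo")
    result = asyncio.run(pipeline.execute({"input": "hello"}, context=None))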
@@ -130,128 +137,128 @@ class ParallelToolExecution:
     """
     Executes multiple tools in parallel and combines their results.
     """
-
-    def __init__(self, *tools: Tool[Any, Any], name: str = 'parallel', combine_strategy: str = 'merge'):
+
+    def __init__(
+        self, *tools: Tool[Any, Any], name: str = "parallel", combine_strategy: str = "merge"
+    ):
         self.tools = list(tools)
         self.name = name
         self.combine_strategy = combine_strategy
         self.execution_id = f"parallel_{id(self)}"
-
+
     def create_tool(self) -> Tool[Any, Any]:
         """Create a single tool that executes all tools in parallel."""
-
+
         class ParallelTool:
             def __init__(self, executor: ParallelToolExecution):
                 self.executor = executor
-
+
             @property
             def schema(self) -> ToolSchema[Any]:
                 tool_names = [t.schema.name for t in self.executor.tools]
                 return ToolSchema(
                     name=f"{self.executor.name}_parallel",
                     description=f"Parallel execution of {len(self.executor.tools)} tools: {', '.join(tool_names)}",
-                    parameters=self.executor.tools[0].schema.parameters
+                    parameters=self.executor.tools[0].schema.parameters
+                    if self.executor.tools
+                    else None,
                 )
-
+
             async def execute(self, args: Any, context: Any) -> Union[str, ToolResult]:
                 """Execute all tools in parallel."""
                 if not self.executor.tools:
-                    return ToolResponse.error('no_tools', 'No tools to execute')
-
+                    return ToolResponse.error("no_tools", "No tools to execute")
+
                 # Execute all tools concurrently
                 tasks = [
                     self._execute_tool_with_metadata(tool, args, context)
                     for tool in self.executor.tools
                 ]
-
+
                 results = await asyncio.gather(*tasks, return_exceptions=True)
-
+
                 # Process results
                 successful_results = []
                 failed_results = []
-
+
                 for i, result in enumerate(results):
                     tool_name = self.executor.tools[i].schema.name
-
+
                     if isinstance(result, Exception):
-                        failed_results.append({
-                            'tool': tool_name,
-                            'error': str(result),
-                            'index': i
-                        })
+                        failed_results.append({"tool": tool_name, "error": str(result), "index": i})
                     else:
-                        successful_results.append({
-                            'tool': tool_name,
-                            'result': result,
-                            'index': i
-                        })
-
+                        successful_results.append({"tool": tool_name, "result": result, "index": i})
+
                 # Combine results based on strategy
                 combined_data = self._combine_results(successful_results)
-
+
                 return ToolResponse.success(
                     data=combined_data,
                     metadata={
-                        'execution_id': self.executor.execution_id,
-                        'successful_count': len(successful_results),
-                        'failed_count': len(failed_results),
-                        'failed_tools': failed_results,
-                        'combine_strategy': self.executor.combine_strategy
-                    }
+                        "execution_id": self.executor.execution_id,
+                        "successful_count": len(successful_results),
+                        "failed_count": len(failed_results),
+                        "failed_tools": failed_results,
+                        "combine_strategy": self.executor.combine_strategy,
+                    },
                 )
-
-            async def _execute_tool_with_metadata(self, tool: Tool[Any, Any], args: Any, context: Any) -> Dict[str, Any]:
+
+            async def _execute_tool_with_metadata(
+                self, tool: Tool[Any, Any], args: Any, context: Any
+            ) -> Dict[str, Any]:
                 """Execute a single tool and wrap result with metadata."""
-                start_time = __import__('time').time()
+                start_time = __import__("time").time()
                 try:
                     result = await tool.execute(args, context)
-                    execution_time = (__import__('time').time() - start_time) * 1000
-
+                    execution_time = (__import__("time").time() - start_time) * 1000
+
                     return {
-                        'success': True,
-                        'data': result,
-                        'execution_time_ms': execution_time,
-                        'tool_name': tool.schema.name
+                        "success": True,
+                        "data": result,
+                        "execution_time_ms": execution_time,
+                        "tool_name": tool.schema.name,
                     }
                 except Exception as e:
-                    execution_time = (__import__('time').time() - start_time) * 1000
+                    execution_time = (__import__("time").time() - start_time) * 1000
                     return {
-                        'success': False,
-                        'error': str(e),
-                        'execution_time_ms': execution_time,
-                        'tool_name': tool.schema.name
+                        "success": False,
+                        "error": str(e),
+                        "execution_time_ms": execution_time,
+                        "tool_name": tool.schema.name,
                     }
-
+
             def _combine_results(self, results: List[Dict[str, Any]]) -> Any:
                 """Combine results based on the configured strategy."""
                 if not results:
                     return None
-
+
                 if self.executor.combine_strategy == "merge":
                     # Merge all results into a single dictionary
                     combined = {}
                     for result in results:
-                        tool_name = result['tool']
-                        combined[tool_name] = result['result']
+                        tool_name = result["tool"]
+                        combined[tool_name] = result["result"]
                     return combined
-
+
                 elif self.executor.combine_strategy == "array":
                     # Return results as an array
-                    return [result['result'] for result in results]
-
+                    return [result["result"] for result in results]
+
                 elif self.executor.combine_strategy == "first":
                     # Return the first successful result
-                    return results[0]['result'] if results else None
-
+                    return results[0]["result"] if results else None
+
                 elif self.executor.combine_strategy == "best":
                     # Return the result with the best execution time
-                    best_result = min(results, key=lambda r: r.get('execution_time_ms', float('inf')))
-                    return best_result['result']
-
+                    best_result = min(
+                        results, key=lambda r: r.get("execution_time_ms", float("inf"))
+                    )
+                    return best_result["result"]
+
                 else:
                     # Default to merge strategy
                     return self._combine_results(results)
-
+
         return ParallelTool(self)


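The four `combine_strategy` values implemented in `_combine_results` above: "merge" keys each output by tool name, "array" keeps them in input order, "first" takes the first success, and "best" picks the fastest by `execution_time_ms`. A sketch reusing the hypothetical `UppercaseTool` from the pipeline example; distinct names matter under "merge", since outputs are keyed by tool name:

    from jaf.core.composition import create_parallel_tools

    fan_out = create_parallel_tools(
        UppercaseTool("left"),
        UppercaseTool("right"),
        name="fanout",
        combine_strategy="merge",  # or "array", "first", "best"
    )
    result = asyncio.run(fan_out.execute({"input": "hi"}, context=None))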
@@ -259,66 +266,72 @@ class ConditionalTool:
     """
     Creates a tool that conditionally executes different tools based on input.
     """
-
+
     def __init__(
         self,
         condition: Callable[[Any], bool],
         true_tool: Tool[Any, Any],
         false_tool: Tool[Any, Any],
-        name: str = "conditional"
+        name: str = "conditional",
     ):
         self.condition = condition
         self.true_tool = true_tool
         self.false_tool = false_tool
         self.name = name
-
+
     def create_tool(self) -> Tool[Any, Any]:
         """Create a conditional tool."""
-
+
         class ConditionalToolImpl:
             def __init__(self, conditional: ConditionalTool):
                 self.conditional = conditional
-
+
             @property
             def schema(self) -> ToolSchema[Any]:
                 return ToolSchema(
                     name=f"{self.conditional.name}_conditional",
                     description=f"Conditional execution: {self.conditional.true_tool.schema.name} or {self.conditional.false_tool.schema.name}",
-                    parameters=self.conditional.true_tool.schema.parameters
+                    parameters=self.conditional.true_tool.schema.parameters,
                 )
-
+
             async def execute(self, args: Any, context: Any) -> Union[str, ToolResult]:
                 """Execute the appropriate tool based on condition."""
                 try:
                     condition_result = self.conditional.condition(args)
-                    selected_tool = self.conditional.true_tool if condition_result else self.conditional.false_tool
-
+                    selected_tool = (
+                        self.conditional.true_tool
+                        if condition_result
+                        else self.conditional.false_tool
+                    )
+
                     result = await selected_tool.execute(args, context)
-
+
                     # Wrap result with conditional metadata
                     if isinstance(result, str):
-                        return json.dumps({
-                            'result': result,
-                            'condition_met': condition_result,
-                            'selected_tool': selected_tool.schema.name
-                        })
+                        return json.dumps(
+                            {
+                                "result": result,
+                                "condition_met": condition_result,
+                                "selected_tool": selected_tool.schema.name,
+                            }
+                        )
                     else:
                         return ToolResponse.success(
-                            data=result.data if hasattr(result, 'data') else result,
+                            data=result.data if hasattr(result, "data") else result,
                             metadata={
-                                'condition_met': condition_result,
-                                'selected_tool': selected_tool.schema.name,
-                                'conditional_name': self.conditional.name
-                            }
+                                "condition_met": condition_result,
+                                "selected_tool": selected_tool.schema.name,
+                                "conditional_name": self.conditional.name,
+                            },
                        )
-
+
                 except Exception as e:
                     return ToolResponse.error(
-                        code='conditional_error',
+                        code="conditional_error",
                         message=f"Conditional tool execution failed: {str(e)}",
-                        details={'conditional_name': self.conditional.name}
+                        details={"conditional_name": self.conditional.name},
                     )
-
+
         return ConditionalToolImpl(self)


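The condition runs per call inside the `try` block, so a condition that raises surfaces as a `conditional_error` rather than falling through to `false_tool`. A sketch of the `create_conditional_tool` helper defined later in this file; `fast_tool` and `slow_tool` are placeholders for any two Tool objects:

    from jaf.core.composition import create_conditional_tool

    router = create_conditional_tool(
        condition=lambda args: len(str(args)) < 100,  # small inputs take the fast path
        true_tool=fast_tool,
        false_tool=slow_tool,
        name="size_router",
    )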
@@ -326,82 +339,84 @@ class RetryTool:
     """
     Wraps a tool with retry logic for handling transient failures.
     """
-
+
     def __init__(
         self,
         tool: Tool[Any, Any],
         max_retries: int = 3,
         backoff_factor: float = 1.0,
-        retry_on: Optional[Callable[[Exception], bool]] = None
+        retry_on: Optional[Callable[[Exception], bool]] = None,
     ):
         self.tool = tool
         self.max_retries = max_retries
         self.backoff_factor = backoff_factor
         self.retry_on = retry_on or (lambda e: True)  # Retry on all exceptions by default
-
+
     def create_tool(self) -> Tool[Any, Any]:
         """Create a retry-enabled tool."""
-
+
         class RetryToolImpl:
             def __init__(self, retry_tool: RetryTool):
                 self.retry_tool = retry_tool
-
+
             @property
             def schema(self) -> ToolSchema[Any]:
                 return ToolSchema(
                     name=f"{self.retry_tool.tool.schema.name}_retry",
                     description=f"Retry-enabled: {self.retry_tool.tool.schema.description}",
-                    parameters=self.retry_tool.tool.schema.parameters
+                    parameters=self.retry_tool.tool.schema.parameters,
                 )
-
+
             async def execute(self, args: Any, context: Any) -> Union[str, ToolResult]:
                 """Execute with retry logic."""
                 last_exception = None
-
+
                 for attempt in range(self.retry_tool.max_retries + 1):
                     try:
                         result = await self.retry_tool.tool.execute(args, context)
-
+
                         # Success - wrap with retry metadata
                         if isinstance(result, str):
-                            return json.dumps({
-                                'result': result,
-                                'attempts': attempt + 1,
-                                'max_retries': self.retry_tool.max_retries
-                            })
+                            return json.dumps(
+                                {
+                                    "result": result,
+                                    "attempts": attempt + 1,
+                                    "max_retries": self.retry_tool.max_retries,
+                                }
+                            )
                         else:
                             return ToolResponse.success(
-                                data=result.data if hasattr(result, 'data') else result,
+                                data=result.data if hasattr(result, "data") else result,
                                 metadata={
-                                    'attempts': attempt + 1,
-                                    'max_retries': self.retry_tool.max_retries,
-                                    'original_tool': self.retry_tool.tool.schema.name
-                                }
+                                    "attempts": attempt + 1,
+                                    "max_retries": self.retry_tool.max_retries,
+                                    "original_tool": self.retry_tool.tool.schema.name,
+                                },
                             )
-
+
                     except Exception as e:
                         last_exception = e
-
+
                         # Check if we should retry this exception
                         if not self.retry_tool.retry_on(e):
                             break
-
+
                         # Don't sleep on the last attempt
                         if attempt < self.retry_tool.max_retries:
-                            sleep_time = self.retry_tool.backoff_factor * (2 ** attempt)
+                            sleep_time = self.retry_tool.backoff_factor * (2**attempt)
                             await asyncio.sleep(sleep_time)
-
+
                 # All retries exhausted
                 return ToolResponse.error(
-                    code='retry_exhausted',
+                    code="retry_exhausted",
                     message=f"Tool failed after {self.retry_tool.max_retries + 1} attempts: {str(last_exception)}",
                     details={
-                        'attempts': self.retry_tool.max_retries + 1,
-                        'last_error': str(last_exception),
-                        'original_tool': self.retry_tool.tool.schema.name
-                    }
+                        "attempts": self.retry_tool.max_retries + 1,
+                        "last_error": str(last_exception),
+                        "original_tool": self.retry_tool.tool.schema.name,
+                    },
                 )
-
+
         return RetryToolImpl(self)


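The backoff above sleeps `backoff_factor * 2**attempt` seconds between failed attempts, so the defaults (`max_retries=3`, `backoff_factor=1.0`) wait 1 s, 2 s, and 4 s across four total attempts. A sketch of the `with_retry` helper defined later in this file; `flaky_tool` is a placeholder:

    from jaf.core.composition import with_retry

    hardened = with_retry(
        flaky_tool,
        max_retries=3,
        backoff_factor=0.5,  # sleeps 0.5 s, 1 s, 2 s between failed attempts
        retry_on=lambda e: isinstance(e, (TimeoutError, ConnectionError)),
    )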
@@ -409,110 +424,114 @@ class CachedTool:
     """
     Wraps a tool with caching to avoid redundant executions.
     """
-
+
     def __init__(
         self,
         tool: Tool[Any, Any],
         cache_key_fn: Optional[Callable[[Any], str]] = None,
-        ttl_seconds: Optional[float] = None
+        ttl_seconds: Optional[float] = None,
     ):
         self.tool = tool
         self.cache_key_fn = cache_key_fn or (lambda args: str(hash(str(args))))
         self.ttl_seconds = ttl_seconds
         self.cache: Dict[str, Dict[str, Any]] = {}
-
+
     def create_tool(self) -> Tool[Any, Any]:
         """Create a cached tool."""
-
+
         class CachedToolImpl:
             def __init__(self, cached_tool: CachedTool):
                 self.cached_tool = cached_tool
-
+
             @property
             def schema(self) -> ToolSchema[Any]:
                 return ToolSchema(
                     name=f"{self.cached_tool.tool.schema.name}_cached",
                     description=f"Cached: {self.cached_tool.tool.schema.description}",
-                    parameters=self.cached_tool.tool.schema.parameters
+                    parameters=self.cached_tool.tool.schema.parameters,
                 )
-
+
             async def execute(self, args: Any, context: Any) -> Union[str, ToolResult]:
                 """Execute with caching."""
                 cache_key = self.cached_tool.cache_key_fn(args)
-                current_time = __import__('time').time()
-
+                current_time = __import__("time").time()
+
                 # Check cache
                 if cache_key in self.cached_tool.cache:
                     cache_entry = self.cached_tool.cache[cache_key]
-
+
                     # Check TTL
-                    if (self.cached_tool.ttl_seconds is None or
-                        current_time - cache_entry['timestamp'] < self.cached_tool.ttl_seconds):
-
+                    if (
+                        self.cached_tool.ttl_seconds is None
+                        or current_time - cache_entry["timestamp"] < self.cached_tool.ttl_seconds
+                    ):
                         # Cache hit
-                        cached_result = cache_entry['result']
-
+                        cached_result = cache_entry["result"]
+
                         if isinstance(cached_result, str):
-                            return json.dumps({
-                                'result': cached_result,
-                                'cache_hit': True,
-                                'cached_at': cache_entry['timestamp']
-                            })
+                            return json.dumps(
+                                {
+                                    "result": cached_result,
+                                    "cache_hit": True,
+                                    "cached_at": cache_entry["timestamp"],
+                                }
+                            )
                         else:
                             return ToolResponse.success(
                                 data=cached_result,
                                 metadata={
-                                    'cache_hit': True,
-                                    'cached_at': cache_entry['timestamp'],
-                                    'original_tool': self.cached_tool.tool.schema.name
-                                }
+                                    "cache_hit": True,
+                                    "cached_at": cache_entry["timestamp"],
+                                    "original_tool": self.cached_tool.tool.schema.name,
+                                },
                             )
-
+
                 # Cache miss - execute tool
                 try:
                     result = await self.cached_tool.tool.execute(args, context)
-
+
                     # Store in cache
                     self.cached_tool.cache[cache_key] = {
-                        'result': result,
-                        'timestamp': current_time
+                        "result": result,
+                        "timestamp": current_time,
                     }
-
+
                     # Return with cache metadata
                     if isinstance(result, str):
-                        return json.dumps({
-                            'result': result,
-                            'cache_hit': False,
-                            'cached_at': current_time
-                        })
+                        return json.dumps(
+                            {"result": result, "cache_hit": False, "cached_at": current_time}
+                        )
                     else:
                         return ToolResponse.success(
-                            data=result.data if hasattr(result, 'data') else result,
+                            data=result.data if hasattr(result, "data") else result,
                             metadata={
-                                'cache_hit': False,
-                                'cached_at': current_time,
-                                'original_tool': self.cached_tool.tool.schema.name
-                            }
+                                "cache_hit": False,
+                                "cached_at": current_time,
+                                "original_tool": self.cached_tool.tool.schema.name,
+                            },
                         )
-
+
                 except Exception as e:
                     return ToolResponse.error(
-                        code='cached_tool_error',
+                        code="cached_tool_error",
                         message=f"Cached tool execution failed: {str(e)}",
-                        details={'original_tool': self.cached_tool.tool.schema.name}
+                        details={"original_tool": self.cached_tool.tool.schema.name},
                     )
-
+
         return CachedToolImpl(self)


 # Convenience functions for creating composed tools

+
 def create_tool_pipeline(*tools: Tool[Any, Any], name: str = "pipeline") -> Tool[Any, Any]:
     """Create a pipeline of tools that execute in sequence."""
     return ToolPipeline(*tools, name=name).create_tool()


-def create_parallel_tools(*tools: Tool[Any, Any], name: str = 'parallel', combine_strategy: str = 'merge') -> Tool[Any, Any]:
+def create_parallel_tools(
+    *tools: Tool[Any, Any], name: str = "parallel", combine_strategy: str = "merge"
+) -> Tool[Any, Any]:
     """Create a tool that executes multiple tools in parallel."""
     return ParallelToolExecution(*tools, name=name, combine_strategy=combine_strategy).create_tool()
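Note that the default cache key is `str(hash(str(args)))`, which depends on the arguments' string form (and, via hash randomization, on the process), so callers wanting stable, order-independent keys should pass `cache_key_fn`. A sketch; `expensive_tool` is a placeholder and the key function assumes JSON-serializable arguments:

    import json

    from jaf.core.composition import with_cache

    cached = with_cache(
        expensive_tool,
        cache_key_fn=lambda args: json.dumps(args, sort_keys=True),  # stable key
        ttl_seconds=300.0,  # entries expire after five minutes
    )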
@@ -521,7 +540,7 @@ def create_conditional_tool(
     condition: Callable[[Any], bool],
     true_tool: Tool[Any, Any],
     false_tool: Tool[Any, Any],
-    name: str = "conditional"
+    name: str = "conditional",
 ) -> Tool[Any, Any]:
     """Create a tool that conditionally executes different tools."""
     return ConditionalTool(condition, true_tool, false_tool, name).create_tool()
@@ -531,7 +550,7 @@ def with_retry(
     tool: Tool[Any, Any],
     max_retries: int = 3,
     backoff_factor: float = 1.0,
-    retry_on: Optional[Callable[[Exception], bool]] = None
+    retry_on: Optional[Callable[[Exception], bool]] = None,
 ) -> Tool[Any, Any]:
     """Wrap a tool with retry logic."""
     return RetryTool(tool, max_retries, backoff_factor, retry_on).create_tool()
@@ -540,7 +559,7 @@ def with_retry(
 def with_cache(
     tool: Tool[Any, Any],
     cache_key_fn: Optional[Callable[[Any], str]] = None,
-    ttl_seconds: Optional[float] = None
+    ttl_seconds: Optional[float] = None,
 ) -> Tool[Any, Any]:
     """Wrap a tool with caching."""
     return CachedTool(tool, cache_key_fn, ttl_seconds).create_tool()
@@ -548,64 +567,59 @@ def with_cache(

 def with_timeout(tool: Tool[Any, Any], timeout_seconds: float) -> Tool[Any, Any]:
     """Wrap a tool with timeout protection."""
-
+
     class TimeoutTool:
         @property
         def schema(self) -> ToolSchema[Any]:
             return ToolSchema(
                 name=f"{tool.schema.name}_timeout",
                 description=f"Timeout-protected: {tool.schema.description}",
-                parameters=tool.schema.parameters
+                parameters=tool.schema.parameters,
             )
-
+
         async def execute(self, args: Any, context: Any) -> Union[str, ToolResult]:
             """Execute with timeout protection."""
             try:
                 result = await asyncio.wait_for(
-                    tool.execute(args, context),
-                    timeout=timeout_seconds
+                    tool.execute(args, context), timeout=timeout_seconds
                 )
-
+
                 if isinstance(result, str):
-                    return json.dumps({
-                        'result': result,
-                        'timeout_seconds': timeout_seconds,
-                        'timed_out': False
-                    })
+                    return json.dumps(
+                        {"result": result, "timeout_seconds": timeout_seconds, "timed_out": False}
+                    )
                 else:
                     return ToolResponse.success(
-                        data=result.data if hasattr(result, 'data') else result,
+                        data=result.data if hasattr(result, "data") else result,
                         metadata={
-                            'timeout_seconds': timeout_seconds,
-                            'timed_out': False,
-                            'original_tool': tool.schema.name
-                        }
+                            "timeout_seconds": timeout_seconds,
+                            "timed_out": False,
+                            "original_tool": tool.schema.name,
+                        },
                     )
-
+
             except asyncio.TimeoutError:
                 return ToolResponse.error(
-                    code='timeout_error',
+                    code="timeout_error",
                     message=f"Tool execution timed out after {timeout_seconds} seconds",
-                    details={
-                        'timeout_seconds': timeout_seconds,
-                        'original_tool': tool.schema.name
-                    }
+                    details={"timeout_seconds": timeout_seconds, "original_tool": tool.schema.name},
                 )
-
+
     return TimeoutTool()


 # Higher-order composition functions

+
 def compose_tools(*transformers: ToolTransformer) -> Callable[[Tool[Any, Any]], Tool[Any, Any]]:
     """Compose multiple tool transformers into a single transformation."""
-
+
     def composed_transform(tool: Tool[Any, Any]) -> Tool[Any, Any]:
         result = tool
         for transformer in transformers:
             result = transformer.transform(result)
         return result
-
+
     return composed_transform


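`compose_tools` folds its transformers left to right, so the first one listed is applied first and ends up innermost. A sketch of an object satisfying the single-method `ToolTransformer` protocol; the `TimeoutTransformer` class and `base_tool` are illustrative:

    from jaf.core.composition import compose_tools, with_timeout

    class TimeoutTransformer:
        # Satisfies ToolTransformer: one transform(tool) -> tool method.
        def __init__(self, seconds: float):
            self.seconds = seconds

        def transform(self, tool):
            return with_timeout(tool, self.seconds)

    harden = compose_tools(TimeoutTransformer(5.0))
    protected = harden(base_tool)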
@@ -613,39 +627,39 @@ class ToolComposer:
     """
     Builder class for creating complex tool compositions.
     """
-
+
     def __init__(self, base_tool: Tool[Any, Any]):
         self.tool = base_tool
         self.composition_history: List[str] = []
-
-    def with_retry(self, max_retries: int = 3, backoff_factor: float = 1.0) -> 'ToolComposer':
+
+    def with_retry(self, max_retries: int = 3, backoff_factor: float = 1.0) -> "ToolComposer":
         """Add retry capability."""
         self.tool = with_retry(self.tool, max_retries, backoff_factor)
         self.composition_history.append(f"retry(max={max_retries}, backoff={backoff_factor})")
         return self
-
-    def with_cache(self, ttl_seconds: Optional[float] = None) -> 'ToolComposer':
+
+    def with_cache(self, ttl_seconds: Optional[float] = None) -> "ToolComposer":
         """Add caching capability."""
         self.tool = with_cache(self.tool, ttl_seconds=ttl_seconds)
         self.composition_history.append(f"cache(ttl={ttl_seconds})")
         return self
-
-    def with_timeout(self, timeout_seconds: float) -> 'ToolComposer':
+
+    def with_timeout(self, timeout_seconds: float) -> "ToolComposer":
         """Add timeout protection."""
         self.tool = with_timeout(self.tool, timeout_seconds)
         self.composition_history.append(f"timeout({timeout_seconds}s)")
         return self
-
+
     def build(self) -> Tool[Any, Any]:
         """Build the final composed tool."""
         return self.tool
-
+
     def get_composition_info(self) -> Dict[str, Any]:
         """Get information about the composition."""
         return {
-            'base_tool': self.tool.schema.name,
-            'transformations': self.composition_history,
-            'final_name': self.tool.schema.name
+            "base_tool": self.tool.schema.name,
+            "transformations": self.composition_history,
+            "final_name": self.tool.schema.name,
         }
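Because every `with_*` method rewraps `self.tool` and returns `self`, calls chain, and wrappers nest in the order written (retry innermost below, timeout outermost). A sketch; `base_tool` is a placeholder:

    from jaf.core.composition import ToolComposer

    tool = (
        ToolComposer(base_tool)
        .with_retry(max_retries=2, backoff_factor=0.5)
        .with_cache(ttl_seconds=60.0)
        .with_timeout(10.0)
        .build()
    )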