RouteKitAI 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- routekitai/__init__.py +53 -0
- routekitai/cli/__init__.py +18 -0
- routekitai/cli/main.py +40 -0
- routekitai/cli/replay.py +80 -0
- routekitai/cli/run.py +95 -0
- routekitai/cli/serve.py +966 -0
- routekitai/cli/test_agent.py +178 -0
- routekitai/cli/trace.py +209 -0
- routekitai/cli/trace_analyze.py +120 -0
- routekitai/cli/trace_search.py +126 -0
- routekitai/core/__init__.py +58 -0
- routekitai/core/agent.py +325 -0
- routekitai/core/errors.py +49 -0
- routekitai/core/hooks.py +174 -0
- routekitai/core/memory.py +54 -0
- routekitai/core/message.py +132 -0
- routekitai/core/model.py +91 -0
- routekitai/core/policies.py +373 -0
- routekitai/core/policy.py +85 -0
- routekitai/core/policy_adapter.py +133 -0
- routekitai/core/runtime.py +1403 -0
- routekitai/core/tool.py +148 -0
- routekitai/core/tools.py +180 -0
- routekitai/evals/__init__.py +13 -0
- routekitai/evals/dataset.py +75 -0
- routekitai/evals/metrics.py +101 -0
- routekitai/evals/runner.py +184 -0
- routekitai/graphs/__init__.py +12 -0
- routekitai/graphs/executors.py +457 -0
- routekitai/graphs/graph.py +164 -0
- routekitai/memory/__init__.py +13 -0
- routekitai/memory/episodic.py +242 -0
- routekitai/memory/kv.py +34 -0
- routekitai/memory/retrieval.py +192 -0
- routekitai/memory/vector.py +700 -0
- routekitai/memory/working.py +66 -0
- routekitai/message.py +29 -0
- routekitai/model.py +48 -0
- routekitai/observability/__init__.py +21 -0
- routekitai/observability/analyzer.py +314 -0
- routekitai/observability/exporters/__init__.py +10 -0
- routekitai/observability/exporters/base.py +30 -0
- routekitai/observability/exporters/jsonl.py +81 -0
- routekitai/observability/exporters/otel.py +119 -0
- routekitai/observability/spans.py +111 -0
- routekitai/observability/streaming.py +117 -0
- routekitai/observability/trace.py +144 -0
- routekitai/providers/__init__.py +9 -0
- routekitai/providers/anthropic.py +227 -0
- routekitai/providers/azure_openai.py +243 -0
- routekitai/providers/local.py +196 -0
- routekitai/providers/openai.py +321 -0
- routekitai/py.typed +0 -0
- routekitai/sandbox/__init__.py +12 -0
- routekitai/sandbox/filesystem.py +131 -0
- routekitai/sandbox/network.py +142 -0
- routekitai/sandbox/permissions.py +70 -0
- routekitai/tool.py +33 -0
- routekitai-0.1.0.dist-info/METADATA +328 -0
- routekitai-0.1.0.dist-info/RECORD +64 -0
- routekitai-0.1.0.dist-info/WHEEL +5 -0
- routekitai-0.1.0.dist-info/entry_points.txt +2 -0
- routekitai-0.1.0.dist-info/licenses/LICENSE +21 -0
- routekitai-0.1.0.dist-info/top_level.txt +1 -0
routekitai/providers/openai.py
ADDED
@@ -0,0 +1,321 @@
+"""OpenAI-compatible model provider."""
+
+import json
+from collections.abc import AsyncIterator
+from typing import Any
+
+import httpx
+
+from routekitai.core.errors import ModelError
+from routekitai.core.message import Message, MessageRole
+from routekitai.core.model import Model, ModelResponse, StreamEvent, ToolCall, Usage
+from routekitai.core.tool import Tool
+
+
+class OpenAIChatModel(Model):
+    """OpenAI-compatible HTTP model provider.
+
+    Supports OpenAI API and compatible endpoints (e.g., local LLM servers).
+    """
+
+    def __init__(
+        self,
+        name: str = "gpt-4",
+        provider: str = "openai",
+        api_key: str | None = None,
+        base_url: str = "https://api.openai.com/v1",
+        **kwargs: Any,
+    ) -> None:
+        """Initialize OpenAI model.
+
+        Args:
+            name: Model name
+            provider: Provider name
+            api_key: API key (defaults to OPENAI_API_KEY env var)
+            base_url: API base URL
+            **kwargs: Additional configuration
+        """
+        super().__init__()
+        self._name = name
+        self._provider = provider
+        self.api_key = api_key
+        self.base_url = base_url.rstrip("/")
+        self._client: httpx.AsyncClient | None = None
+
+    @property
+    def name(self) -> str:
+        """Return the model name."""
+        return self._name
+
+    @property
+    def provider(self) -> str:
+        """Return the provider name."""
+        return self._provider
+
+    def _get_client(self) -> "httpx.AsyncClient":
+        """Get or create HTTP client."""
+        try:
+            import httpx
+        except ImportError as e:
+            raise ModelError(
+                "httpx is required for OpenAI provider. Install with: pip install httpx"
+            ) from e
+
+        if self._client is None:
+            headers = {"Content-Type": "application/json"}
+            if self.api_key:
+                headers["Authorization"] = f"Bearer {self.api_key}"
+            self._client = httpx.AsyncClient(
+                base_url=self.base_url,
+                headers=headers,
+                timeout=60.0,
+            )
+        return self._client
+
+    def _message_to_openai(self, message: Message) -> dict[str, Any]:
+        """Convert routekitai Message to OpenAI format."""
+        role_map = {
+            MessageRole.SYSTEM: "system",
+            MessageRole.USER: "user",
+            MessageRole.ASSISTANT: "assistant",
+            MessageRole.TOOL: "tool",
+        }
+        msg_dict: dict[str, Any] = {
+            "role": role_map.get(message.role, "user"),
+            "content": message.content,
+        }
+        if message.tool_calls:
+            msg_dict["tool_calls"] = [
+                {
+                    "id": tc.get("id", ""),
+                    "type": "function",
+                    "function": {
+                        "name": tc.get("name", ""),
+                        "arguments": json.dumps(tc.get("arguments", {})),
+                    },
+                }
+                for tc in message.tool_calls
+            ]
+        if message.tool_result:
+            msg_dict["tool_call_id"] = message.tool_result.get("tool_call_id", "")
+        return msg_dict
+
+    def _tools_to_openai(self, tools: list[Tool]) -> list[dict[str, Any]]:
+        """Convert routekitai Tools to OpenAI function format."""
+        return [
+            {
+                "type": "function",
+                "function": {
+                    "name": tool.name,
+                    "description": tool.description,
+                    "parameters": tool.parameters,
+                },
+            }
+            for tool in tools
+        ]
+
+    def _openai_to_tool_calls(self, tool_calls: list[dict[str, Any]]) -> list[ToolCall]:
+        """Convert OpenAI tool calls to routekitai format."""
+        result = []
+        for tc in tool_calls:
+            if tc.get("type") == "function":
+                func = tc.get("function", {})
+                try:
+                    arguments = json.loads(func.get("arguments", "{}"))
+                except (json.JSONDecodeError, TypeError):
+                    arguments = func.get("arguments", {})
+                result.append(
+                    ToolCall(
+                        id=tc.get("id", ""),
+                        name=func.get("name", ""),
+                        arguments=arguments,
+                    )
+                )
+        return result
+
+    async def chat(
+        self,
+        messages: list[Message],
+        tools: list[Tool] | None = None,
+        stream: bool = False,
+        **kwargs: Any,
+    ) -> ModelResponse | AsyncIterator[StreamEvent]:
+        """Chat with OpenAI-compatible model.
+
+        Args:
+            messages: Conversation messages
+            tools: Optional tools
+            stream: Whether to stream the response
+            **kwargs: Additional parameters (temperature, max_tokens, etc.)
+
+        Returns:
+            ModelResponse or stream of events
+
+        Raises:
+            ModelError: If API call fails
+        """
+        client = self._get_client()
+
+        # Convert messages
+        openai_messages = [self._message_to_openai(msg) for msg in messages]
+
+        # Prepare request
+        request_data: dict[str, Any] = {
+            "model": self.name,
+            "messages": openai_messages,
+            "stream": stream,
+            **kwargs,  # Allow temperature, max_tokens, etc.
+        }
+
+        # Add tools if provided
+        if tools:
+            request_data["tools"] = self._tools_to_openai(tools)
+
+        try:
+            if stream:
+                # Streaming mode
+                async def stream_generator() -> AsyncIterator[StreamEvent]:
+                    async with client.stream(
+                        "POST", "/chat/completions", json=request_data
+                    ) as response:
+                        response.raise_for_status()
+                        content_buffer = ""
+                        tool_calls_buffer: dict[int, dict[str, Any]] = {}
+
+                        async for line in response.aiter_lines():
+                            if not line.strip() or line.startswith("data: [DONE]"):
+                                continue
+
+                            if line.startswith("data: "):
+                                line = line[6:]  # Remove "data: " prefix
+
+                            try:
+                                chunk_data = json.loads(line)
+                                delta = chunk_data.get("choices", [{}])[0].get("delta", {})
+
+                                # Handle content delta
+                                if "content" in delta:
+                                    content_chunk = delta["content"]
+                                    content_buffer += content_chunk
+                                    yield StreamEvent(
+                                        type="content",
+                                        content=content_chunk,
+                                        metadata={"chunk": chunk_data},
+                                    )
+
+                                # Handle tool calls delta
+                                if "tool_calls" in delta:
+                                    for tool_call_delta in delta["tool_calls"]:
+                                        index = tool_call_delta.get("index", 0)
+                                        if index not in tool_calls_buffer:
+                                            tool_calls_buffer[index] = {
+                                                "id": "",
+                                                "name": "",
+                                                "arguments": "",
+                                            }
+
+                                        if "id" in tool_call_delta:
+                                            tool_calls_buffer[index]["id"] = tool_call_delta["id"]
+                                        if "function" in tool_call_delta:
+                                            func = tool_call_delta["function"]
+                                            if "name" in func:
+                                                tool_calls_buffer[index]["name"] = func["name"]
+                                            if "arguments" in func:
+                                                tool_calls_buffer[index]["arguments"] += func[
+                                                    "arguments"
+                                                ]
+
+                                # Handle usage (usually in last chunk)
+                                if "usage" in chunk_data:
+                                    usage_data = chunk_data["usage"]
+                                    yield StreamEvent(
+                                        type="usage",
+                                        usage=Usage(
+                                            prompt_tokens=usage_data.get("prompt_tokens", 0),
+                                            completion_tokens=usage_data.get(
+                                                "completion_tokens", 0
+                                            ),
+                                            total_tokens=usage_data.get("total_tokens", 0),
+                                        ),
+                                        metadata={"chunk": chunk_data},
+                                    )
+
+                            except json.JSONDecodeError:
+                                continue
+
+                        # Final event with complete content and tool calls
+                        tool_calls = None
+                        if tool_calls_buffer:
+                            tool_calls = []
+                            for idx in sorted(tool_calls_buffer.keys()):
+                                tc_data = tool_calls_buffer[idx]
+                                try:
+                                    arguments = json.loads(tc_data["arguments"])
+                                except json.JSONDecodeError:
+                                    arguments = {}
+                                tool_calls.append(
+                                    ToolCall(
+                                        id=tc_data["id"],
+                                        name=tc_data["name"],
+                                        arguments=arguments,
+                                    )
+                                )
+
+                        yield StreamEvent(
+                            type="done",
+                            content=content_buffer if content_buffer else None,
+                            tool_calls=tool_calls,
+                        )
+
+                return stream_generator()
+            else:
+                # Non-streaming mode
+                response = await client.post("/chat/completions", json=request_data)
+                response.raise_for_status()
+                data = response.json()
+
+                # Parse response
+                choice = data.get("choices", [{}])[0]
+                message = choice.get("message", {})
+                # Handle None content (can happen when there are tool calls)
+                content = message.get("content") or ""
+                tool_calls_data = message.get("tool_calls", [])
+
+                # Convert tool calls
+                tool_calls = None
+                if tool_calls_data:
+                    tool_calls = self._openai_to_tool_calls(tool_calls_data)
+
+                # Parse usage
+                usage_data = data.get("usage", {})
+                usage = Usage(
+                    prompt_tokens=usage_data.get("prompt_tokens", 0),
+                    completion_tokens=usage_data.get("completion_tokens", 0),
+                    total_tokens=usage_data.get("total_tokens", 0),
+                )
+
+                return ModelResponse(
+                    content=content,
+                    tool_calls=tool_calls,
+                    usage=usage,
+                    metadata={"raw_response": data},
+                )
+
+        except httpx.HTTPStatusError as e:
+            raise ModelError(
+                f"OpenAI API error: {e.response.status_code} - {e.response.text}"
+            ) from e
+        except Exception as e:
+            raise ModelError(f"Failed to call OpenAI API: {e}") from e
+
+    async def __aenter__(self) -> "OpenAIChatModel":
+        """Async context manager entry."""
+        return self
+
+    async def __aexit__(
+        self, exc_type: type[BaseException] | None, exc_val: BaseException | None, exc_tb: Any
+    ) -> None:
+        """Async context manager exit."""
+        if self._client:
+            await self._client.aclose()
+            self._client = None
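Usage sketch for the provider above (not part of the package). It exercises only what this diff shows: the constructor, the async context manager, and chat() in both non-streaming and streaming modes. The Message constructor shape and the explicit OPENAI_API_KEY lookup are assumptions, since routekitai/core/message.py is not included in this excerpt.

import asyncio
import os

from routekitai.core.message import Message, MessageRole
from routekitai.providers.openai import OpenAIChatModel


async def main() -> None:
    # Assumed constructor shape for Message; the actual model lives in
    # routekitai/core/message.py, which is not shown in this diff.
    messages = [
        Message(role=MessageRole.SYSTEM, content="You are a helpful assistant."),
        Message(role=MessageRole.USER, content="Say hello in one sentence."),
    ]

    model = OpenAIChatModel(name="gpt-4", api_key=os.environ.get("OPENAI_API_KEY"))

    # The async context manager closes the underlying httpx.AsyncClient on exit.
    async with model:
        # Non-streaming: chat() returns a ModelResponse with content, tool_calls, and usage.
        response = await model.chat(messages, temperature=0.2)
        print(response.content)

        # Streaming: chat(stream=True) returns an async iterator of StreamEvent objects
        # ("content" chunks, optional "usage", and a final "done" event).
        stream = await model.chat(messages, stream=True)
        async for event in stream:
            if event.type == "content":
                print(event.content, end="", flush=True)


asyncio.run(main())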
routekitai/py.typed
ADDED
File without changes
routekitai/sandbox/__init__.py
ADDED
@@ -0,0 +1,12 @@
+"""Sandbox and security for RouteKit."""
+
+# TODO: Implement sandbox and security features
+from routekitai.sandbox.filesystem import FilesystemSandbox
+from routekitai.sandbox.network import NetworkSandbox
+from routekitai.sandbox.permissions import PermissionManager
+
+__all__ = [
+    "PermissionManager",
+    "NetworkSandbox",
+    "FilesystemSandbox",
+]
routekitai/sandbox/filesystem.py
ADDED
@@ -0,0 +1,131 @@
+"""Filesystem sandbox for tool execution."""
+
+import shutil
+from pathlib import Path
+from typing import Any
+
+from pydantic import BaseModel, Field
+
+from routekitai.core.errors import RuntimeError as RouteKitRuntimeError
+
+
+class FilesystemSandboxError(RouteKitRuntimeError):
+    """Error raised by filesystem sandbox operations."""
+
+    pass
+
+
+class FilesystemSandbox(BaseModel):
+    """Filesystem sandbox for controlling tool file access.
+
+    Provides path restrictions, read-only mounts, and basic isolation.
+    """
+
+    allowed_paths: list[Path] = Field(default_factory=list, description="Allowed paths")
+    read_only_paths: list[Path] = Field(default_factory=list, description="Read-only paths")
+    sandbox_root: Path | None = Field(default=None, description="Sandbox root directory")
+
+    def check_path(self, path: Path, operation: str = "read") -> bool:
+        """Check if path operation is allowed.
+
+        Args:
+            path: File path
+            operation: Operation type (read, write, delete)
+
+        Returns:
+            True if allowed, False otherwise
+        """
+        # Resolve path to absolute
+        try:
+            resolved_path = path.resolve()
+        except (OSError, RuntimeError):
+            return False
+
+        # If sandbox_root is set, ensure path is within it
+        if self.sandbox_root:
+            try:
+                sandbox_resolved = self.sandbox_root.resolve()
+                # Check if path is within sandbox
+                if not str(resolved_path).startswith(str(sandbox_resolved)):
+                    return False
+            except (OSError, RuntimeError):
+                return False
+
+        # Check if path is in allowed paths
+        if self.allowed_paths:
+            is_allowed = False
+            for allowed in self.allowed_paths:
+                try:
+                    allowed_resolved = allowed.resolve()
+                    if str(resolved_path).startswith(str(allowed_resolved)):
+                        is_allowed = True
+                        break
+                except (OSError, RuntimeError):
+                    continue
+            if not is_allowed:
+                return False
+
+        # Check if path is read-only for write/delete operations
+        if operation in ("write", "delete"):
+            for read_only in self.read_only_paths:
+                try:
+                    read_only_resolved = read_only.resolve()
+                    if str(resolved_path).startswith(str(read_only_resolved)):
+                        return False
+                except (OSError, RuntimeError):
+                    continue
+
+        return True
+
+    async def execute_operation(self, path: Path, operation: str, **kwargs: Any) -> Any:
+        """Execute filesystem operation through sandbox.
+
+        Args:
+            path: File path
+            operation: Operation type (read, write, delete, list)
+            **kwargs: Operation parameters (content for write, etc.)
+
+        Returns:
+            Operation result
+
+        Raises:
+            FilesystemSandboxError: If operation is not allowed or fails
+        """
+        if not self.check_path(path, operation):
+            raise FilesystemSandboxError(
+                f"Operation '{operation}' not allowed on path: {path}",
+                context={"path": str(path), "operation": operation},
+            )
+
+        try:
+            if operation == "read":
+                return path.read_text(encoding="utf-8")
+            elif operation == "write":
+                content = kwargs.get("content", "")
+                path.parent.mkdir(parents=True, exist_ok=True)
+                path.write_text(content, encoding="utf-8")
+                return {"success": True, "path": str(path)}
+            elif operation == "delete":
+                if path.is_file():
+                    path.unlink()
+                elif path.is_dir():
+                    shutil.rmtree(path)
+                return {"success": True, "path": str(path)}
+            elif operation == "list":
+                if path.is_dir():
+                    return [str(p) for p in path.iterdir()]
+                return []
+            elif operation == "exists":
+                return path.exists()
+            elif operation == "mkdir":
+                path.mkdir(parents=True, exist_ok=True)
+                return {"success": True, "path": str(path)}
+            else:
+                raise FilesystemSandboxError(
+                    f"Unknown operation: {operation}", context={"operation": operation}
+                )
+        except Exception as e:
+            raise FilesystemSandboxError(
+                f"Filesystem operation failed: {e}",
+                context={"path": str(path), "operation": operation},
+            ) from e
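Usage sketch for FilesystemSandbox (not part of the package); the workspace paths are hypothetical. check_path() only answers allow/deny, while execute_operation() runs the operation after the same check and wraps failures in FilesystemSandboxError.

import asyncio
from pathlib import Path

from routekitai.sandbox.filesystem import FilesystemSandbox, FilesystemSandboxError


async def main() -> None:
    workspace = Path("/tmp/agent-workspace")  # hypothetical sandbox root

    sandbox = FilesystemSandbox(
        sandbox_root=workspace,
        allowed_paths=[workspace],
        read_only_paths=[workspace / "config"],
    )

    # Pure checks: inside the root and allowed list, not under a read-only mount.
    print(sandbox.check_path(workspace / "notes.txt", "write"))            # True
    print(sandbox.check_path(Path("/etc/passwd"), "read"))                 # False (outside root)
    print(sandbox.check_path(workspace / "config" / "app.toml", "write"))  # False (read-only)

    # Guarded operations: write then read back inside the sandbox.
    await sandbox.execute_operation(workspace / "notes.txt", "write", content="hello")
    print(await sandbox.execute_operation(workspace / "notes.txt", "read"))

    try:
        await sandbox.execute_operation(Path("/etc/passwd"), "read")
    except FilesystemSandboxError as exc:
        print("blocked:", exc)


asyncio.run(main())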
routekitai/sandbox/network.py
ADDED
@@ -0,0 +1,142 @@
+"""Network sandbox for tool execution."""
+
+import time
+from collections import defaultdict
+from typing import Any
+from urllib.parse import urlparse
+
+from pydantic import BaseModel, Field
+
+from routekitai.core.errors import RuntimeError as RouteKitRuntimeError
+
+
+class NetworkSandboxError(RouteKitRuntimeError):
+    """Error raised by network sandbox operations."""
+
+    pass
+
+
+class NetworkSandbox(BaseModel):
+    """Network sandbox for controlling tool network access.
+
+    Provides allowlists, blocklists, and basic rate limiting.
+    """
+
+    allowed_hosts: list[str] = Field(default_factory=list, description="Allowed hostnames")
+    blocked_hosts: list[str] = Field(default_factory=list, description="Blocked hostnames")
+    rate_limit: dict[str, Any] = Field(default_factory=dict, description="Rate limit config")
+
+    def __init__(self, **data: Any) -> None:
+        """Initialize network sandbox."""
+        super().__init__(**data)
+        self._request_history: dict[str, list[float]] = defaultdict(list)
+
+    def _extract_host(self, url: str) -> str:
+        """Extract hostname from URL.
+
+        Args:
+            url: URL string
+
+        Returns:
+            Hostname
+        """
+        try:
+            parsed = urlparse(url)
+            return parsed.hostname or ""
+        except Exception:
+            return ""
+
+    async def check_request(self, url: str, method: str = "GET") -> bool:
+        """Check if network request is allowed.
+
+        Args:
+            url: Request URL
+            method: HTTP method
+
+        Returns:
+            True if allowed, False otherwise
+        """
+        host = self._extract_host(url)
+        if not host:
+            return False
+
+        # Check blocked hosts first
+        if self.blocked_hosts:
+            for blocked in self.blocked_hosts:
+                if blocked in host or host in blocked:
+                    return False
+
+        # Check allowed hosts (if specified, must be in list)
+        if self.allowed_hosts:
+            is_allowed = False
+            for allowed in self.allowed_hosts:
+                if allowed in host or host in allowed:
+                    is_allowed = True
+                    break
+            if not is_allowed:
+                return False
+
+        # Check rate limiting
+        if self.rate_limit:
+            requests_per_minute = self.rate_limit.get("requests_per_minute")
+            if requests_per_minute:
+                now = time.time()
+                # Clean old requests (older than 1 minute)
+                self._request_history[host] = [
+                    timestamp for timestamp in self._request_history[host] if now - timestamp < 60
+                ]
+                # Check if limit exceeded
+                if len(self._request_history[host]) >= requests_per_minute:
+                    return False
+
+        return True
+
+    async def execute_request(self, url: str, method: str = "GET", **kwargs: Any) -> Any:
+        """Execute network request through sandbox.
+
+        Args:
+            url: Request URL
+            method: HTTP method
+            **kwargs: Request parameters
+
+        Returns:
+            Response data
+
+        Raises:
+            NetworkSandboxError: If request is not allowed or fails
+        """
+        if not await self.check_request(url, method):
+            raise NetworkSandboxError(
+                f"Network request not allowed: {method} {url}",
+                context={"url": url, "method": method},
+            )
+
+        # Record request for rate limiting
+        host = self._extract_host(url)
+        if host:
+            self._request_history[host].append(time.time())
+
+        # Execute request using httpx
+        try:
+            import httpx
+
+            async with httpx.AsyncClient() as client:
+                response = await client.request(method, url, **kwargs)
+                response.raise_for_status()
+                return {
+                    "status_code": response.status_code,
+                    "headers": dict(response.headers),
+                    "content": response.text,
+                    "json": response.json()
+                    if response.headers.get("content-type", "").startswith("application/json")
+                    else None,
+                }
+        except ImportError:
+            raise NetworkSandboxError(
+                "httpx is required for network requests. Install with: pip install httpx",
+                context={"url": url, "method": method},
+            ) from None
+        except Exception as e:
+            raise NetworkSandboxError(
+                f"Network request failed: {e}", context={"url": url, "method": method}
+            ) from e
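Usage sketch for NetworkSandbox (not part of the package); the hostnames and rate-limit figure are hypothetical. check_request() applies the blocklist, the allowlist, and the per-host requests-per-minute window; execute_request() re-checks, records the request, and performs it with a short-lived httpx.AsyncClient.

import asyncio

from routekitai.sandbox.network import NetworkSandbox, NetworkSandboxError


async def main() -> None:
    sandbox = NetworkSandbox(
        allowed_hosts=["api.example.com"],        # hypothetical hosts for illustration
        blocked_hosts=["internal.example.com"],
        rate_limit={"requests_per_minute": 30},
    )

    print(await sandbox.check_request("https://api.example.com/v1/items"))    # True
    print(await sandbox.check_request("https://internal.example.com/admin"))  # False (blocked)
    print(await sandbox.check_request("https://other.example.org/"))          # False (not allowlisted)

    try:
        result = await sandbox.execute_request("https://api.example.com/v1/items")
        print(result["status_code"], result["json"])
    except NetworkSandboxError as exc:
        print("refused or failed:", exc)


asyncio.run(main())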
routekitai/sandbox/permissions.py
ADDED
@@ -0,0 +1,70 @@
+"""Permission management for tool execution."""
+
+from enum import Enum
+
+from pydantic import BaseModel, Field
+
+from routekitai.core.tool import ToolPermission
+
+
+class PermissionLevel(str, Enum):
+    """Permission levels for sandbox execution."""
+
+    NONE = "none"
+    READ_ONLY = "read_only"
+    READ_WRITE = "read_write"
+    FULL = "full"
+
+
+class PermissionManager(BaseModel):
+    """Manages permissions for tool execution.
+
+    Guards tool execution by checking permissions before allowing tool calls.
+    """
+
+    permissions: dict[str, PermissionLevel] = Field(
+        default_factory=dict, description="Permission mappings (tool_name -> level)"
+    )
+    default_level: PermissionLevel = Field(
+        default=PermissionLevel.NONE, description="Default permission level"
+    )
+
+    def check_permission(self, permission: ToolPermission, resource: str) -> bool:
+        """Check if permission is granted.
+
+        Args:
+            permission: Tool permission type
+            resource: Resource identifier (typically tool name)
+
+        Returns:
+            True if permission granted, False otherwise
+        """
+        # Check explicit permission
+        if resource in self.permissions:
+            level = self.permissions[resource]
+            if level == PermissionLevel.NONE:
+                return False
+            # For now, any non-NONE level grants permission
+            # Can be refined based on permission type
+            return True
+
+        # Check default level
+        return self.default_level != PermissionLevel.NONE
+
+    def grant_permission(self, resource: str, level: PermissionLevel) -> None:
+        """Grant permission to resource.
+
+        Args:
+            resource: Resource identifier (typically tool name)
+            level: Permission level
+        """
+        self.permissions[resource] = level
+
+    def revoke_permission(self, resource: str) -> None:
+        """Revoke permission from resource.
+
+        Args:
+            resource: Resource identifier
+        """
+        if resource in self.permissions:
+            del self.permissions[resource]
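Usage sketch for PermissionManager (not part of the package). ToolPermission is imported from routekitai/core/tool.py, which is not shown in this diff, so the member name below is hypothetical; as the comments in check_permission() note, any granted level other than NONE currently allows the call regardless of the permission type.

from routekitai.core.tool import ToolPermission
from routekitai.sandbox.permissions import PermissionLevel, PermissionManager

# Deny by default; grant per tool name.
manager = PermissionManager(default_level=PermissionLevel.NONE)
manager.grant_permission("web_search", PermissionLevel.READ_ONLY)
manager.grant_permission("file_writer", PermissionLevel.READ_WRITE)

# ToolPermission.NETWORK is a hypothetical member name used only in this sketch;
# the real members are defined in routekitai/core/tool.py (not shown here).
print(manager.check_permission(ToolPermission.NETWORK, "web_search"))    # True
print(manager.check_permission(ToolPermission.NETWORK, "unknown_tool"))  # False (default_level)

manager.revoke_permission("web_search")
print(manager.check_permission(ToolPermission.NETWORK, "web_search"))    # False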