sandboxy-0.0.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (60) hide show
  1. sandboxy/__init__.py +3 -0
  2. sandboxy/agents/__init__.py +21 -0
  3. sandboxy/agents/base.py +66 -0
  4. sandboxy/agents/llm_prompt.py +308 -0
  5. sandboxy/agents/loader.py +222 -0
  6. sandboxy/api/__init__.py +5 -0
  7. sandboxy/api/app.py +76 -0
  8. sandboxy/api/routes/__init__.py +1 -0
  9. sandboxy/api/routes/agents.py +92 -0
  10. sandboxy/api/routes/local.py +1388 -0
  11. sandboxy/api/routes/tools.py +106 -0
  12. sandboxy/cli/__init__.py +1 -0
  13. sandboxy/cli/main.py +1196 -0
  14. sandboxy/cli/type_detector.py +48 -0
  15. sandboxy/config.py +49 -0
  16. sandboxy/core/__init__.py +1 -0
  17. sandboxy/core/async_runner.py +824 -0
  18. sandboxy/core/mdl_parser.py +441 -0
  19. sandboxy/core/runner.py +599 -0
  20. sandboxy/core/safe_eval.py +165 -0
  21. sandboxy/core/state.py +234 -0
  22. sandboxy/datasets/__init__.py +20 -0
  23. sandboxy/datasets/loader.py +193 -0
  24. sandboxy/datasets/runner.py +442 -0
  25. sandboxy/errors.py +166 -0
  26. sandboxy/local/context.py +235 -0
  27. sandboxy/local/results.py +173 -0
  28. sandboxy/logging.py +31 -0
  29. sandboxy/mcp/__init__.py +25 -0
  30. sandboxy/mcp/client.py +360 -0
  31. sandboxy/mcp/wrapper.py +99 -0
  32. sandboxy/providers/__init__.py +34 -0
  33. sandboxy/providers/anthropic_provider.py +271 -0
  34. sandboxy/providers/base.py +123 -0
  35. sandboxy/providers/http_client.py +101 -0
  36. sandboxy/providers/openai_provider.py +282 -0
  37. sandboxy/providers/openrouter.py +958 -0
  38. sandboxy/providers/registry.py +199 -0
  39. sandboxy/scenarios/__init__.py +11 -0
  40. sandboxy/scenarios/comparison.py +491 -0
  41. sandboxy/scenarios/loader.py +262 -0
  42. sandboxy/scenarios/runner.py +468 -0
  43. sandboxy/scenarios/unified.py +1434 -0
  44. sandboxy/session/__init__.py +21 -0
  45. sandboxy/session/manager.py +278 -0
  46. sandboxy/tools/__init__.py +34 -0
  47. sandboxy/tools/base.py +127 -0
  48. sandboxy/tools/loader.py +270 -0
  49. sandboxy/tools/yaml_tools.py +708 -0
  50. sandboxy/ui/__init__.py +27 -0
  51. sandboxy/ui/dist/assets/index-CgAkYWrJ.css +1 -0
  52. sandboxy/ui/dist/assets/index-D4zoGFcr.js +347 -0
  53. sandboxy/ui/dist/index.html +14 -0
  54. sandboxy/utils/__init__.py +3 -0
  55. sandboxy/utils/time.py +20 -0
  56. sandboxy-0.0.1.dist-info/METADATA +241 -0
  57. sandboxy-0.0.1.dist-info/RECORD +60 -0
  58. sandboxy-0.0.1.dist-info/WHEEL +4 -0
  59. sandboxy-0.0.1.dist-info/entry_points.txt +3 -0
  60. sandboxy-0.0.1.dist-info/licenses/LICENSE +201 -0
sandboxy/__init__.py ADDED
@@ -0,0 +1,3 @@
1
+ """Sandboxy - Open-source agent simulation and benchmarking platform."""
2
+
3
+ __version__ = "0.1.0"
@@ -0,0 +1,21 @@
1
+ """Agents module - Agent interface, loader, and implementations."""
2
+
3
+ from sandboxy.agents.base import Agent, AgentAction, AgentConfig, AgentKind, BaseAgent
4
+ from sandboxy.agents.llm_prompt import LlmPromptAgent
5
+ from sandboxy.agents.loader import (
6
+ AgentLoader,
7
+ create_agent_from_config,
8
+ create_agent_from_model,
9
+ )
10
+
11
+ __all__ = [
12
+ "Agent",
13
+ "AgentAction",
14
+ "AgentConfig",
15
+ "AgentKind",
16
+ "AgentLoader",
17
+ "BaseAgent",
18
+ "LlmPromptAgent",
19
+ "create_agent_from_config",
20
+ "create_agent_from_model",
21
+ ]
@@ -0,0 +1,66 @@
1
+ """Base agent interface and models."""
2
+
3
+ from typing import Any, Literal, Protocol
4
+
5
+ from pydantic import BaseModel, Field
6
+
7
+ from sandboxy.core.state import Message
8
+
9
+ AgentKind = Literal["llm-prompt", "python-module", "http-endpoint"]
10
+
11
+
12
class AgentConfig(BaseModel):
    """Configuration for an agent."""

    id: str  # Unique identifier; loaders key their registries on this.
    name: str  # Human-readable display name.
    kind: AgentKind  # Implementation kind, e.g. "llm-prompt".
    model: str = ""  # Model id; a "provider/model" form selects OpenRouter.
    system_prompt: str = ""  # System prompt prepended to the conversation.
    tools: list[str] = Field(default_factory=list)  # Names of tools the agent may use.
    params: dict[str, Any] = Field(default_factory=dict)  # LLM params (temperature, max_tokens, ...).
    impl: dict[str, Any] = Field(default_factory=dict)  # Kind-specific implementation details.
23
+
24
+
25
class AgentAction(BaseModel):
    """Action returned by an agent after processing."""

    # Discriminator: plain assistant message, tool invocation, or end-of-run.
    type: Literal["message", "tool_call", "stop"]
    content: str | None = None  # Message text (for type="message").
    tool_name: str | None = None  # Target tool (for type="tool_call").
    tool_action: str | None = None  # Action to invoke within the tool.
    tool_args: dict[str, Any] | None = None  # Parsed JSON arguments for the action.
    tool_call_id: str | None = None  # Provider-assigned id pairing the call with its result.
34
+
35
+
36
class Agent(Protocol):
    """Protocol for agent implementations.

    Structural typing: any object exposing a ``config`` attribute and a
    matching ``step`` method conforms — subclassing is not required.
    """

    # Configuration the agent was created from.
    config: AgentConfig

    def step(
        self, history: list[Message], available_tools: list[dict[str, Any]] | None = None
    ) -> AgentAction:
        """Process conversation history and return next action.

        Args:
            history: Conversation history as list of messages.
            available_tools: Optional list of available tool schemas for function calling.

        Returns:
            Next action to take (message, tool call, or stop).
        """
        ...
54
+
55
+
56
class BaseAgent:
    """Base class for agent implementations."""

    def __init__(self, config: AgentConfig) -> None:
        # Store configuration; subclasses read self.config inside step().
        self.config = config

    def step(
        self, history: list[Message], available_tools: list[dict[str, Any]] | None = None
    ) -> AgentAction:
        """Process history and return action. Override in subclasses."""
        # Default is a no-op: signal the run loop to stop immediately.
        return AgentAction(type="stop")
@@ -0,0 +1,308 @@
1
+ """LLM-based prompt agent using OpenAI SDK."""
2
+
3
+ import json
4
+ import logging
5
+ import os
6
+ import time
7
+ from typing import Any
8
+
9
+ from sandboxy.agents.base import AgentAction, AgentConfig, BaseAgent
10
+ from sandboxy.core.state import Message
11
+
12
+ logger = logging.getLogger(__name__)
13
+
14
+ # Retry configuration
15
+ MAX_RETRIES = 3
16
+ RETRY_DELAY_BASE = 1.0 # seconds
17
+
18
+
19
class LlmPromptAgent(BaseAgent):
    """Agent that uses an LLM via OpenAI-compatible API.

    Supports both direct OpenAI and OpenRouter (for 400+ models).
    Uses OpenRouter when model contains "/" (e.g., "openai/gpt-4o").
    """

    def __init__(self, config: AgentConfig) -> None:
        """Initialize the agent.

        Args:
            config: Agent configuration.
        """
        super().__init__(config)
        self._client: Any = None
        # OpenRouter model IDs are namespaced as "provider/model".
        self._is_openrouter = "/" in (config.model or "")
        # Token usage tracking, accumulated across all API calls.
        self._total_input_tokens = 0
        self._total_output_tokens = 0

    @property
    def api_key(self) -> str:
        """Get the appropriate API key based on model type."""
        if self._is_openrouter:
            return os.getenv("OPENROUTER_API_KEY", "")
        return os.getenv("OPENAI_API_KEY", "")

    @property
    def client(self) -> Any:
        """Lazy-load OpenAI client with appropriate configuration."""
        if self._client is None:
            # Imported lazily so merely constructing the agent does not
            # require the openai package to be installed.
            from openai import OpenAI

            if self._is_openrouter:
                logger.debug("Initializing OpenRouter client for model: %s", self.config.model)
                self._client = OpenAI(
                    api_key=self.api_key,
                    base_url="https://openrouter.ai/api/v1",
                )
            else:
                logger.debug("Initializing OpenAI client for model: %s", self.config.model)
                self._client = OpenAI(api_key=self.api_key)
        return self._client

    def step(
        self,
        history: list[Message],
        available_tools: list[dict[str, Any]] | None = None,
    ) -> AgentAction:
        """Process conversation and return next action using LLM.

        Retries transient failures with exponential backoff; after the final
        failure the error is surfaced as a message action instead of raised,
        so a run loop can record it and continue.
        """
        if not self.api_key:
            # No credentials: fall back to canned responses for local dev.
            return self._stub_response(history)

        messages = self._build_messages(history)
        tools = self._build_tools(available_tools) if available_tools else None

        last_error: Exception | None = None
        for attempt in range(MAX_RETRIES):
            try:
                response = self._call_api(messages, tools)
                return self._parse_response(response)
            except Exception as e:
                last_error = e
                is_retryable = self._is_retryable_error(e)

                if is_retryable and attempt < MAX_RETRIES - 1:
                    delay = RETRY_DELAY_BASE * (2**attempt)  # exponential backoff
                    logger.warning(
                        "LLM call failed (attempt %d/%d), retrying in %.1fs: %s",
                        attempt + 1,
                        MAX_RETRIES,
                        delay,
                        e,
                    )
                    time.sleep(delay)
                else:
                    logger.error("Error calling LLM: %s", e, exc_info=True)
                    break

        return AgentAction(
            type="message",
            content=f"Error calling LLM: {last_error}",
        )

    def _is_retryable_error(self, error: Exception) -> bool:
        """Check if an error is retryable (rate limits, timeouts, 5xx)."""
        # Heuristic substring match on the error text; kept generic because
        # exception classes differ across SDK versions and providers.
        error_str = str(error).lower()
        retryable_patterns = [
            "rate limit",
            "timeout",
            "connection",
            "503",
            "502",
            "500",
            "overloaded",
        ]
        return any(pattern in error_str for pattern in retryable_patterns)

    def _build_messages(self, history: list[Message]) -> list[dict[str, Any]]:
        """Convert history to OpenAI message format."""
        messages: list[dict[str, Any]] = []

        if self.config.system_prompt:
            messages.append(
                {
                    "role": "system",
                    "content": self.config.system_prompt,
                }
            )

        for msg in history:
            if msg.role == "tool":
                # Tool results must reference the originating call id.
                messages.append(
                    {
                        "role": "tool",
                        "content": msg.content,
                        "tool_call_id": msg.tool_call_id or msg.tool_name or "unknown",
                    }
                )
            elif msg.role == "assistant" and msg.tool_calls:
                # Replay the assistant's tool calls so the API can pair them
                # with the subsequent "tool" role results.
                messages.append(
                    {
                        "role": "assistant",
                        "content": msg.content or None,
                        "tool_calls": [
                            {
                                "id": tc.id,
                                "type": "function",
                                "function": {
                                    "name": tc.name,
                                    "arguments": tc.arguments,
                                },
                            }
                            for tc in msg.tool_calls
                        ],
                    }
                )
            else:
                messages.append(
                    {
                        "role": msg.role,
                        "content": msg.content,
                    }
                )

        return messages

    def _build_tools(self, available_tools: list[dict[str, Any]]) -> list[dict[str, Any]]:
        """Build OpenAI tools format from available tools.

        Each tool action is flattened into one function named
        "<tool>__<action>"; _parse_response splits the name back apart.
        """
        tools = []
        for tool in available_tools:
            for action in tool.get("actions", []):
                tools.append(
                    {
                        "type": "function",
                        "function": {
                            "name": f"{tool['name']}__{action['name']}",
                            "description": action.get("description", ""),
                            "parameters": action.get(
                                "parameters", {"type": "object", "properties": {}}
                            ),
                        },
                    }
                )
        return tools

    def _call_api(
        self,
        messages: list[dict[str, Any]],
        tools: list[dict[str, Any]] | None,
    ) -> Any:
        """Make API call to OpenAI/OpenRouter."""
        model = self.config.model or "gpt-4o-mini"
        kwargs: dict[str, Any] = {
            "model": model,
            "messages": messages,
        }

        # Add temperature (some models don't support it).
        if "nano" not in model.lower():
            kwargs["temperature"] = self.config.params.get("temperature", 0.7)

        # Add max tokens.
        max_tokens = self.config.params.get("max_tokens", 2048)
        kwargs["max_completion_tokens"] = max_tokens

        if tools:
            kwargs["tools"] = tools
            kwargs["tool_choice"] = "auto"

        return self.client.chat.completions.create(**kwargs)

    def _parse_response(self, response: Any) -> AgentAction:
        """Parse OpenAI response into AgentAction."""
        # Track token usage. Some OpenAI-compatible providers return a usage
        # object whose counters are None; coerce to 0 so the accumulation
        # below never raises TypeError.
        if hasattr(response, "usage") and response.usage:
            self._total_input_tokens += getattr(response.usage, "prompt_tokens", 0) or 0
            self._total_output_tokens += getattr(response.usage, "completion_tokens", 0) or 0

        choice = response.choices[0]
        message = choice.message

        if message.tool_calls:
            # Only the first tool call is acted on per step.
            tool_call = message.tool_calls[0]
            function = tool_call.function

            full_name = function.name
            if "__" in full_name:
                # Canonical "<tool>__<action>" encoding from _build_tools.
                tool_name, tool_action = full_name.split("__", 1)
            else:
                # Fallback: some models collapse the separator to "_".
                parts = full_name.rsplit("_", 1)
                if len(parts) == 2:
                    tool_name, tool_action = parts
                else:
                    tool_name = full_name
                    tool_action = "invoke"

            try:
                tool_args = json.loads(function.arguments)
            except json.JSONDecodeError:
                tool_args = {}
            if not isinstance(tool_args, dict):
                # Arguments must decode to a JSON object; discard scalars or
                # arrays rather than failing AgentAction validation.
                tool_args = {}

            return AgentAction(
                type="tool_call",
                tool_name=tool_name,
                tool_action=tool_action,
                tool_args=tool_args,
                tool_call_id=tool_call.id,
            )

        if choice.finish_reason == "stop" and not message.content:
            return AgentAction(type="stop")

        return AgentAction(
            type="message",
            content=message.content or "",
        )

    def get_usage(self) -> dict[str, int]:
        """Get accumulated token usage across all API calls.

        Returns:
            Dictionary with input_tokens and output_tokens counts.
        """
        return {
            "input_tokens": self._total_input_tokens,
            "output_tokens": self._total_output_tokens,
        }

    def reset_usage(self) -> None:
        """Reset token usage counters."""
        self._total_input_tokens = 0
        self._total_output_tokens = 0

    def _stub_response(self, history: list[Message]) -> AgentAction:
        """Return stub response when no API key is configured."""
        last_user = next(
            (m for m in reversed(history) if m.role == "user"),
            None,
        )

        if last_user:
            content = last_user.content.lower()
            if "refund" in content:
                return AgentAction(
                    type="message",
                    content=(
                        "I understand you're inquiring about a refund. "
                        "Let me look into that for you. Could you please "
                        "provide your order number?"
                    ),
                )
            if "order" in content:
                return AgentAction(
                    type="message",
                    content=(
                        "I'd be happy to help you with your order. "
                        "What would you like to know about it?"
                    ),
                )

        key_name = "OPENROUTER_API_KEY" if self._is_openrouter else "OPENAI_API_KEY"
        return AgentAction(
            type="message",
            content=(
                f"[STUB] No API key configured. "
                f"Set {key_name} environment variable to enable LLM calls."
            ),
        )
@@ -0,0 +1,222 @@
1
+ """Agent loader - loads agent configurations and instantiates agents."""
2
+
3
+ import logging
4
+ from pathlib import Path
5
+ from typing import Any
6
+
7
+ import yaml
8
+
9
+ from sandboxy.agents.base import Agent, AgentConfig
10
+ from sandboxy.agents.llm_prompt import LlmPromptAgent
11
+
12
+ logger = logging.getLogger(__name__)
13
+
14
+ # Default directories to search for agent specs (user's directories only)
15
+ DEFAULT_AGENT_DIRS = [
16
+ Path.home() / ".sandboxy" / "agents",
17
+ ]
18
+
19
+
20
def get_agent_dirs() -> list[Path]:
    """Return the directories searched for agent specs.

    In local mode the local context's agents directory (when it exists) is
    searched before the user-level defaults.

    Returns:
        List of directories to search for agents.
    """
    from sandboxy.local.context import get_local_context, is_local_mode

    search_paths: list[Path] = []

    if is_local_mode():
        context = get_local_context()
        if context and context.agents_dir.exists():
            search_paths.append(context.agents_dir)

    search_paths.extend(DEFAULT_AGENT_DIRS)
    return search_paths
37
+
38
+
39
class AgentLoader:
    """Loader for agent configurations and instantiation."""

    def __init__(self, dirs: list[Path] | None = None) -> None:
        """Initialize loader with directories to search.

        Args:
            dirs: Directories to search for agent specs. Uses defaults if None.
        """
        self.dirs = dirs if dirs is not None else DEFAULT_AGENT_DIRS
        self._configs: dict[str, AgentConfig] = {}
        self._load_configs()

    def _load_configs(self) -> None:
        """Load all agent configurations from directories."""
        for d in self.dirs:
            if not d.exists():
                continue

            # Search recursively for both common YAML extensions.
            for path in d.glob("**/*.yaml"):
                self._load_config_file(path)
            for path in d.glob("**/*.yml"):
                self._load_config_file(path)

    def _load_config_file(self, path: Path) -> None:
        """Load a single agent configuration file.

        Malformed, unreadable, or invalid files are logged and skipped so
        that one bad spec cannot break discovery of the other agents.
        """
        try:
            raw: dict[str, Any] = yaml.safe_load(path.read_text())
        except OSError as e:
            # Previously unreadable files crashed discovery entirely.
            logger.warning("Failed to read %s: %s", path, e)
            return
        except yaml.YAMLError as e:
            logger.warning("Failed to parse YAML file %s: %s", path, e)
            return

        if not raw or "id" not in raw:
            logger.debug("Skipping %s: missing 'id' field", path)
            return

        try:
            config = AgentConfig(
                id=raw["id"],
                name=raw.get("name", raw["id"]),
                kind=raw.get("kind", "llm-prompt"),
                model=raw.get("model", ""),
                system_prompt=raw.get("system_prompt", ""),
                tools=raw.get("tools", []),
                params=raw.get("params", {}),
                impl=raw.get("impl", {}),
            )
        except Exception as e:
            # pydantic ValidationError (e.g. an unknown 'kind'), wrong field
            # types, etc. — previously uncaught and aborted loading.
            logger.warning("Invalid agent config in %s: %s", path, e)
            return

        self._configs[config.id] = config
        logger.debug("Loaded agent config: %s from %s", config.id, path)

    def list_ids(self) -> list[str]:
        """Get list of available agent IDs.

        Returns:
            List of agent IDs.
        """
        return list(self._configs.keys())

    def get_config(self, agent_id: str) -> AgentConfig | None:
        """Get agent configuration by ID.

        Args:
            agent_id: Agent identifier.

        Returns:
            Agent configuration or None if not found.
        """
        return self._configs.get(agent_id)

    def load(self, agent_id: str) -> Agent:
        """Load and instantiate an agent by ID.

        Args:
            agent_id: Agent identifier. Can be either:
                - A predefined agent ID from user's YAML files
                - A model ID (e.g., "openai/gpt-4o", "anthropic/claude-3.5-haiku")

        Returns:
            Instantiated agent.

        Raises:
            ValueError: If agent ID not found.
        """
        config = self._configs.get(agent_id)
        if config is None:
            # A "/" marks a model ID rather than a predefined agent.
            if "/" in agent_id:
                # Create a dynamic agent config for this model.
                config = AgentConfig(
                    id=agent_id,
                    name=agent_id.split("/")[-1].replace("-", " ").title(),
                    kind="llm-prompt",
                    model=agent_id,
                    system_prompt="You are a helpful assistant. Use the available tools to complete tasks.",
                    tools=[],
                    params={"temperature": 0.7, "max_tokens": 2048},
                    impl={},
                )
            else:
                msg = f"Agent not found: {agent_id}"
                raise ValueError(msg)
        return self._instantiate(config)

    def load_default(self) -> Agent:
        """Load the default agent.

        Returns:
            Default agent instance.

        Raises:
            ValueError: If no agents are available and no model specified.
        """
        # Use any available agent from user's config.
        if self._configs:
            config = next(iter(self._configs.values()))
            return self._instantiate(config)

        raise ValueError("No agents available. Specify a model with -m (e.g., -m openai/gpt-4o)")

    def _instantiate(self, config: AgentConfig) -> Agent:
        """Create agent instance from configuration.

        Args:
            config: Agent configuration.

        Returns:
            Agent instance.

        Raises:
            ValueError: If agent kind is not supported.
        """
        return _instantiate_agent(config)
170
+
171
+
172
def _instantiate_agent(config: AgentConfig) -> Agent:
    """Create agent instance from configuration.

    Args:
        config: Agent configuration.

    Returns:
        Agent instance.

    Raises:
        ValueError: If agent kind is not supported.
    """
    if config.kind != "llm-prompt":
        raise ValueError(f"Unsupported agent kind: {config.kind}")
    return LlmPromptAgent(config)
188
+
189
+
190
def create_agent_from_config(config: AgentConfig) -> Agent:
    """Create an agent instance directly from configuration.

    Public counterpart of the module-private instantiation helper, for
    callers that already hold a validated AgentConfig.

    Args:
        config: Agent configuration.

    Returns:
        Agent instance.
    """
    return _instantiate_agent(config)
200
+
201
+
202
def create_agent_from_model(model_id: str, system_prompt: str = "") -> Agent:
    """Create an agent directly from a model ID.

    Args:
        model_id: Model identifier (e.g., "openai/gpt-4o", "anthropic/claude-3.5-sonnet")
        system_prompt: Optional system prompt override.

    Returns:
        Agent instance configured for the model.
    """
    if "/" in model_id:
        # Derive a display name from the model slug, e.g. "gpt-4o" -> "Gpt 4O".
        display_name = model_id.split("/")[-1].replace("-", " ").title()
    else:
        display_name = model_id

    return _instantiate_agent(
        AgentConfig(
            id=model_id,
            name=display_name,
            kind="llm-prompt",
            model=model_id,
            system_prompt=system_prompt or "You are a helpful assistant.",
            tools=[],
            params={"temperature": 0.7, "max_tokens": 4096},
            impl={},
        )
    )
@@ -0,0 +1,5 @@
1
+ """FastAPI application for Sandboxy."""
2
+
3
+ from sandboxy.api.app import create_local_app
4
+
5
+ __all__ = ["create_local_app"]