progtc 0.1.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,30 @@
1
+ name: Release
2
+
3
+ on:
4
+ push:
5
+ tags:
6
+ # Publish on any tag starting with a `v`, e.g., v1.2.3
7
+ - v*
8
+
9
+ jobs:
10
+ pypi:
11
+ name: Publish to PyPI
12
+ runs-on: ubuntu-latest
13
+ # Environment and permissions for trusted publishing.
14
+ environment:
15
+ # Create this environment in the GitHub repository under Settings -> Environments
16
+ name: pypi
17
+ permissions:
18
+ id-token: write
19
+ contents: read
20
+ steps:
21
+ - name: Checkout
22
+ uses: actions/checkout@v5
23
+ - name: Install uv
24
+ uses: astral-sh/setup-uv@v6
25
+ - name: Install Python 3.13
26
+ run: uv python install 3.13
27
+ - name: Build
28
+ run: uv build
29
+ - name: Publish
30
+ run: uv publish
@@ -0,0 +1,10 @@
1
+ # Python-generated files
2
+ __pycache__/
3
+ *.py[oc]
4
+ build/
5
+ dist/
6
+ wheels/
7
+ *.egg-info
8
+
9
+ # Virtual environments
10
+ .venv
@@ -0,0 +1 @@
1
+ 3.13
progtc-0.1.0/PKG-INFO ADDED
@@ -0,0 +1,184 @@
1
+ Metadata-Version: 2.4
2
+ Name: progtc
3
+ Version: 0.1.0
4
+ Summary: Add your description here
5
+ Author-email: Callum Downie <70471360+calmdown13@users.noreply.github.com>
6
+ Requires-Python: >=3.12
7
+ Requires-Dist: httpx-sse>=0.4.3
8
+ Requires-Dist: httpx>=0.28.1
9
+ Requires-Dist: pydantic>=2.12.5
10
+ Provides-Extra: server
11
+ Requires-Dist: fastapi>=0.123.10; extra == 'server'
12
+ Requires-Dist: rich>=14.2.0; extra == 'server'
13
+ Requires-Dist: typer>=0.20.0; extra == 'server'
14
+ Requires-Dist: uvicorn[standard]>=0.38.0; extra == 'server'
15
+ Description-Content-Type: text/markdown
16
+
17
+ ```
18
+ ██████╗ ██████╗ ██████╗ ██████╗ ████████╗ ██████╗
19
+ ██╔══██╗██╔══██╗██╔═══██╗██╔════╝ ╚══██╔══╝██╔════╝
20
+ ██████╔╝██████╔╝██║ ██║██║ ███╗ ██║ ██║
21
+ ██╔═══╝ ██╔══██╗██║ ██║██║ ██║ ██║ ██║
22
+ ██║ ██║ ██║╚██████╔╝╚██████╔╝ ██║ ╚██████╗
23
+ ╚═╝ ╚═╝ ╚═╝ ╚═════╝ ╚═════╝ ╚═╝ ╚═════╝
24
+ by capsa
25
+ ```
26
+
27
+ **Programmatic Tool Calling** — Let LLM-generated code call your tools, even from inside a sandbox.
28
+
29
+ ---
30
+
31
+ ## The Problem
32
+
33
+ You want an AI agent to write and execute Python code. Easy enough—spin up an [E2B](https://e2b.dev) sandbox and let it run. But what if that code needs to call your tools?
34
+
35
+ The code runs inside a sandbox. Your tools live outside. There's no bridge.
36
+
37
+ ## The Solution
38
+
39
+ **progtc** creates that bridge. It runs a lightweight server inside your sandbox that exposes your tools to the generated code. When the code calls a tool, the request streams back to your client, you execute it locally, and return the result—all transparently.
40
+
41
+
42
+ ## Installation
43
+
44
+ ```bash
45
+ pip install progtc
46
+ ```
47
+
48
+ Or with [uv](https://docs.astral.sh/uv/):
49
+
50
+ ```bash
51
+ uv add progtc
52
+ ```
53
+
54
+ ## Quick Start
55
+
56
+ ### 1. Start the Server (inside your sandbox)
57
+
58
+ ```bash
59
+ progtc serve --host 0.0.0.0 --port 8000 --api-key your-secret-key
60
+ ```
61
+
62
+ ### 2. Execute Code from Your Client
63
+
64
+ ```python
65
+ from progtc import AsyncProgtcClient
66
+
67
+ client = AsyncProgtcClient(
68
+ base_url="https://your-sandbox-url:8000",
69
+ api_key="your-secret-key",
70
+ )
71
+
72
+ # Define your tools as async functions
73
+ async def get_weather(city: str, country: str) -> str:
74
+ # Your actual implementation
75
+ return f"Weather in {city}, {country}: Sunny, 22°C"
76
+
77
+ async def search_database(query: str) -> list[dict]:
78
+ # Your actual implementation
79
+ return [{"id": 1, "name": "Result"}]
80
+
81
+ # Execute LLM-generated code that uses your tools
82
+ code = """
83
+ from tools import get_weather
84
+
85
+ weather = await get_weather("London", "UK")
86
+ print(f"The weather is: {weather}")
87
+ """
88
+
89
+ result = await client.execute_code(
90
+ code=code,
91
+ tool_call_handlers={
92
+ "get_weather": get_weather,
93
+ "search_database": search_database,
94
+ },
95
+ )
96
+
97
+ print(result.stdout) # "The weather is: Weather in London, UK: Sunny, 22°C"
98
+ ```
99
+
100
+ ## How It Works
101
+
102
+ 1. **Your client** sends code + a list of available tool names to the progtc server
103
+ 2. **The server** executes the code in an isolated process, injecting a `tools` module
104
+ 3. **When code calls a tool**, the server streams the call back to your client via SSE
105
+ 4. **Your client** executes the tool locally and sends the result back
106
+ 5. **The server** resumes code execution with the result
107
+ 6. **Stdout/stderr** are captured and streamed back when execution completes
108
+
109
+ ## Code Requirements
110
+
111
+ The LLM-generated code must:
112
+
113
+ - **Import tools from the `tools` module**: `from tools import my_tool`
114
+ - **Await all tool calls** (they're async)
115
+ - **Use `print()` for output** — stdout/stderr are captured and returned
116
+
117
+ ```python
118
+ from tools import get_weather, search_database
119
+ import asyncio
120
+
121
+ # Call tools like regular async functions
122
+ weather, results = await asyncio.gather(
123
+ get_weather("Tokyo", "Japan"),
124
+ search_database("hotels"),
125
+ )
126
+
127
+ print(f"Weather: {weather}")
128
+ print(f"Results: {results}")
129
+ ```
130
+
131
+ > **Note:** The code runs in a top-level async context, so you can use `await` directly without defining an async function.
132
+
133
+ ## CLI Options
134
+
135
+ ```bash
136
+ progtc serve [OPTIONS]
137
+ ```
138
+
139
+ | Option | Default | Description |
140
+ |--------|---------|-------------|
141
+ | `--host` | `127.0.0.1` | Host to bind to |
142
+ | `--port` | `8000` | Port to bind to |
143
+ | `--api-key` | (env: `PROGTC_API_KEY`) | API key for authentication |
144
+ | `--tool-call-timeout` | `10.0` | Timeout for individual tool calls (seconds) |
145
+ | `--code-execution-timeout` | `30.0` | Total timeout for code execution (seconds) |
146
+
147
+ ## Error Handling
148
+
149
+ The client returns a discriminated union—either success or one of several error types:
150
+
151
+ ```python
152
+ from progtc.types import MessageType
153
+
154
+ result = await client.execute_code(code, tool_call_handlers)
155
+
156
+ match result.type:
157
+ case MessageType.SUCCESS:
158
+ print(f"Stdout: {result.stdout}")
159
+ print(f"Stderr: {result.stderr}")
160
+ case MessageType.ERROR:
161
+ print(f"Error: {result.message}")
162
+ print(f"Code: {result.code}") # compilation, runtime, timeout, etc.
163
+ ```
164
+
165
+ Error codes:
166
+ - `code_compilation_error` — Code failed to compile/exec
167
+ - `code_runtime_error` — Exception raised during execution
168
+ - `code_timeout_error` — Execution exceeded timeout
169
+
170
+ ## Example: E2B + pydantic-ai
171
+
172
+ See [`examples/e2b-example/`](examples/e2b-example/) for a complete example using progtc with [E2B](https://e2b.dev) sandboxes and [pydantic-ai](https://ai.pydantic.dev) agents.
173
+
174
+ The example demonstrates an AI agent that can execute Python code in a secure sandbox while calling tools defined in your application.
175
+
176
+ ## License
177
+
178
+ MIT
179
+
180
+ ---
181
+
182
+ <p align="center">
183
+ <b>Building AI agents?</b> We're hiring: <a href="https://capsa.ai/careers">capsa.ai/careers</a>
184
+ </p>
progtc-0.1.0/README.md ADDED
@@ -0,0 +1,168 @@
1
+ ```
2
+ ██████╗ ██████╗ ██████╗ ██████╗ ████████╗ ██████╗
3
+ ██╔══██╗██╔══██╗██╔═══██╗██╔════╝ ╚══██╔══╝██╔════╝
4
+ ██████╔╝██████╔╝██║ ██║██║ ███╗ ██║ ██║
5
+ ██╔═══╝ ██╔══██╗██║ ██║██║ ██║ ██║ ██║
6
+ ██║ ██║ ██║╚██████╔╝╚██████╔╝ ██║ ╚██████╗
7
+ ╚═╝ ╚═╝ ╚═╝ ╚═════╝ ╚═════╝ ╚═╝ ╚═════╝
8
+ by capsa
9
+ ```
10
+
11
+ **Programmatic Tool Calling** — Let LLM-generated code call your tools, even from inside a sandbox.
12
+
13
+ ---
14
+
15
+ ## The Problem
16
+
17
+ You want an AI agent to write and execute Python code. Easy enough—spin up an [E2B](https://e2b.dev) sandbox and let it run. But what if that code needs to call your tools?
18
+
19
+ The code runs inside a sandbox. Your tools live outside. There's no bridge.
20
+
21
+ ## The Solution
22
+
23
+ **progtc** creates that bridge. It runs a lightweight server inside your sandbox that exposes your tools to the generated code. When the code calls a tool, the request streams back to your client, you execute it locally, and return the result—all transparently.
24
+
25
+
26
+ ## Installation
27
+
28
+ ```bash
29
+ pip install progtc
30
+ ```
31
+
32
+ Or with [uv](https://docs.astral.sh/uv/):
33
+
34
+ ```bash
35
+ uv add progtc
36
+ ```
37
+
38
+ ## Quick Start
39
+
40
+ ### 1. Start the Server (inside your sandbox)
41
+
42
+ ```bash
43
+ progtc serve --host 0.0.0.0 --port 8000 --api-key your-secret-key
44
+ ```
45
+
46
+ ### 2. Execute Code from Your Client
47
+
48
+ ```python
49
+ from progtc import AsyncProgtcClient
50
+
51
+ client = AsyncProgtcClient(
52
+ base_url="https://your-sandbox-url:8000",
53
+ api_key="your-secret-key",
54
+ )
55
+
56
+ # Define your tools as async functions
57
+ async def get_weather(city: str, country: str) -> str:
58
+ # Your actual implementation
59
+ return f"Weather in {city}, {country}: Sunny, 22°C"
60
+
61
+ async def search_database(query: str) -> list[dict]:
62
+ # Your actual implementation
63
+ return [{"id": 1, "name": "Result"}]
64
+
65
+ # Execute LLM-generated code that uses your tools
66
+ code = """
67
+ from tools import get_weather
68
+
69
+ weather = await get_weather("London", "UK")
70
+ print(f"The weather is: {weather}")
71
+ """
72
+
73
+ result = await client.execute_code(
74
+ code=code,
75
+ tool_call_handlers={
76
+ "get_weather": get_weather,
77
+ "search_database": search_database,
78
+ },
79
+ )
80
+
81
+ print(result.stdout) # "The weather is: Weather in London, UK: Sunny, 22°C"
82
+ ```
83
+
84
+ ## How It Works
85
+
86
+ 1. **Your client** sends code + a list of available tool names to the progtc server
87
+ 2. **The server** executes the code in an isolated process, injecting a `tools` module
88
+ 3. **When code calls a tool**, the server streams the call back to your client via SSE
89
+ 4. **Your client** executes the tool locally and sends the result back
90
+ 5. **The server** resumes code execution with the result
91
+ 6. **Stdout/stderr** are captured and streamed back when execution completes
92
+
93
+ ## Code Requirements
94
+
95
+ The LLM-generated code must:
96
+
97
+ - **Import tools from the `tools` module**: `from tools import my_tool`
98
+ - **Await all tool calls** (they're async)
99
+ - **Use `print()` for output** — stdout/stderr are captured and returned
100
+
101
+ ```python
102
+ from tools import get_weather, search_database
103
+ import asyncio
104
+
105
+ # Call tools like regular async functions
106
+ weather, results = await asyncio.gather(
107
+ get_weather("Tokyo", "Japan"),
108
+ search_database("hotels"),
109
+ )
110
+
111
+ print(f"Weather: {weather}")
112
+ print(f"Results: {results}")
113
+ ```
114
+
115
+ > **Note:** The code runs in a top-level async context, so you can use `await` directly without defining an async function.
116
+
117
+ ## CLI Options
118
+
119
+ ```bash
120
+ progtc serve [OPTIONS]
121
+ ```
122
+
123
+ | Option | Default | Description |
124
+ |--------|---------|-------------|
125
+ | `--host` | `127.0.0.1` | Host to bind to |
126
+ | `--port` | `8000` | Port to bind to |
127
+ | `--api-key` | (env: `PROGTC_API_KEY`) | API key for authentication |
128
+ | `--tool-call-timeout` | `10.0` | Timeout for individual tool calls (seconds) |
129
+ | `--code-execution-timeout` | `30.0` | Total timeout for code execution (seconds) |
130
+
131
+ ## Error Handling
132
+
133
+ The client returns a discriminated union—either success or one of several error types:
134
+
135
+ ```python
136
+ from progtc.types import MessageType
137
+
138
+ result = await client.execute_code(code, tool_call_handlers)
139
+
140
+ match result.type:
141
+ case MessageType.SUCCESS:
142
+ print(f"Stdout: {result.stdout}")
143
+ print(f"Stderr: {result.stderr}")
144
+ case MessageType.ERROR:
145
+ print(f"Error: {result.message}")
146
+ print(f"Code: {result.code}") # compilation, runtime, timeout, etc.
147
+ ```
148
+
149
+ Error codes:
150
+ - `code_compilation_error` — Code failed to compile/exec
151
+ - `code_runtime_error` — Exception raised during execution
152
+ - `code_timeout_error` — Execution exceeded timeout
153
+
154
+ ## Example: E2B + pydantic-ai
155
+
156
+ See [`examples/e2b-example/`](examples/e2b-example/) for a complete example using progtc with [E2B](https://e2b.dev) sandboxes and [pydantic-ai](https://ai.pydantic.dev) agents.
157
+
158
+ The example demonstrates an AI agent that can execute Python code in a secure sandbox while calling tools defined in your application.
159
+
160
+ ## License
161
+
162
+ MIT
163
+
164
+ ---
165
+
166
+ <p align="center">
167
+ <b>Building AI agents?</b> We're hiring: <a href="https://capsa.ai/careers">capsa.ai/careers</a>
168
+ </p>
@@ -0,0 +1,62 @@
1
+ [project]
2
+ name = "progtc"
3
+ version = "0.1.0"
4
+ description = "Add your description here"
5
+ readme = "README.md"
6
+ authors = [
7
+ { name = "Callum Downie", email = "70471360+calmdown13@users.noreply.github.com" },
8
+ ]
9
+ requires-python = ">=3.12"
10
+ dependencies = [
11
+ "pydantic>=2.12.5",
12
+ "httpx>=0.28.1",
13
+ "httpx-sse>=0.4.3",
14
+ ]
15
+
16
+ [project.optional-dependencies]
17
+ server = [
18
+ "fastapi>=0.123.10",
19
+ "rich>=14.2.0",
20
+ "typer>=0.20.0",
21
+ "uvicorn[standard]>=0.38.0",
22
+ ]
23
+
24
+ [project.scripts]
25
+ progtc = "progtc:main"
26
+
27
+ [build-system]
28
+ requires = ["hatchling"]
29
+ build-backend = "hatchling.build"
30
+
31
+ [dependency-groups]
32
+ dev = [
33
+ "progtc[server]",
34
+ "mypy>=1.19.0",
35
+ "pytest>=9.0.1",
36
+ "ruff>=0.14.8",
37
+ "pytest-asyncio>=1.3.0",
38
+ ]
39
+
40
+ [tool.mypy]
41
+ strict = true
42
+
43
+ [tool.ruff]
44
+ target-version = "py312"
45
+ line-length = 88
46
+
47
+ [tool.ruff.lint]
48
+ select = [
49
+ "E", # pycodestyle
50
+ "F", # Pyflakes
51
+ "UP", # pyupgrade
52
+ "B", # flake8-bugbear
53
+ "SIM", # flake8-simplify
54
+ "I", # isort
55
+ ]
56
+ ignore = ["B008"]
57
+
58
+ [tool.pytest.ini_options]
59
+ filterwarnings = [
60
+ "ignore:websockets.legacy is deprecated:DeprecationWarning",
61
+ "ignore:websockets.server.WebSocketServerProtocol is deprecated:DeprecationWarning",
62
+ ]
@@ -0,0 +1,4 @@
1
+ from progtc.cli import main
2
+ from progtc.client import AsyncProgtcClient
3
+
4
+ __all__ = ["AsyncProgtcClient", "main"]
@@ -0,0 +1,78 @@
1
+ from typing import Annotated
2
+
3
+ import typer
4
+ import uvicorn
5
+ from rich import print as rprint
6
+
7
+ from progtc.server.config import server_config
8
+
9
# Typer application that backs the `progtc` console script
# (wired up via [project.scripts] -> progtc:main).
app = typer.Typer(
    name="progtc",
    help="progtc CLI",
    add_completion=True,
)


@app.callback()
def callback() -> None:
    """Progtc - programmatic tool calling"""
19
+
20
+
21
+ ARTWORK = """
22
+ [bright_cyan]
23
+ ██████╗ ██████╗ ██████╗ ██████╗ ████████╗ ██████╗
24
+ ██╔══██╗██╔══██╗██╔═══██╗██╔════╝ ╚══██╔══╝██╔════╝
25
+ ██████╔╝██████╔╝██║ ██║██║ ███╗ ██║ ██║
26
+ ██╔═══╝ ██╔══██╗██║ ██║██║ ██║ ██║ ██║
27
+ ██║ ██║ ██║╚██████╔╝╚██████╔╝ ██║ ╚██████╗
28
+ ╚═╝ ╚═╝ ╚═╝ ╚═════╝ ╚═════╝ ╚═╝ ╚═════╝
29
+ by capsa[/bright_cyan]
30
+ [dim]───────────────────────────────────────────────────────────[/dim]
31
+ [bright_white]Come build agents with us: https://capsa.ai/careers[/bright_white]
32
+ [dim]───────────────────────────────────────────────────────────[/dim]
33
+ """
34
+
35
+
36
@app.command()
def serve(
    host: Annotated[str, typer.Option(help="Host to bind to")] = "127.0.0.1",
    port: Annotated[int, typer.Option(help="Port to bind to")] = 8000,
    api_key: Annotated[
        str | None,
        typer.Option(
            help=(
                "API key for authentication (can also be set via "
                "PROGTC_API_KEY env var)"
            ),
            envvar="PROGTC_API_KEY",
        ),
    ] = None,
    tool_call_timeout: Annotated[
        float,
        typer.Option(help="Timeout for tool calls in seconds"),
    ] = 10.0,
    code_execution_timeout: Annotated[
        float,
        typer.Option(help="Timeout for code execution in seconds"),
    ] = 30.0,
) -> None:
    """Start the progtc server, printing the banner before uvicorn takes over."""
    # Copy the CLI options into the module-level config that the server
    # app reads at request time.
    server_config.tool_call_timeout = tool_call_timeout
    server_config.code_execution_timeout = code_execution_timeout
    if api_key:
        server_config.api_key = api_key

    rprint(ARTWORK)

    # Import-string form so uvicorn can (re)import the app itself.
    uvicorn.run("progtc.server:app", host=host, port=port)
71
+
72
+
73
def main() -> None:
    """Console-script entry point: dispatch to the Typer app."""
    app()


if __name__ == "__main__":
    main()
@@ -0,0 +1,61 @@
1
+ from httpx import AsyncClient
2
+ from httpx_sse import aconnect_sse
3
+ from pydantic import TypeAdapter
4
+
5
+ from progtc.types import (
6
+ ExecuteCodeError,
7
+ ExecuteCodeMessage,
8
+ ExecuteCodeSuccess,
9
+ ToolCall,
10
+ ToolHandler,
11
+ )
12
+
13
# Pydantic adapter that parses each SSE payload into the ExecuteCodeMessage
# union — presumably ToolCall / ExecuteCodeSuccess / ExecuteCodeError, per the
# imports above; the client branches on ToolCall below.
ExecuteCodeMessageAdapter = TypeAdapter[ExecuteCodeMessage](ExecuteCodeMessage)
14
+
15
+
16
class AsyncProgtcClient:
    """Async client that runs code on a progtc server and services its tool calls."""

    def __init__(
        self,
        base_url: str,
        api_key: str,
    ):
        self._base_url = base_url
        self._api_key = api_key

    async def _answer_tool_call(
        self,
        http: AsyncClient,
        handlers: dict[str, ToolHandler],
        call: ToolCall,
    ) -> None:
        # Run the matching local handler, then post its result back so the
        # server can resume the remote execution.
        outcome = await handlers[call.tool_name](*call.args, **call.kwargs)
        await http.post(
            "/tool-results",
            json={
                "execution_id": call.execution_id,
                "tool_call_id": call.id,
                "result": outcome,
            },
        )

    async def execute_code(
        self,
        code: str,
        tool_call_handlers: dict[str, ToolHandler],
    ) -> ExecuteCodeSuccess | ExecuteCodeError:
        """Execute *code* remotely, answering streamed tool calls as they arrive.

        Returns the terminal success/error message emitted by the server.
        """
        request_body = {
            "tool_names": list(tool_call_handlers.keys()),
            "code": code,
        }
        async with AsyncClient(
            base_url=self._base_url,
            headers={"Authorization": f"Bearer {self._api_key}"},
        ) as http:
            async with aconnect_sse(
                http,
                "POST",
                "/execute-code",
                json=request_body,
            ) as event_source:
                async for event in event_source.aiter_sse():
                    message = ExecuteCodeMessageAdapter.validate_python(event.json())
                    if not isinstance(message, ToolCall):
                        # The first non-tool-call message is the final result.
                        return message
                    await self._answer_tool_call(http, tool_call_handlers, message)

        raise RuntimeError("Stream ended without result")
File without changes
@@ -0,0 +1,63 @@
1
+ import secrets
2
+ import time
3
+ from collections.abc import Generator
4
+
5
+ from fastapi import Depends, FastAPI, HTTPException
6
+ from fastapi.responses import StreamingResponse
7
+ from fastapi.security import HTTPAuthorizationCredentials, HTTPBearer
8
+
9
+ from progtc.server.code_execution_manager import CodeExecutionManager
10
+ from progtc.server.config import server_config
11
+ from progtc.types import (
12
+ CodeTimeoutError,
13
+ ExecuteCodeRequest,
14
+ ToolCallResult,
15
+ )
16
+
17
# Single module-level manager shared by both endpoints below.
code_execution_manager = CodeExecutionManager()


def authenticate(
    credentials: HTTPAuthorizationCredentials = Depends(HTTPBearer()),
) -> None:
    """Reject any request whose Bearer token doesn't match the configured key.

    Uses secrets.compare_digest for a timing-safe comparison.
    """
    # NOTE(review): if server_config.api_key was never set (no --api-key and no
    # PROGTC_API_KEY), compare_digest gets a non-str and raises TypeError
    # (-> 500) rather than 401 — confirm the config default.
    if not secrets.compare_digest(credentials.credentials, server_config.api_key):
        raise HTTPException(status_code=401, detail="Unauthorized")


# Applying the dependency app-wide makes every route require authentication.
app = FastAPI(dependencies=[Depends(authenticate)])
28
+
29
+
30
@app.post("/tool-results")
def add_tool_result(tool_call_result: ToolCallResult) -> None:
    """Accept a tool result from the client and hand it to the execution manager."""
    code_execution_manager.send_tool_result(tool_call_result)
33
+
34
+
35
+ @app.post("/execute-code")
36
+ def execute_code(body: ExecuteCodeRequest) -> StreamingResponse:
37
+ def stream() -> Generator[str, None, None]:
38
+ with code_execution_manager.run(body.code, body.tool_names) as process:
39
+ t0 = time.monotonic()
40
+
41
+ # Yield tool calls to the client until the process
42
+ # is done or the timeout is reached
43
+ while (
44
+ not process.is_done()
45
+ and time.monotonic() - t0 < server_config.code_execution_timeout
46
+ ):
47
+ tool_call = process.next_tool_call()
48
+ if tool_call is None:
49
+ continue
50
+ yield f"data: {tool_call.model_dump_json()}\n\n"
51
+
52
+ # Yield the final result to the client.
53
+ if process.is_done():
54
+ result = process.result()
55
+ else:
56
+ result = CodeTimeoutError(
57
+ message="Code execution timed out",
58
+ stdout="",
59
+ stderr="",
60
+ )
61
+ yield f"data: {result.model_dump_json()}\n\n"
62
+
63
+ return StreamingResponse(stream(), media_type="text/event-stream")