opencode-agent 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,293 @@
1
+ from __future__ import annotations
2
+
3
+ import logging
4
+ import uuid
5
+ from concurrent.futures import ThreadPoolExecutor
6
+ from dataclasses import dataclass
7
+ from typing import Any
8
+
9
+ from .client import OpenCodeClient
10
+ from .config import OpenCodeConfig
11
+ from .logging import log_event
12
+ from .managers import MCPManager, SessionManager, ToolManager
13
+ from .models import ExecutionMetadata, ExecutionResult, MultiAgentResult, StreamEvent, SubtaskTrace
14
+
15
+
16
@dataclass(slots=True)
class PlannedStep:
    """A single planned subtask: which agent runs which prompt template,
    and whether the step gets its own forked session."""

    # Step label; also embedded into the child task id by MultiAgentRunner.
    name: str
    # OpenCode agent to execute the step ("plan", "explore", "build", ...).
    agent: str
    # Template with a `{prompt}` placeholder, filled with the parent prompt
    # at execution time (not at planning time).
    prompt_template: str
    # True: run in a session forked from the parent session.
    # False: run directly in the parent session.
    fork_session: bool = True
22
+
23
+
24
class TaskPlanner:
    """Translate a strategy name into an ordered list of ``PlannedStep``s."""

    def __init__(self, config: OpenCodeConfig | None = None):
        self.config = config or OpenCodeConfig.from_env()

    def plan(self, prompt: str, strategy: str = "plan-explore-build-review") -> list[PlannedStep]:
        """Return the step sequence for *strategy*.

        Note: *prompt* is substituted into each step's template later, by
        the runner — the templates returned here still carry the literal
        ``{prompt}`` placeholder.
        """
        if strategy != "coordinator-specialists-synthesizer":
            # Default pipeline — used for the documented default strategy
            # and for any unrecognized strategy name.
            return [
                PlannedStep("plan", "plan", "Create a concise execution plan for this task:\n\n{prompt}", False),
                PlannedStep("explore", "explore", "Explore the workspace and relevant context for this task:\n\n{prompt}"),
                PlannedStep("build", "build", "Execute the implementation work for this task:\n\n{prompt}"),
                PlannedStep("review", "plan", "Review the implementation critically and summarize remaining risks for:\n\n{prompt}", False),
            ]
        return [
            PlannedStep("coordinator", "plan", "Define the execution approach for this task and call out major workstreams:\n\n{prompt}", False),
            PlannedStep("specialist-build", "build", "Implement the most important technical solution for this task:\n\n{prompt}"),
            PlannedStep("specialist-explore", "explore", "Inspect risks, dependencies, and supporting context for this task:\n\n{prompt}"),
            PlannedStep("synthesizer", "build", "Synthesize the specialist outputs into one actionable final result for:\n\n{prompt}", False),
        ]
42
+
43
+
44
class EventStreamer:
    """Thin helper that drains events from an OpenCode client's event feed."""

    def __init__(self, client: OpenCodeClient):
        self.client = client

    def stream(self, limit: int | None = None) -> list[StreamEvent]:
        """Collect events from the client.

        Stops once *limit* events have been gathered; with ``limit=None``
        it runs until the client's event iterator is exhausted.
        """
        collected: list[StreamEvent] = []
        for count, event in enumerate(self.client.iter_events(), start=1):
            collected.append(event)
            if limit is not None and count >= limit:
                break
        return collected
55
+
56
+
57
class SingleAgentRunner:
    """Run a single prompt through one OpenCode agent.

    Wires together the HTTP client and the session/tool/MCP managers,
    scopes every prompt to the configured project path, and packages the
    server reply into an ``ExecutionResult``.
    """

    def __init__(
        self,
        client: OpenCodeClient | None = None,
        config: OpenCodeConfig | None = None,
        session_manager: SessionManager | None = None,
        tool_manager: ToolManager | None = None,
        mcp_manager: MCPManager | None = None,
    ):
        # Every collaborator is injectable (MultiAgentRunner passes its own
        # shared instances); otherwise defaults are built from env config.
        self.config = config or OpenCodeConfig.from_env()
        self.client = client or OpenCodeClient(self.config)
        self.session_manager = session_manager or SessionManager(self.client)
        self.tool_manager = tool_manager or ToolManager(self.client)
        self.mcp_manager = mcp_manager or MCPManager(self.client)
        self.logger = logging.getLogger(__name__)

    def _scoped_prompt(self, prompt: str) -> str:
        """
        Prepend project path context so the OpenCode server agent knows which
        directory to operate in. All file/terminal operations are executed on
        the OpenCode server—not on the Python client machine.
        """
        lines = [
            f"Project path (on the OpenCode server): {self.config.project_path}",
            "All file reads/writes, terminal commands, and tool calls must target",
            "this path on the OpenCode server. Do NOT reference paths on the client.",
            "",
            prompt,
        ]
        return "\n".join(lines)

    def run(
        self,
        prompt: str,
        *,
        agent: str | None = None,
        model: str | None = None,
        session_id: str | None = None,
        title: str | None = None,
        tools: list[str] | None = None,
        mcp_servers: list[str] | None = None,
        streaming: bool | None = None,
        system: str | None = None,
        attachments: list[str] | None = None,
    ) -> ExecutionResult:
        """Send *prompt* to the server synchronously and wait for the reply.

        Keyword arguments override the corresponding config defaults
        (``default_agent``, ``default_model``, ``default_tools``,
        ``streaming``). When *session_id* is given the existing session is
        reused; otherwise a new session is created, titled with *title* or
        the first 80 characters of the prompt.

        Returns an ``ExecutionResult`` whose metadata carries the task id,
        session id, message id, tool events, and an event log.
        """
        task_id = f"task_{uuid.uuid4().hex[:12]}"
        session = self.session_manager.reuse_or_create(task_id, session_id=session_id, title=title or prompt[:80])
        # tools=None means "use configured defaults"; an explicit empty list
        # (or empty defaults) falls through to no allowlist at all.
        requested_tools = tools if tools is not None else self.config.default_tools
        resolved_tools = self.tool_manager.resolve_allowlist(requested_tools) if requested_tools else None
        logs: list[dict[str, Any]] = []

        if mcp_servers:
            # Register the requested MCP servers before sending the message,
            # and record which ones came up.
            servers = self.mcp_manager.ensure_servers(mcp_servers, self.config.mcp_servers)
            logs.append({"event": "mcp.ready", "servers": [server.name for server in servers]})

        streaming_enabled = streaming if streaming is not None else self.config.streaming
        if streaming_enabled:
            # NOTE(review): enabling streaming only adds a log entry here;
            # the send below is still a blocking call. Confirm whether the
            # caller is expected to attach to the event stream separately.
            logs.append({"event": "streaming.enabled", "session_id": session.id})

        selected_agent = agent or self.config.default_agent
        log_event(self.logger, "task.run", task_id=task_id, session_id=session.id, agent=selected_agent)
        scoped_prompt = self._scoped_prompt(prompt)
        message = self.client.send_message(
            session.id,
            scoped_prompt,
            agent=selected_agent,
            model=model or self.config.default_model,
            tools=resolved_tools,
            system=system,
            attachments=attachments,
        )
        logs.append({"event": "message.completed", "message_id": message.info.id})
        metadata = ExecutionMetadata(
            task_id=task_id,
            session_id=session.id,
            agent=selected_agent,
            status="completed",
            message_id=message.info.id,
            tool_events=message.tool_events,
            logs=logs,
        )
        return ExecutionResult(output=message.text_output, metadata=metadata, message=message)

    def run_async(
        self,
        prompt: str,
        *,
        agent: str | None = None,
        model: str | None = None,
        session_id: str | None = None,
        title: str | None = None,
        tools: list[str] | None = None,
    ) -> ExecutionMetadata:
        """Submit *prompt* without waiting for completion.

        Mirrors :meth:`run` up to the send, but uses the client's
        fire-and-forget ``prompt_async`` call and returns metadata with
        ``status="submitted"`` — no message id or output is available yet.
        """
        task_id = f"task_{uuid.uuid4().hex[:12]}"
        session = self.session_manager.reuse_or_create(task_id, session_id=session_id, title=title or prompt[:80])
        requested_tools = tools if tools is not None else self.config.default_tools
        resolved_tools = self.tool_manager.resolve_allowlist(requested_tools) if requested_tools else None
        selected_agent = agent or self.config.default_agent
        scoped_prompt = self._scoped_prompt(prompt)
        self.client.prompt_async(
            session.id,
            scoped_prompt,
            agent=selected_agent,
            model=model or self.config.default_model,
            tools=resolved_tools,
        )
        return ExecutionMetadata(task_id=task_id, session_id=session.id, agent=selected_agent, status="submitted")

    def run_shell(
        self,
        command: str,
        *,
        session_id: str,
        agent: str | None = None,
        model: str | None = None,
    ) -> ExecutionResult:
        """Execute *command* in the shell of an existing server session.

        Unlike :meth:`run`, an existing *session_id* is required; the shell
        runs in that session's server-side working directory.
        """
        # Send the command directly to the OpenCode server shell.
        # The server session already has the correct working directory.
        # We intentionally do NOT prepend `cd {client_path}` because that
        # would inject a client-side filesystem path into a server-side shell.
        message = self.client.run_shell(
            session_id,
            command,
            agent=agent or self.config.default_agent,
            model=model or self.config.default_model,
        )
        metadata = ExecutionMetadata(
            task_id=f"shell_{uuid.uuid4().hex[:12]}",
            session_id=session_id,
            agent=agent or self.config.default_agent,
            status="completed",
            message_id=message.info.id,
            tool_events=message.tool_events,
            logs=[{"event": "shell.completed", "command": command}],
        )
        return ExecutionResult(output=message.text_output, metadata=metadata, message=message)
193
+
194
+
195
class ResultSynthesizer:
    """Fold per-subtask outputs into one plain-text report."""

    def synthesize(self, prompt: str, traces: list[SubtaskTrace]) -> str:
        """Return the parent objective followed by each subtask's output,
        with the sections separated by blank lines."""
        header = [f"Parent objective: {prompt}", "Subtask results:"]
        body = [f"[{trace.agent}] {trace.output}".strip() for trace in traces]
        return "\n\n".join(header + body)
201
+
202
+
203
class MultiAgentRunner:
    """Decompose one prompt into planned subtasks and run them via agents.

    A ``TaskPlanner`` produces the step list, each step executes through a
    shared ``SingleAgentRunner`` (in a forked session or the parent
    session), and a ``ResultSynthesizer`` folds the subtask outputs into a
    single answer.
    """

    def __init__(
        self,
        client: OpenCodeClient | None = None,
        config: OpenCodeConfig | None = None,
        session_manager: SessionManager | None = None,
        tool_manager: ToolManager | None = None,
        mcp_manager: MCPManager | None = None,
        planner: TaskPlanner | None = None,
        synthesizer: ResultSynthesizer | None = None,
    ):
        # All collaborators are injectable for testing; defaults share one
        # client/config so the inner SingleAgentRunner reuses the same
        # connection and managers as this runner.
        self.config = config or OpenCodeConfig.from_env()
        self.client = client or OpenCodeClient(self.config)
        self.session_manager = session_manager or SessionManager(self.client)
        self.tool_manager = tool_manager or ToolManager(self.client)
        self.mcp_manager = mcp_manager or MCPManager(self.client)
        self.planner = planner or TaskPlanner(self.config)
        self.synthesizer = synthesizer or ResultSynthesizer()
        self.single_runner = SingleAgentRunner(
            client=self.client,
            config=self.config,
            session_manager=self.session_manager,
            tool_manager=self.tool_manager,
            mcp_manager=self.mcp_manager,
        )

    def run(
        self,
        prompt: str,
        *,
        strategy: str = "plan-explore-build-review",
        session_id: str | None = None,
        title: str | None = None,
        tools: list[str] | None = None,
        mcp_servers: list[str] | None = None,
        parallel: bool = False,
    ) -> MultiAgentResult:
        """Run *prompt* through all steps planned for *strategy*.

        With ``parallel=True`` the steps run concurrently on a thread pool
        (at most 4 workers); otherwise they run sequentially in plan order.
        Returns a ``MultiAgentResult`` with the synthesized output and a
        per-subtask trace, ordered to match the plan.
        """
        parent_task_id = f"task_{uuid.uuid4().hex[:12]}"
        parent_session = self.session_manager.reuse_or_create(parent_task_id, session_id=session_id, title=title or prompt[:80])
        if mcp_servers:
            self.mcp_manager.ensure_servers(mcp_servers, self.config.mcp_servers)

        steps = self.planner.plan(prompt, strategy=strategy)
        traces: list[SubtaskTrace] = []

        def execute_step(index: int, step: PlannedStep) -> SubtaskTrace:
            # Child task ids embed the parent id plus the 1-based step index
            # and step name, e.g. "task_ab12cd34ef56_2_explore".
            child_task_id = f"{parent_task_id}_{index}_{step.name}"
            child_session = parent_session
            if step.fork_session:
                child_session = self.session_manager.fork(parent_task_id, child_task_id)
            else:
                # Non-forking steps run directly in the parent session; the
                # manager just records the child-task -> session mapping.
                self.session_manager.link_existing(
                    child_task_id,
                    session_id=parent_session.id,
                    title=parent_session.title,
                )
            result = self.single_runner.run(
                step.prompt_template.format(prompt=prompt),
                agent=step.agent,
                session_id=child_session.id,
                tools=tools,
                title=f"{step.name}: {prompt[:48]}",
            )
            return SubtaskTrace(
                task_id=result.metadata.task_id,
                session_id=result.metadata.session_id,
                agent=result.metadata.agent,
                prompt=step.prompt_template.format(prompt=prompt),
                output=result.output,
                message_id=result.metadata.message_id,
                status=result.metadata.status,
            )

        if parallel:
            # NOTE(review): non-forking steps share the parent session, so in
            # parallel mode their messages may interleave within one session —
            # confirm this is acceptable for the chosen strategy.
            with ThreadPoolExecutor(max_workers=min(4, len(steps))) as executor:
                futures = [executor.submit(execute_step, index, step) for index, step in enumerate(steps, start=1)]
                # Collect in submit order so traces stay aligned with the
                # plan even though completion order may differ.
                for future in futures:
                    traces.append(future.result())
        else:
            for index, step in enumerate(steps, start=1):
                traces.append(execute_step(index, step))

        output = self.synthesizer.synthesize(prompt, traces)
        metadata = ExecutionMetadata(
            task_id=parent_task_id,
            session_id=parent_session.id,
            agent="multi-agent",
            status="completed",
            logs=[{"event": "multi-agent.completed", "strategy": strategy, "subtasks": len(traces)}],
        )
        return MultiAgentResult(output=output, metadata=metadata, trace=traces)
@@ -0,0 +1,209 @@
1
+ Metadata-Version: 2.4
2
+ Name: opencode-agent
3
+ Version: 0.1.0
4
+ Summary: Python orchestration layer for the OpenCode Agent API
5
+ License-Expression: MIT
6
+ Keywords: opencode,agent,ai,orchestration,llm
7
+ Classifier: Development Status :: 3 - Alpha
8
+ Classifier: Intended Audience :: Developers
9
+ Classifier: Programming Language :: Python :: 3
10
+ Classifier: Programming Language :: Python :: 3.11
11
+ Classifier: Programming Language :: Python :: 3.12
12
+ Classifier: Topic :: Software Development :: Libraries
13
+ Requires-Python: >=3.11
14
+ Description-Content-Type: text/markdown
15
+ Requires-Dist: httpx<1.0,>=0.27
16
+
17
+ # OpenCode Orchestrator
18
+
19
+ `opencode-agent` is a Python 3.11+ client wrapper for the OpenCode server. The main API is a simple `Agent` class; defaults come from `.env`, MCP definitions come from `mcp.json`, and file or shell work runs on the OpenCode server inside the configured project path.
20
+
21
+ ## Features
22
+
23
+ - Defaults loaded from `.env`
24
+ - MCP server definitions loaded from `mcp.json`
25
+ - Required `OPENCODE_PROJECT_PATH` for project-scoped file and shell operations
26
+ - Simple `Agent` wrapper for server-side execution
27
+ - File, terminal, tool, streaming, and MCP usage through the OpenCode server
28
+ - Optional lower-level client and runner APIs if needed
29
+ - Shell execution via `POST /session/:id/shell`
30
+ - Optional SSE event streaming via `GET /event`
31
+ - Live examples for running against a real OpenCode server
32
+
33
+ ## Install
34
+
35
+ ```bash
36
+ python -m venv .venv
37
+ source .venv/bin/activate
38
+ pip install -e .
39
+ ```
40
+
41
+ ## Configuration
42
+
43
+ Load configuration from `.env`. MCP server definitions are loaded from `mcp.json`.
44
+
45
+ The included `.env` file sets:
46
+
47
+ ```bash
48
+ OPENCODE_SERVER=http://localhost:4096
49
+ OPENCODE_PROJECT_PATH=/absolute/path/to/project
50
+ OPENCODE_MODEL=openai/gpt-5.4-mini
51
+ ```
52
+
53
+ `OPENCODE_PROJECT_PATH` is required. The client uses it as the default scope for file calls and as the project context prepended to agent prompts; shell commands are sent directly to the server session, whose working directory is already the project path, so the code can be run from any local directory while still operating inside the intended OpenCode project.
54
+
55
+ ## Minimal Usage
56
+
57
+ ```python
58
+ from opencode_agent import Agent
59
+
60
+ agent = Agent(
61
+ name="MainAgent",
62
+ system_prompt="You are a highly capable AI assistant that uses the OpenCode server.",
63
+ )
64
+
65
+ reply = agent.run(
66
+ user_prompt="Who are you and what time is it? Do not use tools."
67
+ )
68
+ print(reply)
69
+
70
+ agent.close()
71
+ ```
72
+
73
+ If you want tool access and full metadata instead of only the text response:
74
+
75
+ ```python
76
+ from opencode_agent import Agent
77
+
78
+ agent = Agent(agent="explore", tools=["glob", "read"])
79
+ result = agent.run("Summarize this project.", return_result=True)
80
+ print(result.output)
81
+ print(result.metadata.session_id)
82
+ agent.close()
83
+ ```
84
+
85
+ Terminal execution is also minimal:
86
+
87
+ ```python
88
+ from opencode_agent import Agent
89
+
90
+ agent = Agent(name="TerminalAgent")
91
+ result = agent.run_shell("pwd && ls", return_result=True)
92
+ print(result.metadata.tool_events)
93
+ agent.close()
94
+ ```
95
+
96
+ You can list the models that are directly usable with the currently configured provider credentials:
97
+
98
+ ```python
99
+ from opencode_agent import Agent
100
+
101
+ agent = Agent()
102
+ print(agent.list_models())
103
+ agent.close()
104
+ ```
105
+
106
+ You can also pass a custom model directly, either as the default for the agent or per call:
107
+
108
+ ```python
109
+ from opencode_agent import Agent
110
+
111
+ agent = Agent(model="openai/gpt-5.4")
112
+ reply = agent.run("Reply in one line.")
113
+
114
+ other = agent.run(
115
+ "Reply in one line with a different model.",
116
+ model="openai/gpt-5.4-mini",
117
+ )
118
+
119
+ agent.close()
120
+ ```
121
+
122
+ ## Live Examples
123
+
124
+ These examples are intended to run against a real OpenCode server at `http://localhost:4096`.
125
+
126
+ Simple usage:
127
+
128
+ ```bash
129
+ .venv/bin/python examples/simple_usage.py
130
+ ```
131
+
132
+ Tool usage:
133
+
134
+ ```bash
135
+ .venv/bin/python examples/tool_usage.py
136
+ ```
137
+
138
+ Multiple agents:
139
+
140
+ ```bash
141
+ .venv/bin/python examples/multi_agent_usage.py
142
+ ```
143
+
144
+ File operations:
145
+
146
+ ```bash
147
+ .venv/bin/python examples/file_operations.py
148
+ ```
149
+
150
+ Terminal operations:
151
+
152
+ ```bash
153
+ .venv/bin/python examples/terminal_operations.py
154
+ ```
155
+
156
+ Streaming:
157
+
158
+ ```bash
159
+ .venv/bin/python examples/streaming_usage.py
160
+ ```
161
+
162
+ ## CLI Usage
163
+
164
+ Run a single task:
165
+
166
+ ```bash
167
+ opencode-agent run "Create a release checklist" --agent plan
168
+ ```
169
+
170
+ Run a multi-agent task:
171
+
172
+ ```bash
173
+ opencode-agent orchestrate "Implement and review a feature" --parallel
174
+ ```
175
+
176
+ Inspect sessions:
177
+
178
+ ```bash
179
+ opencode-agent sessions list
180
+ opencode-agent sessions status
181
+ opencode-agent sessions abort ses_123
182
+ ```
183
+
184
+ Inspect tools and MCP servers:
185
+
186
+ ```bash
187
+ opencode-agent tools --ids
188
+ opencode-agent mcp list
189
+ ```
190
+
191
+ Run a shell command through OpenCode:
192
+
193
+ ```bash
194
+ opencode-agent shell ses_123 "pytest -q"
195
+ ```
196
+
197
+ Read a few streaming events:
198
+
199
+ ```bash
200
+ opencode-agent stream --limit 5
201
+ ```
202
+
203
+ ## Notes
204
+
205
+ - Defaults are taken from `.env` unless you override fields directly in `Agent(...)` or `OpenCodeConfig`.
206
+ - MCP registration settings come from `mcp.json`.
207
+ - Tool allowlists are filtered against the live tool IDs reported by the server.
208
+ - `Agent.run()` returns plain text by default so the common case stays minimal.
209
+ - `Agent.list_models()` filters the server provider config down to models that are directly usable with the currently configured credentials.
@@ -0,0 +1,15 @@
1
+ opencode_agent/__init__.py,sha256=KuVnubVdN-SE9AqcCqM9T7ayePSQ74oBovqjYWlmVWI,590
2
+ opencode_agent/agent.py,sha256=z4zEPiCTVXRoBVj3Z1PZ0YYUD_-UumZ9MQmQVRMiMG8,4637
3
+ opencode_agent/cli.py,sha256=RFGEX26snm3mU19J4LsoQgmz47OKbRL87ljbstEbxbs,7835
4
+ opencode_agent/client.py,sha256=nPZLkKa6O0oihxl9BWCxWYJY3s3iRtQcF1AUA-v8GW8,13048
5
+ opencode_agent/config.py,sha256=Xzu3faOd6M8Kf3Uc8B2EM3WmGc8V8aZDmhofShAOmFQ,4221
6
+ opencode_agent/exceptions.py,sha256=kAaHM6cot9lFol5OrEG5wG1NQhZZFtNrJ5uOlcUk-14,784
7
+ opencode_agent/logging.py,sha256=1yrzN3QU_iQWfhK9M2_q0PWUxIHjtWsZagpMagm3unk,408
8
+ opencode_agent/managers.py,sha256=NF3a4CdGwYS51R_bFoXxi8Nq-v77WEHTcspZo4V4X7o,6184
9
+ opencode_agent/models.py,sha256=K1yeYTslWdHK0TBsoiNqoZAmLiGQ2c6d1kuDUWgqm0g,3163
10
+ opencode_agent/orchestrator.py,sha256=uSsMDmB3dbDlWHuBPgNNI3cpc6N5iXh_O80pg43Hqpc,12385
11
+ opencode_agent-0.1.0.dist-info/METADATA,sha256=-TsGuZjp0nhygno0p9FMjKItWH-jGXGvOp0Wvf0KwZs,5129
12
+ opencode_agent-0.1.0.dist-info/WHEEL,sha256=aeYiig01lYGDzBgS8HxWXOg3uV61G9ijOsup-k9o1sk,91
13
+ opencode_agent-0.1.0.dist-info/entry_points.txt,sha256=SIrL2cWr0YQePT9AwQ0qsIU-wniX1BP8QZo8NJi-X_A,59
14
+ opencode_agent-0.1.0.dist-info/top_level.txt,sha256=wkQW9W5NQ66pKpNwD3D9bhlPZxRToDX_IXrIdz2ss08,15
15
+ opencode_agent-0.1.0.dist-info/RECORD,,
@@ -0,0 +1,5 @@
1
+ Wheel-Version: 1.0
2
+ Generator: setuptools (82.0.1)
3
+ Root-Is-Purelib: true
4
+ Tag: py3-none-any
5
+
@@ -0,0 +1,2 @@
1
+ [console_scripts]
2
+ opencode-agent = opencode_agent.cli:main
@@ -0,0 +1 @@
1
+ opencode_agent