codex-autorunner 0.1.0__py3-none-any.whl → 0.1.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- codex_autorunner/agents/__init__.py +1 -0
- codex_autorunner/agents/base.py +62 -0
- codex_autorunner/agents/codex/__init__.py +5 -0
- codex_autorunner/agents/codex/harness.py +220 -0
- codex_autorunner/agents/opencode/__init__.py +14 -0
- codex_autorunner/agents/opencode/client.py +309 -0
- codex_autorunner/agents/opencode/events.py +67 -0
- codex_autorunner/agents/opencode/harness.py +212 -0
- codex_autorunner/agents/opencode/runtime.py +509 -0
- codex_autorunner/agents/opencode/supervisor.py +382 -0
- codex_autorunner/agents/types.py +42 -0
- codex_autorunner/bootstrap.py +29 -18
- codex_autorunner/cli.py +315 -107
- codex_autorunner/codex_cli.py +5 -0
- codex_autorunner/core/about_car.py +20 -7
- codex_autorunner/core/app_server_events.py +183 -0
- codex_autorunner/core/app_server_logging.py +169 -0
- codex_autorunner/core/app_server_prompts.py +376 -0
- codex_autorunner/core/app_server_threads.py +195 -0
- codex_autorunner/core/config.py +739 -65
- codex_autorunner/core/doc_chat.py +1210 -342
- codex_autorunner/core/docs.py +40 -0
- codex_autorunner/core/engine.py +1452 -73
- codex_autorunner/core/git_utils.py +28 -0
- codex_autorunner/core/hub.py +225 -94
- codex_autorunner/core/locks.py +117 -3
- codex_autorunner/core/logging_utils.py +5 -1
- codex_autorunner/core/optional_dependencies.py +7 -4
- codex_autorunner/core/patch_utils.py +224 -0
- codex_autorunner/core/prompt.py +4 -31
- codex_autorunner/core/request_context.py +18 -0
- codex_autorunner/core/runner_controller.py +12 -0
- codex_autorunner/core/snapshot.py +136 -132
- codex_autorunner/core/state.py +29 -25
- codex_autorunner/core/update.py +15 -1
- codex_autorunner/core/usage.py +663 -4
- codex_autorunner/core/utils.py +30 -5
- codex_autorunner/discovery.py +113 -30
- codex_autorunner/integrations/app_server/client.py +17 -0
- codex_autorunner/integrations/app_server/env.py +110 -0
- codex_autorunner/integrations/app_server/supervisor.py +1 -0
- codex_autorunner/integrations/github/chatops.py +268 -0
- codex_autorunner/integrations/github/pr_flow.py +1314 -0
- codex_autorunner/integrations/github/service.py +269 -1
- codex_autorunner/integrations/telegram/adapter.py +39 -0
- codex_autorunner/integrations/telegram/config.py +97 -1
- codex_autorunner/integrations/telegram/constants.py +14 -0
- codex_autorunner/integrations/telegram/dispatch.py +59 -36
- codex_autorunner/integrations/telegram/handlers/callbacks.py +8 -1
- codex_autorunner/integrations/telegram/handlers/commands.py +10 -12
- codex_autorunner/integrations/telegram/handlers/commands_runtime.py +2379 -165
- codex_autorunner/integrations/telegram/handlers/messages.py +144 -6
- codex_autorunner/integrations/telegram/handlers/selections.py +76 -0
- codex_autorunner/integrations/telegram/helpers.py +127 -119
- codex_autorunner/integrations/telegram/notifications.py +313 -8
- codex_autorunner/integrations/telegram/outbox.py +72 -51
- codex_autorunner/integrations/telegram/overflow.py +194 -0
- codex_autorunner/integrations/telegram/progress_stream.py +207 -0
- codex_autorunner/integrations/telegram/runtime.py +9 -7
- codex_autorunner/integrations/telegram/service.py +198 -2
- codex_autorunner/integrations/telegram/state.py +45 -0
- codex_autorunner/integrations/telegram/transport.py +89 -9
- codex_autorunner/manifest.py +48 -1
- codex_autorunner/routes/__init__.py +12 -0
- codex_autorunner/routes/agents.py +198 -0
- codex_autorunner/routes/app_server.py +132 -0
- codex_autorunner/routes/base.py +128 -30
- codex_autorunner/routes/docs.py +132 -26
- codex_autorunner/routes/github.py +136 -6
- codex_autorunner/routes/repos.py +59 -0
- codex_autorunner/routes/runs.py +118 -0
- codex_autorunner/routes/sessions.py +37 -6
- codex_autorunner/routes/settings.py +147 -0
- codex_autorunner/routes/shared.py +116 -7
- codex_autorunner/server.py +0 -7
- codex_autorunner/spec_ingest.py +754 -79
- codex_autorunner/static/agentControls.js +344 -0
- codex_autorunner/static/app.js +83 -78
- codex_autorunner/static/autoRefresh.js +118 -147
- codex_autorunner/static/bootstrap.js +117 -99
- codex_autorunner/static/bus.js +16 -17
- codex_autorunner/static/cache.js +26 -41
- codex_autorunner/static/constants.js +44 -45
- codex_autorunner/static/dashboard.js +676 -716
- codex_autorunner/static/docChatActions.js +279 -0
- codex_autorunner/static/docChatEvents.js +300 -0
- codex_autorunner/static/docChatRender.js +205 -0
- codex_autorunner/static/docChatStream.js +361 -0
- codex_autorunner/static/docs.js +18 -1512
- codex_autorunner/static/docsClipboard.js +69 -0
- codex_autorunner/static/docsCrud.js +257 -0
- codex_autorunner/static/docsDocUpdates.js +62 -0
- codex_autorunner/static/docsDrafts.js +16 -0
- codex_autorunner/static/docsElements.js +69 -0
- codex_autorunner/static/docsInit.js +274 -0
- codex_autorunner/static/docsParse.js +160 -0
- codex_autorunner/static/docsSnapshot.js +87 -0
- codex_autorunner/static/docsSpecIngest.js +263 -0
- codex_autorunner/static/docsState.js +127 -0
- codex_autorunner/static/docsThreadRegistry.js +44 -0
- codex_autorunner/static/docsUi.js +153 -0
- codex_autorunner/static/docsVoice.js +56 -0
- codex_autorunner/static/env.js +29 -79
- codex_autorunner/static/github.js +427 -153
- codex_autorunner/static/hub.js +1216 -1330
- codex_autorunner/static/index.html +341 -35
- codex_autorunner/static/loader.js +26 -26
- codex_autorunner/static/logs.js +560 -610
- codex_autorunner/static/mobileCompact.js +215 -263
- codex_autorunner/static/runs.js +409 -0
- codex_autorunner/static/settings.js +335 -0
- codex_autorunner/static/snapshot.js +104 -96
- codex_autorunner/static/state.js +68 -69
- codex_autorunner/static/styles.css +1680 -328
- codex_autorunner/static/tabs.js +34 -43
- codex_autorunner/static/terminal.js +6 -15
- codex_autorunner/static/terminalManager.js +3513 -3468
- codex_autorunner/static/todoPreview.js +25 -23
- codex_autorunner/static/utils.js +567 -534
- codex_autorunner/static/voice.js +488 -540
- codex_autorunner/web/app.py +836 -143
- codex_autorunner/web/middleware.py +6 -5
- codex_autorunner/web/schemas.py +119 -0
- codex_autorunner/web/static_assets.py +1 -0
- {codex_autorunner-0.1.0.dist-info → codex_autorunner-0.1.1.dist-info}/METADATA +15 -8
- codex_autorunner-0.1.1.dist-info/RECORD +191 -0
- codex_autorunner/static/types.d.ts +0 -8
- codex_autorunner-0.1.0.dist-info/RECORD +0 -147
- {codex_autorunner-0.1.0.dist-info → codex_autorunner-0.1.1.dist-info}/WHEEL +0 -0
- {codex_autorunner-0.1.0.dist-info → codex_autorunner-0.1.1.dist-info}/entry_points.txt +0 -0
- {codex_autorunner-0.1.0.dist-info → codex_autorunner-0.1.1.dist-info}/licenses/LICENSE +0 -0
- {codex_autorunner-0.1.0.dist-info → codex_autorunner-0.1.1.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1 @@
|
|
|
1
|
+
"""Agent harness abstractions."""
|
|
@@ -0,0 +1,62 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from pathlib import Path
|
|
4
|
+
from typing import Any, AsyncIterator, Optional, Protocol
|
|
5
|
+
|
|
6
|
+
from .types import ConversationRef, ModelCatalog, TurnRef
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
class AgentHarness(Protocol):
    """Structural (duck-typed) interface for a coding-agent backend.

    Implementations manage conversations and turns for a single agent
    rooted at a workspace directory; every method takes the workspace
    root so one harness instance can serve multiple workspaces.
    """

    # Stable machine identifier for the agent (e.g. "codex").
    agent_id: str
    # Human-readable name for UI display.
    display_name: str

    # Prepare any backend resources needed for this workspace.
    async def ensure_ready(self, workspace_root: Path) -> None: ...

    # Return the models (and default) the agent offers for this workspace.
    async def model_catalog(self, workspace_root: Path) -> ModelCatalog: ...

    # Create a new conversation; the title is optional.
    async def new_conversation(
        self, workspace_root: Path, title: Optional[str] = None
    ) -> ConversationRef: ...

    # List conversations known to the backend for this workspace.
    async def list_conversations(
        self, workspace_root: Path
    ) -> list[ConversationRef]: ...

    # Re-attach to an existing conversation by id.
    async def resume_conversation(
        self, workspace_root: Path, conversation_id: str
    ) -> ConversationRef: ...

    # Start a normal prompt turn; returns a reference to the running turn.
    async def start_turn(
        self,
        workspace_root: Path,
        conversation_id: str,
        prompt: str,
        model: Optional[str],
        reasoning: Optional[str],
        *,
        approval_mode: Optional[str],
        sandbox_policy: Optional[Any],
    ) -> TurnRef: ...

    # Start a code-review turn; same parameter shape as start_turn.
    async def start_review(
        self,
        workspace_root: Path,
        conversation_id: str,
        prompt: str,
        model: Optional[str],
        reasoning: Optional[str],
        *,
        approval_mode: Optional[str],
        sandbox_policy: Optional[Any],
    ) -> TurnRef: ...

    # Request cancellation of an in-flight turn.
    async def interrupt(
        self, workspace_root: Path, conversation_id: str, turn_id: Optional[str]
    ) -> None: ...

    # Stream raw event payloads for a turn. NOTE: this is a synchronous
    # method that returns an async iterator (not an async method).
    def stream_events(
        self, workspace_root: Path, conversation_id: str, turn_id: str
    ) -> AsyncIterator[str]: ...


__all__ = ["AgentHarness"]
|
|
@@ -0,0 +1,220 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from pathlib import Path
|
|
4
|
+
from typing import Any, AsyncIterator, Optional
|
|
5
|
+
|
|
6
|
+
from ...core.app_server_events import AppServerEventBuffer
|
|
7
|
+
from ...integrations.app_server.supervisor import WorkspaceAppServerSupervisor
|
|
8
|
+
from ..base import AgentHarness
|
|
9
|
+
from ..types import AgentId, ConversationRef, ModelCatalog, ModelSpec, TurnRef
|
|
10
|
+
|
|
11
|
+
# Fallback effort ladder used when a model entry declares no efforts of its own.
_DEFAULT_REASONING_EFFORTS = ("none", "minimal", "low", "medium", "high", "xhigh")


def _coerce_entries(result: Any, keys: tuple[str, ...]) -> list[dict[str, Any]]:
    """Extract a list of dict entries from a loosely-typed API response.

    Accepts either a bare list or a dict wrapping the list under one of
    ``keys`` (checked in order); non-dict items are dropped. Any other
    shape yields an empty list.
    """

    def _only_dicts(candidates: list[Any]) -> list[dict[str, Any]]:
        return [item for item in candidates if isinstance(item, dict)]

    if isinstance(result, list):
        return _only_dicts(result)
    if isinstance(result, dict):
        for candidate_key in keys:
            candidate = result.get(candidate_key)
            if isinstance(candidate, list):
                return _only_dicts(candidate)
    return []
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
def _select_default_model(result: Any, entries: list[dict[str, Any]]) -> str:
    """Pick the default model id from a model-list response.

    Preference order: an explicit default field on the response dict, then
    one nested under its ``config`` dict, then the first entry flagged as
    default, then the first entry with a usable id. Returns "" when
    nothing matches.
    """

    def _str_field(mapping: dict[str, Any], names: tuple[str, ...]) -> str:
        for name in names:
            candidate = mapping.get(name)
            if isinstance(candidate, str) and candidate:
                return candidate
        return ""

    def _entry_model(entry: dict[str, Any]) -> str:
        candidate = entry.get("model") or entry.get("id")
        return candidate if isinstance(candidate, str) and candidate else ""

    if isinstance(result, dict):
        found = _str_field(
            result,
            ("defaultModel", "default_model", "default", "model", "modelId", "model_id"),
        )
        if found:
            return found
        config = result.get("config")
        if isinstance(config, dict):
            found = _str_field(config, ("defaultModel", "default_model", "model", "modelId"))
            if found:
                return found

    for entry in entries:
        if entry.get("default") or entry.get("isDefault"):
            flagged = _entry_model(entry)
            if flagged:
                return flagged

    for entry in entries:
        fallback = _entry_model(entry)
        if fallback:
            return fallback

    return ""
|
|
54
|
+
|
|
55
|
+
|
|
56
|
+
def _coerce_reasoning_efforts(entry: dict[str, Any]) -> list[str]:
    """Collect the reasoning-effort names supported by one model entry.

    Reads string items (or dicts carrying a "reasoningEffort" string) from
    "supportedReasoningEfforts", appends "defaultReasoningEffort" when set,
    and falls back to the module-level default ladder when nothing was
    found. The result is de-duplicated, preserving first-seen order.
    """
    collected: list[str] = []
    raw = entry.get("supportedReasoningEfforts")
    if isinstance(raw, list):
        for item in raw:
            if isinstance(item, str):
                collected.append(item)
            elif isinstance(item, dict):
                nested = item.get("reasoningEffort")
                if isinstance(nested, str):
                    collected.append(nested)
    fallback = entry.get("defaultReasoningEffort")
    if isinstance(fallback, str) and fallback:
        collected.append(fallback)
    if not collected:
        collected = list(_DEFAULT_REASONING_EFFORTS)
    # dict.fromkeys de-duplicates while keeping insertion order.
    return list(dict.fromkeys(collected))
|
|
73
|
+
|
|
74
|
+
|
|
75
|
+
class CodexHarness(AgentHarness):
    """AgentHarness backed by a per-workspace Codex app-server.

    Every operation first obtains (and thereby starts, if needed) the
    workspace's app-server client via the supervisor. Started turns are
    registered with the shared event buffer so their events can later be
    streamed via :meth:`stream_events`.
    """

    agent_id: AgentId = "codex"
    display_name = "Codex"

    def __init__(
        self,
        supervisor: WorkspaceAppServerSupervisor,
        events: AppServerEventBuffer,
    ) -> None:
        self._supervisor = supervisor
        self._events = events

    async def ensure_ready(self, workspace_root: Path) -> None:
        """Start (or reuse) the app-server client for this workspace."""
        await self._supervisor.get_client(workspace_root)

    async def model_catalog(self, workspace_root: Path) -> ModelCatalog:
        """Fetch and normalize the model list advertised by the app-server."""
        client = await self._supervisor.get_client(workspace_root)
        result = await client.model_list()
        entries = _coerce_entries(result, ("data", "models", "items", "results"))
        models: list[ModelSpec] = []
        for entry in entries:
            model_id = entry.get("model") or entry.get("id")
            if not isinstance(model_id, str) or not model_id:
                # Skip entries without a usable string id.
                continue
            display_name = entry.get("displayName") or entry.get("name") or model_id
            if not isinstance(display_name, str) or not display_name:
                display_name = model_id
            efforts = _coerce_reasoning_efforts(entry)
            models.append(
                ModelSpec(
                    id=model_id,
                    display_name=display_name,
                    supports_reasoning=bool(efforts),
                    reasoning_options=efforts,
                )
            )
        default_model = _select_default_model(result, entries)
        if not default_model and models:
            # No explicit default advertised: fall back to the first model.
            default_model = models[0].id
        return ModelCatalog(default_model=default_model, models=models)

    async def new_conversation(
        self, workspace_root: Path, title: Optional[str] = None
    ) -> ConversationRef:
        """Start a new app-server thread rooted at the workspace.

        ``title`` is accepted for interface compatibility but is not
        forwarded to the app-server. Raises ``ValueError`` when the server
        does not return a thread id.
        """
        client = await self._supervisor.get_client(workspace_root)
        result = await client.thread_start(str(workspace_root))
        thread_id = result.get("id")
        if not isinstance(thread_id, str) or not thread_id:
            raise ValueError("Codex app-server did not return a thread id")
        return ConversationRef(agent=self.agent_id, id=thread_id)

    async def list_conversations(self, workspace_root: Path) -> list[ConversationRef]:
        """List threads known to the app-server, skipping malformed entries."""
        client = await self._supervisor.get_client(workspace_root)
        result = await client.thread_list()
        entries = _coerce_entries(result, ("threads", "data", "items", "results"))
        conversations: list[ConversationRef] = []
        for entry in entries:
            thread_id = entry.get("id")
            if isinstance(thread_id, str) and thread_id:
                conversations.append(ConversationRef(agent=self.agent_id, id=thread_id))
        return conversations

    async def resume_conversation(
        self, workspace_root: Path, conversation_id: str
    ) -> ConversationRef:
        """Resume an existing thread, tolerating a missing id in the reply."""
        client = await self._supervisor.get_client(workspace_root)
        result = await client.thread_resume(conversation_id)
        thread_id = result.get("id") or conversation_id
        if not isinstance(thread_id, str) or not thread_id:
            # Defensive: keep the caller's id if the reply id is unusable.
            thread_id = conversation_id
        return ConversationRef(agent=self.agent_id, id=thread_id)

    async def start_turn(
        self,
        workspace_root: Path,
        conversation_id: str,
        prompt: str,
        model: Optional[str],
        reasoning: Optional[str],
        *,
        approval_mode: Optional[str],
        sandbox_policy: Optional[Any],
    ) -> TurnRef:
        """Start a prompt turn; model/effort are passed only when truthy."""
        client = await self._supervisor.get_client(workspace_root)
        turn_kwargs: dict[str, Any] = {}
        if model:
            turn_kwargs["model"] = model
        if reasoning:
            # The app-server calls the reasoning level "effort".
            turn_kwargs["effort"] = reasoning
        handle = await client.turn_start(
            conversation_id,
            prompt,
            approval_policy=approval_mode,
            sandbox_policy=sandbox_policy,
            **turn_kwargs,
        )
        await self._events.register_turn(handle.thread_id, handle.turn_id)
        return TurnRef(conversation_id=handle.thread_id, turn_id=handle.turn_id)

    async def start_review(
        self,
        workspace_root: Path,
        conversation_id: str,
        prompt: str,
        model: Optional[str],
        reasoning: Optional[str],
        *,
        approval_mode: Optional[str],
        sandbox_policy: Optional[Any],
    ) -> TurnRef:
        """Start a review turn.

        A non-empty prompt becomes a "custom" review target with the prompt
        as instructions; an empty/whitespace prompt reviews the workspace's
        uncommitted changes.
        """
        client = await self._supervisor.get_client(workspace_root)
        review_kwargs: dict[str, Any] = {}
        if model:
            review_kwargs["model"] = model
        if reasoning:
            review_kwargs["effort"] = reasoning
        instructions = (prompt or "").strip()
        if instructions:
            target = {"type": "custom", "instructions": instructions}
        else:
            target = {"type": "uncommittedChanges"}
        handle = await client.review_start(
            conversation_id,
            target=target,
            approval_policy=approval_mode,
            sandbox_policy=sandbox_policy,
            **review_kwargs,
        )
        await self._events.register_turn(handle.thread_id, handle.turn_id)
        return TurnRef(conversation_id=handle.thread_id, turn_id=handle.turn_id)

    async def interrupt(
        self, workspace_root: Path, conversation_id: str, turn_id: Optional[str]
    ) -> None:
        """Interrupt a running turn; a falsy ``turn_id`` is a no-op."""
        if not turn_id:
            return
        client = await self._supervisor.get_client(workspace_root)
        await client.turn_interrupt(turn_id, thread_id=conversation_id)

    def stream_events(
        self, workspace_root: Path, conversation_id: str, turn_id: str
    ) -> AsyncIterator[str]:
        """Return the buffered event stream for a turn.

        Synchronous method returning an async iterator; ``workspace_root``
        is unused here because events come from the shared buffer.
        """
        return self._events.stream(conversation_id, turn_id)


__all__ = ["CodexHarness"]
|
|
@@ -0,0 +1,14 @@
|
|
|
1
|
+
"""OpenCode harness support."""
|
|
2
|
+
|
|
3
|
+
from .client import OpenCodeClient
|
|
4
|
+
from .events import SSEEvent, parse_sse_lines
|
|
5
|
+
from .harness import OpenCodeHarness
|
|
6
|
+
from .supervisor import OpenCodeSupervisor
|
|
7
|
+
|
|
8
|
+
__all__ = [
|
|
9
|
+
"OpenCodeClient",
|
|
10
|
+
"OpenCodeHarness",
|
|
11
|
+
"OpenCodeSupervisor",
|
|
12
|
+
"SSEEvent",
|
|
13
|
+
"parse_sse_lines",
|
|
14
|
+
]
|
|
@@ -0,0 +1,309 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import json
|
|
4
|
+
import logging
|
|
5
|
+
from typing import Any, AsyncIterator, Optional
|
|
6
|
+
|
|
7
|
+
import httpx
|
|
8
|
+
|
|
9
|
+
from ...core.logging_utils import log_event
|
|
10
|
+
from .events import SSEEvent, parse_sse_lines
|
|
11
|
+
|
|
12
|
+
_MAX_INVALID_JSON_PREVIEW_BYTES = 512
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
class OpenCodeProtocolError(Exception):
    """Raised when the OpenCode server replies with an unusable body.

    Carries diagnostic context from the offending HTTP response — status
    code, content type, and a short body preview — alongside the
    human-readable message passed to ``Exception``.
    """

    def __init__(
        self,
        message: str,
        *,
        status_code: Optional[int] = None,
        content_type: Optional[str] = None,
        body_preview: Optional[str] = None,
    ) -> None:
        super().__init__(message)
        # Stash the response context for callers that want to log or
        # branch on it.
        self.body_preview = body_preview
        self.content_type = content_type
        self.status_code = status_code
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
class OpenCodeClient:
    """Minimal async HTTP client for the OpenCode server REST/SSE API.

    Wraps an ``httpx.AsyncClient`` bound to ``base_url`` and normalizes
    response handling: empty bodies become ``None`` and invalid JSON is
    logged and — when JSON was expected — raised as
    ``OpenCodeProtocolError``.
    """

    def __init__(
        self,
        base_url: str,
        *,
        auth: Optional[tuple[str, str]] = None,
        timeout: Optional[float] = None,
        logger: Optional[logging.Logger] = None,
    ) -> None:
        self._client = httpx.AsyncClient(
            base_url=base_url,
            auth=auth,
            timeout=timeout,
        )
        self._logger = logger or logging.getLogger(__name__)

    async def close(self) -> None:
        """Close the underlying HTTP connection pool."""
        await self._client.aclose()

    def _dir_params(self, directory: Optional[str]) -> dict[str, str]:
        """Build query params for endpoints that accept a directory scope."""
        return {"directory": directory} if directory else {}

    async def _request(
        self,
        method: str,
        path: str,
        *,
        params: Optional[dict[str, Any]] = None,
        json_body: Optional[dict[str, Any]] = None,
        expect_json: bool = True,
    ) -> Any:
        """Issue one request and decode its JSON body.

        Returns ``None`` for empty/whitespace bodies. On undecodable JSON
        the problem is logged; if ``expect_json`` is true an
        ``OpenCodeProtocolError`` (with an HTML-detection hint) is raised,
        otherwise ``None`` is returned. 4xx/5xx responses raise
        ``httpx.HTTPStatusError`` via ``raise_for_status``.
        """
        response = await self._client.request(
            method, path, params=params, json=json_body
        )
        response.raise_for_status()
        raw = response.content
        if not raw or not raw.strip():
            return None
        try:
            return json.loads(raw)
        except json.JSONDecodeError as exc:
            self._log_invalid_json(
                method,
                path,
                response,
                raw,
                expect_json=expect_json,
            )
            if expect_json:
                preview = (
                    raw[:_MAX_INVALID_JSON_PREVIEW_BYTES]
                    .decode("utf-8", errors="replace")
                    .strip()
                )
                content_type = response.headers.get("content-type")
                hint = ""
                # Heuristic: an HTML body usually means the request hit a
                # proxy/SPA route instead of the API.
                if content_type and "text/html" in content_type.lower():
                    hint = (
                        " Response looks like HTML; the OpenCode server may have "
                        "proxied the request instead of handling an API route."
                    )
                elif preview.startswith("<"):
                    hint = (
                        " Response looks like HTML; check that the OpenCode API "
                        "endpoint is correct."
                    )
                raise OpenCodeProtocolError(
                    f"OpenCode returned invalid JSON.{hint}",
                    status_code=response.status_code,
                    content_type=content_type,
                    body_preview=preview or None,
                ) from exc
            return None

    def _log_invalid_json(
        self,
        method: str,
        path: str,
        response: httpx.Response,
        raw: bytes,
        *,
        expect_json: bool,
    ) -> None:
        """Emit a structured warning with a truncated body preview."""
        preview = raw[:_MAX_INVALID_JSON_PREVIEW_BYTES].decode(
            "utf-8", errors="replace"
        )
        log_event(
            self._logger,
            logging.WARNING,
            "opencode.response.invalid_json",
            method=method,
            path=path,
            status_code=response.status_code,
            content_length=len(raw),
            content_type=response.headers.get("content-type"),
            expect_json=expect_json,
            preview=preview,
        )

    async def providers(self, directory: Optional[str] = None) -> Any:
        """GET /config/providers — list configured model providers."""
        return await self._request(
            "GET",
            "/config/providers",
            params=self._dir_params(directory),
            expect_json=True,
        )

    async def create_session(
        self,
        *,
        title: Optional[str] = None,
        directory: Optional[str] = None,
    ) -> Any:
        """POST /session — create a session; title/directory sent when set."""
        payload: dict[str, Any] = {}
        if title:
            payload["title"] = title
        if directory:
            payload["directory"] = directory
        return await self._request(
            "POST", "/session", json_body=payload, expect_json=True
        )

    async def list_sessions(self, directory: Optional[str] = None) -> Any:
        """GET /session — list sessions, optionally scoped to a directory."""
        return await self._request(
            "GET", "/session", params=self._dir_params(directory), expect_json=True
        )

    async def get_session(self, session_id: str) -> Any:
        """GET /session/{id} — fetch one session."""
        return await self._request("GET", f"/session/{session_id}", expect_json=True)

    async def send_message(
        self,
        session_id: str,
        *,
        message: str,
        agent: Optional[str] = None,
        model: Optional[dict[str, str]] = None,
        variant: Optional[str] = None,
    ) -> Any:
        """POST /session/{id}/message — send a text message.

        Unlike :meth:`prompt`, no JSON response body is required.
        """
        payload: dict[str, Any] = {
            "parts": [{"type": "text", "text": message}],
        }
        if agent:
            payload["agent"] = agent
        if model:
            payload["model"] = model
        if variant:
            payload["variant"] = variant
        return await self._request(
            "POST",
            f"/session/{session_id}/message",
            json_body=payload,
            expect_json=False,
        )

    async def prompt(
        self,
        session_id: str,
        *,
        message: str,
        agent: Optional[str] = None,
        model: Optional[dict[str, str]] = None,
        variant: Optional[str] = None,
    ) -> Any:
        """Send a prompt and return the decoded JSON reply.

        Falls back to POST /session/{id}/prompt_async when the server
        answers 404/405 for the message endpoint; any other HTTP error is
        re-raised.
        """
        payload: dict[str, Any] = {
            "parts": [{"type": "text", "text": message}],
        }
        if agent:
            payload["agent"] = agent
        if model:
            payload["model"] = model
        if variant:
            payload["variant"] = variant
        try:
            return await self._request(
                "POST",
                f"/session/{session_id}/message",
                json_body=payload,
                expect_json=True,
            )
        except httpx.HTTPStatusError as exc:
            if exc.response.status_code in (404, 405):
                return await self._request(
                    "POST",
                    f"/session/{session_id}/prompt_async",
                    json_body=payload,
                    expect_json=False,
                )
            raise

    async def send_command(
        self,
        session_id: str,
        *,
        command: str,
        arguments: Optional[str] = None,
        model: Optional[str] = None,
        agent: Optional[str] = None,
    ) -> Any:
        """POST /session/{id}/command — run a command with arguments."""
        payload: dict[str, Any] = {
            "command": command,
            "arguments": arguments or "",
        }
        if model:
            payload["model"] = model
        if agent:
            payload["agent"] = agent
        return await self._request(
            "POST",
            f"/session/{session_id}/command",
            json_body=payload,
            expect_json=False,
        )

    async def summarize(
        self,
        session_id: str,
        *,
        provider_id: str,
        model_id: str,
        auto: Optional[bool] = None,
    ) -> Any:
        """POST /session/{id}/summarize — request a session summary."""
        payload: dict[str, Any] = {
            "providerID": provider_id,
            "modelID": model_id,
        }
        if auto is not None:
            payload["auto"] = auto
        return await self._request(
            "POST",
            f"/session/{session_id}/summarize",
            json_body=payload,
            expect_json=True,
        )

    async def respond_permission(
        self,
        *,
        request_id: str,
        reply: str,
        message: Optional[str] = None,
    ) -> Any:
        """POST /permission/{id}/reply — answer a permission request."""
        payload: dict[str, Any] = {"reply": reply}
        if message:
            payload["message"] = message
        return await self._request(
            "POST",
            f"/permission/{request_id}/reply",
            json_body=payload,
            expect_json=False,
        )

    async def abort(self, session_id: str) -> Any:
        """POST /session/{id}/abort — abort the session's current work."""
        return await self._request(
            "POST", f"/session/{session_id}/abort", expect_json=False
        )

    async def stream_events(
        self, *, directory: Optional[str] = None
    ) -> AsyncIterator[SSEEvent]:
        """GET /event — stream server-sent events.

        When an event's data payload parses as a JSON object with a
        "type" key, that value replaces the SSE event name.
        """
        params = self._dir_params(directory)
        async with self._client.stream("GET", "/event", params=params) as response:
            response.raise_for_status()
            async for sse in parse_sse_lines(response.aiter_lines()):
                event_type = sse.event
                try:
                    payload = json.loads(sse.data) if sse.data else None
                    if isinstance(payload, dict) and "type" in payload:
                        event_type = str(payload["type"])
                except (json.JSONDecodeError, TypeError):
                    # Non-JSON data keeps the wire-level event name.
                    pass
                yield SSEEvent(
                    event=event_type,
                    data=sse.data,
                    id=sse.id,
                    retry=sse.retry,
                )


__all__ = ["OpenCodeClient", "OpenCodeProtocolError"]
|
|
@@ -0,0 +1,67 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from dataclasses import dataclass
|
|
4
|
+
from typing import AsyncIterator, Optional
|
|
5
|
+
|
|
6
|
+
|
|
7
|
+
@dataclass(frozen=True)
class SSEEvent:
    """One parsed Server-Sent Events (SSE) message.

    ``data`` holds the joined payload lines; ``id`` and ``retry`` are the
    optional event-id and reconnection-delay fields from the stream.
    """

    event: str
    data: str
    id: Optional[str] = None
    retry: Optional[int] = None


async def parse_sse_lines(lines: AsyncIterator[str]) -> AsyncIterator[SSEEvent]:
    """Incrementally parse SSE-formatted text lines into ``SSEEvent``s.

    An event is dispatched at each blank line (and once more at end of
    stream) when any of data/id/retry was accumulated. Comment lines
    (starting with ``:``) are ignored, and an unparsable ``retry`` value
    is dropped.

    Fix: the field buffers are now reset on *every* blank line, per the
    SSE specification. Previously they were reset only after a dispatched
    event, so a block consisting solely of an ``event:`` field leaked its
    event name into the next dispatched message.
    """
    event_name = "message"
    data_lines: list[str] = []
    event_id: Optional[str] = None
    retry_value: Optional[int] = None

    async for line in lines:
        if not line:
            if data_lines or event_id is not None or retry_value is not None:
                yield SSEEvent(
                    event=event_name or "message",
                    data="\n".join(data_lines),
                    id=event_id,
                    retry=retry_value,
                )
            # Reset unconditionally: a blank line always terminates the
            # current (possibly empty) event block.
            event_name = "message"
            data_lines = []
            event_id = None
            retry_value = None
            continue

        if line.startswith(":"):
            # SSE comment line.
            continue

        if ":" in line:
            field, value = line.split(":", 1)
            # A single leading space after the colon is part of the syntax.
            if value.startswith(" "):
                value = value[1:]
        else:
            field, value = line, ""

        if field == "event":
            event_name = value
        elif field == "data":
            data_lines.append(value)
        elif field == "id":
            event_id = value
        elif field == "retry":
            try:
                retry_value = int(value)
            except ValueError:
                retry_value = None

    # Flush a trailing event not terminated by a blank line.
    if data_lines or event_id is not None or retry_value is not None:
        yield SSEEvent(
            event=event_name or "message",
            data="\n".join(data_lines),
            id=event_id,
            retry=retry_value,
        )


__all__ = ["SSEEvent", "parse_sse_lines"]
|