autopilot-code 1.0.0 → 2.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +1 -3
- package/package.json +1 -1
- package/scripts/issue_runner/agents/__init__.py +10 -2
- package/scripts/issue_runner/agents/opencode_client.py +486 -0
- package/scripts/issue_runner/agents/opencode_server.py +247 -0
- package/scripts/issue_runner/agents/test_opencode_server.py +321 -0
- package/scripts/issue_runner/runner.py +11 -0
- package/scripts/run_autopilot.py +293 -149
- package/templates/autopilot.json +0 -1
- package/scripts/run_opencode_issue.sh +0 -690
package/README.md
CHANGED
@@ -43,7 +43,6 @@ The new runner provides enhanced progress tracking and session continuity:
 
 ```json
 {
-  "useNewRunner": true,
   "enablePlanningStep": true
 }
 ```
@@ -104,7 +103,6 @@ Example:
 Notes:
 - `repo` must be the GitHub `owner/name`.
 - `agent` (optional, default `"opencode"`): set to `"opencode"` or `"claude"` to choose which coding agent to use.
-- `useNewRunner` (optional, default `true`): enable the new runner with step labels and session continuity. The legacy bash runner is deprecated and will be removed in a future version.
 - `autoMerge` (optional, default `true`): if `true`, autopilot will automatically merge PRs after checks pass.
 - `mergeMethod` (optional, default `"squash"`): merge strategy to use. Options: `"squash"`, `"merge"`, or `"rebase"`.
 - `allowedMergeUsers` (required when `autoMerge=true`): list of GitHub usernames allowed to auto-merge. The runner verifies the authenticated GitHub user is in this list before merging.
@@ -114,7 +112,7 @@ Notes:
 - `conflictResolutionMaxAttempts` (optional, default `3`): maximum number of attempts to resolve merge conflicts.
 - `autoFixChecks` (optional, default `true`): if `true`, autopilot will attempt to automatically fix failing CI checks.
 - `autoFixChecksMaxAttempts` (optional, default `3`): maximum number of attempts to fix failing checks.
-- `enablePlanningStep` (optional, default `true`): if `true`, add an explicit planning phase before implementation
+- `enablePlanningStep` (optional, default `true`): if `true`, add an explicit planning phase before implementation.
 - `agentPath` (optional): custom path to agent executable (defaults to searching PATH).
 
 ## Workflow (labels)
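The `allowedMergeUsers` option described above amounts to comparing the authenticated GitHub login against the configured allowlist before any auto-merge. A minimal sketch of that gate, assuming the GitHub CLI (`gh api user`) is used to resolve the login (the shipped runner may resolve it differently):

```python
import json
import subprocess


def can_auto_merge(config: dict) -> bool:
    """Illustrative gate: merge only when autoMerge is on and the user is allowlisted."""
    if not config.get("autoMerge", True):
        return False
    allowed = config.get("allowedMergeUsers", [])
    # Ask the GitHub CLI who we are authenticated as (assumes `gh auth login` was run).
    result = subprocess.run(
        ["gh", "api", "user"], capture_output=True, text=True, check=True
    )
    login = json.loads(result.stdout).get("login", "")
    return login in allowed
```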
package/package.json
CHANGED

package/scripts/issue_runner/agents/__init__.py
CHANGED
@@ -1,8 +1,15 @@
 from .base import BaseAgent, AgentResult
 from .opencode import OpenCodeAgent
+from .opencode_server import OpenCodeServerAgent
 from .claude import ClaudeCodeAgent
 
-__all__ = [
+__all__ = [
+    "BaseAgent",
+    "AgentResult",
+    "OpenCodeAgent",
+    "OpenCodeServerAgent",
+    "ClaudeCodeAgent",
+]
 
 
 def get_agent(agent_type: str, config: dict) -> BaseAgent:
@@ -10,7 +17,7 @@ def get_agent(agent_type: str, config: dict) -> BaseAgent:
     Factory function to create the appropriate agent.
 
     Args:
-        agent_type: "opencode" or "claude"
+        agent_type: "opencode", "opencode-server", or "claude"
         config: Agent configuration from autopilot.json
 
     Returns:
@@ -21,6 +28,7 @@ def get_agent(agent_type: str, config: dict) -> BaseAgent:
     """
     agents = {
         "opencode": OpenCodeAgent,
+        "opencode-server": OpenCodeServerAgent,
         "claude": ClaudeCodeAgent,
     }
 
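For context, the updated factory is what selects the new server-backed agent at runtime. A minimal usage sketch, assuming the package is importable as `scripts.issue_runner.agents` and that the agent section of `autopilot.json` is passed through as a plain dict (both assumptions, not shown in this diff):

```python
from scripts.issue_runner.agents import get_agent

# Hypothetical agent config; the real keys come from autopilot.json
# (e.g. `agentPath` is documented in the README above).
agent_config = {"agentPath": "/usr/local/bin/opencode"}

agent = get_agent("opencode-server", agent_config)  # new agent type in this release
```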
package/scripts/issue_runner/agents/opencode_client.py
ADDED

@@ -0,0 +1,486 @@
+"""
+HTTP client for OpenCode server API.
+
+This module provides a client for interacting with the OpenCode server
+via its HTTP API, enabling session-based conversations with proper
+session persistence across server restarts.
+"""
+
+import json
+import logging
+import os
+import re
+import signal
+import subprocess
+import time
+from dataclasses import dataclass
+from pathlib import Path
+from typing import Optional, List, Dict, Any
+
+logger = logging.getLogger(__name__)
+
+
+@dataclass
+class ServerInfo:
+    """Information about a running OpenCode server."""
+    port: int
+    pid: int
+    worktree: Path
+
+
+@dataclass
+class MessagePart:
+    """A part of a message response."""
+    type: str
+    text: Optional[str] = None
+    tool: Optional[str] = None
+    tool_input: Optional[Dict[str, Any]] = None
+    tool_output: Optional[str] = None
+
+
+@dataclass
+class MessageResponse:
+    """Response from sending a message."""
+    message_id: str
+    session_id: str
+    role: str
+    parts: List[MessagePart]
+    tokens: Dict[str, int]
+    finish_reason: Optional[str] = None
+
+    def get_text(self) -> str:
+        """Extract all text content from response parts."""
+        text_parts = [p.text for p in self.parts if p.type == "text" and p.text]
+        return "\n".join(text_parts)
+
+
+class OpenCodeClient:
+    """
+    HTTP client for OpenCode server.
+
+    Handles communication with an OpenCode server instance running
+    in a specific worktree directory.
+    """
+
+    def __init__(self, port: int, host: str = "127.0.0.1", timeout: int = 1800):
+        """
+        Initialize client for a specific server.
+
+        Args:
+            port: Port the server is listening on
+            host: Hostname (default localhost)
+            timeout: Request timeout in seconds (default 30 minutes for long agent runs)
+        """
+        # 30-minute timeout matches the agent execution limit - complex implementations
+        # or CI fixes can take significant time as the LLM explores the codebase.
+        self.port = port
+        self.host = host
+        self.timeout = timeout
+        self.base_url = f"http://{host}:{port}"
+
+    def _request(
+        self,
+        method: str,
+        path: str,
+        data: Optional[Dict] = None,
+        timeout: Optional[int] = None,
+    ) -> Optional[Dict]:
+        """
+        Make an HTTP request to the server using curl.
+
+        We use curl instead of urllib/requests because:
+        1. Zero dependencies - curl is universally available
+        2. Reliable timeout handling for very long requests (30+ min)
+        3. The ~10ms subprocess overhead is negligible vs. agent execution time
+
+        Args:
+            method: HTTP method (GET, POST, etc.)
+            path: API path (e.g., /session)
+            data: JSON data to send (for POST/PATCH)
+            timeout: Override default timeout
+
+        Returns:
+            Parsed JSON response or None on error
+        """
+        url = f"{self.base_url}{path}"
+        # Use -w to append HTTP status code on a new line for validation
+        cmd = ["curl", "-s", "-X", method, "-w", "\n%{http_code}", url]
+
+        if data is not None:
+            cmd.extend(["-H", "Content-Type: application/json"])
+            cmd.extend(["-d", json.dumps(data)])
+
+        req_timeout = timeout if timeout is not None else self.timeout
+        cmd.extend(["--max-time", str(req_timeout)])
+
+        try:
+            result = subprocess.run(
+                cmd,
+                capture_output=True,
+                text=True,
+                # Extra 10s buffer for subprocess overhead beyond curl's timeout
+                timeout=req_timeout + 10,
+            )
+
+            if result.returncode != 0:
+                logger.warning(f"Request failed: {result.stderr}")
+                return None
+
+            # Parse response body and HTTP status code
+            output = result.stdout.strip()
+            if not output:
+                return None
+
+            # Status code is on the last line (added by -w flag)
+            lines = output.rsplit("\n", 1)
+            if len(lines) != 2:
+                # Malformed response - -w flag should always add status code line
+                logger.warning(f"Malformed curl response (no status code): {output[:100]}")
+                return None
+
+            body, status_code = lines
+            if not status_code.isdigit():
+                logger.warning(f"Invalid HTTP status code: {status_code}")
+                return None
+
+            if int(status_code) >= 400:
+                logger.warning(f"HTTP {status_code} from {method} {path}")
+                return None
+
+            if not body:
+                return None
+
+            return json.loads(body)
+        except subprocess.TimeoutExpired:
+            logger.error(f"Request timed out: {method} {path}")
+            return None
+        except json.JSONDecodeError as e:
+            logger.error(f"Invalid JSON response: {e}")
+            return None
+        except Exception as e:
+            logger.error(f"Request error: {e}")
+            return None
+
+    def health_check(self) -> bool:
+        """
+        Check if the server is healthy.
+
+        Returns:
+            True if server is responding and healthy
+        """
+        response = self._request("GET", "/global/health", timeout=5)
+        return response is not None and response.get("healthy", False)
+
+    def create_session(self, title: Optional[str] = None) -> Optional[str]:
+        """
+        Create a new session.
+
+        Args:
+            title: Optional title for the session
+
+        Returns:
+            Session ID or None on error
+        """
+        data = {}
+        if title:
+            data["title"] = title
+
+        response = self._request("POST", "/session", data=data, timeout=30)
+        if response:
+            session_id = response.get("id")
+            logger.info(f"Created session: {session_id}")
+            return session_id
+        return None
+
+    def get_session(self, session_id: str) -> Optional[Dict]:
+        """
+        Get session details.
+
+        Args:
+            session_id: Session ID to retrieve
+
+        Returns:
+            Session data or None if not found
+        """
+        return self._request("GET", f"/session/{session_id}", timeout=10)
+
+    def session_exists(self, session_id: str) -> bool:
+        """
+        Check if a session exists.
+
+        Args:
+            session_id: Session ID to check
+
+        Returns:
+            True if session exists
+        """
+        session = self.get_session(session_id)
+        return session is not None and "id" in session
+
+    def send_message(
+        self,
+        session_id: str,
+        text: str,
+        timeout: Optional[int] = None,
+    ) -> Optional[MessageResponse]:
+        """
+        Send a message to a session and wait for response.
+
+        Args:
+            session_id: Session to send message to
+            text: Message text
+            timeout: Override default timeout
+
+        Returns:
+            MessageResponse or None on error
+        """
+        data = {
+            "parts": [{"type": "text", "text": text}]
+        }
+
+        response = self._request(
+            "POST",
+            f"/session/{session_id}/message",
+            data=data,
+            timeout=timeout,
+        )
+
+        if not response:
+            return None
+
+        try:
+            info = response.get("info", {})
+            raw_parts = response.get("parts", [])
+
+            parts = []
+            for p in raw_parts:
+                part = MessagePart(type=p.get("type", "unknown"))
+                if p.get("type") == "text":
+                    part.text = p.get("text", "")
+                elif p.get("type") == "tool":
+                    part.tool = p.get("tool")
+                    state = p.get("state", {})
+                    part.tool_input = state.get("input")
+                    part.tool_output = state.get("output")
+                elif p.get("type") == "reasoning":
+                    part.text = p.get("text", "")
+                parts.append(part)
+
+            return MessageResponse(
+                message_id=info.get("id", ""),
+                session_id=info.get("sessionID", session_id),
+                role=info.get("role", "assistant"),
+                parts=parts,
+                tokens=info.get("tokens", {}),
+                finish_reason=info.get("finish"),
+            )
+        except Exception as e:
+            logger.error(f"Failed to parse message response: {e}")
+            return None
+
+    def get_messages(
+        self,
+        session_id: str,
+        limit: Optional[int] = None,
+    ) -> List[Dict]:
+        """
+        Get message history for a session.
+
+        Args:
+            session_id: Session to get messages from
+            limit: Maximum number of messages to retrieve
+
+        Returns:
+            List of message dictionaries
+        """
+        path = f"/session/{session_id}/message"
+        if limit:
+            path += f"?limit={limit}"
+
+        response = self._request("GET", path, timeout=30)
+        return response if isinstance(response, list) else []
+
+
+class OpenCodeServerManager:
+    """
+    Manages OpenCode server instances for worktrees.
+
+    Handles starting, stopping, and connecting to OpenCode servers
+    for different worktree directories.
+    """
+
+    def __init__(self, opencode_binary: str = "opencode"):
+        """
+        Initialize server manager.
+
+        Args:
+            opencode_binary: Path to opencode binary
+        """
+        self.binary = opencode_binary
+        self._servers: Dict[str, ServerInfo] = {}  # worktree path -> server info
+
+    def _parse_port_from_output(self, output: str) -> Optional[int]:
+        """Parse port number from server startup output."""
+        match = re.search(r"listening on http://[^:]+:(\d+)", output)
+        if match:
+            return int(match.group(1))
+        return None
+
+    def start_server(
+        self,
+        worktree: Path,
+        timeout: int = 30,
+    ) -> Optional[ServerInfo]:
+        """
+        Start an OpenCode server for a worktree.
+
+        Args:
+            worktree: Path to the worktree directory
+            timeout: Seconds to wait for server to start
+
+        Returns:
+            ServerInfo or None on failure
+        """
+        worktree_str = str(worktree.resolve())
+
+        # Check if we already have a server for this worktree.
+        # Note: There's a theoretical TOCTOU race between health_check() and using
+        # the server, but Python's GIL + our single-threaded runner make this safe.
+        # If the server dies between check and use, the HTTP call will fail and
+        # the caller can retry, which will start a fresh server.
+        if worktree_str in self._servers:
+            info = self._servers[worktree_str]
+            client = OpenCodeClient(info.port)
+            if client.health_check():
+                logger.info(f"Reusing existing server on port {info.port}")
+                return info
+            else:
+                # Server died or unresponsive - kill stale process and remove from cache
+                logger.warning(f"Server on port {info.port} not responding, killing PID {info.pid}")
+                try:
+                    os.kill(info.pid, signal.SIGTERM)
+                except ProcessLookupError:
+                    pass  # Already dead
+                except Exception as e:
+                    logger.debug(f"Error killing stale server: {e}")
+                del self._servers[worktree_str]
+
+        logger.info(f"Starting OpenCode server for {worktree}")
+
+        # Start server process
+        proc = subprocess.Popen(
+            [self.binary, "serve"],
+            cwd=worktree,
+            stdout=subprocess.PIPE,
+            stderr=subprocess.STDOUT,
+            text=True,
+        )
+
+        # Wait for server to output port
+        port = None
+        start_time = time.time()
+
+        while time.time() - start_time < timeout:
+            if proc.poll() is not None:
+                # Process exited
+                output = proc.stdout.read() if proc.stdout else ""
+                logger.error(f"Server exited unexpectedly: {output}")
+                return None
+
+            # Blocking readline is acceptable here because:
+            # 1. OpenCode always outputs the port line quickly on startup
+            # 2. The outer timeout loop + proc.poll() handles hung processes
+            # 3. Non-blocking I/O adds complexity without real benefit
+            if proc.stdout:
+                line = proc.stdout.readline()
+                if line:
+                    logger.debug(f"Server output: {line.strip()}")
+                    port = self._parse_port_from_output(line)
+                    if port:
+                        break
+                    # Warn if we see output but can't parse port (format may have changed)
+                    if "listen" in line.lower():
+                        logger.warning(f"Could not parse port from: {line.strip()}")
+
+            time.sleep(0.1)
+
+        if not port:
+            logger.error("Failed to get server port - check OpenCode version/output format")
+            proc.terminate()
+            return None
+
+        # Close stdout to prevent buffer fill-up since the server runs detached.
+        # The server communicates via HTTP after startup, not stdout, so any
+        # SIGPIPE from further writes is harmless and expected.
+        if proc.stdout:
+            proc.stdout.close()
+
+        # Verify server is responding
+        client = OpenCodeClient(port)
+        if not self._wait_for_health(client, timeout=10):
+            logger.error("Server not responding to health checks")
+            proc.terminate()
+            return None
+
+        info = ServerInfo(port=port, pid=proc.pid, worktree=worktree)
+        self._servers[worktree_str] = info
+        logger.info(f"Server started on port {port} (PID {proc.pid})")
+
+        return info
+
+    def _wait_for_health(self, client: OpenCodeClient, timeout: int = 10) -> bool:
+        """Wait for server to become healthy."""
+        start_time = time.time()
+        while time.time() - start_time < timeout:
+            if client.health_check():
+                return True
+            time.sleep(0.5)
+        return False
+
+    def get_client(self, worktree: Path) -> Optional[OpenCodeClient]:
+        """
+        Get a client for a worktree, starting server if needed.
+
+        Args:
+            worktree: Path to worktree
+
+        Returns:
+            OpenCodeClient or None if server couldn't be started
+        """
+        info = self.start_server(worktree)
+        if info:
+            return OpenCodeClient(info.port)
+        return None
+
+    def stop_server(self, worktree: Path) -> bool:
+        """
+        Stop the server for a worktree.
+
+        Args:
+            worktree: Path to worktree
+
+        Returns:
+            True if server was stopped
+        """
+        worktree_str = str(worktree.resolve())
+
+        if worktree_str not in self._servers:
+            return False
+
+        info = self._servers[worktree_str]
+
+        try:
+            os.kill(info.pid, signal.SIGTERM)
+            logger.info(f"Stopped server on port {info.port} (PID {info.pid})")
+        except ProcessLookupError:
+            logger.debug(f"Server already stopped (PID {info.pid})")
+        except Exception as e:
+            logger.warning(f"Error stopping server: {e}")
+
+        del self._servers[worktree_str]
+        return True
+
+    def stop_all(self) -> None:
+        """Stop all managed servers."""
+        for worktree_str in list(self._servers.keys()):
+            self.stop_server(Path(worktree_str))