ripperdoc 0.2.9__py3-none-any.whl → 0.3.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ripperdoc/__init__.py +1 -1
- ripperdoc/cli/cli.py +379 -51
- ripperdoc/cli/commands/__init__.py +6 -0
- ripperdoc/cli/commands/agents_cmd.py +128 -5
- ripperdoc/cli/commands/clear_cmd.py +8 -0
- ripperdoc/cli/commands/doctor_cmd.py +29 -0
- ripperdoc/cli/commands/exit_cmd.py +1 -0
- ripperdoc/cli/commands/memory_cmd.py +2 -1
- ripperdoc/cli/commands/models_cmd.py +63 -7
- ripperdoc/cli/commands/resume_cmd.py +5 -0
- ripperdoc/cli/commands/skills_cmd.py +103 -0
- ripperdoc/cli/commands/stats_cmd.py +244 -0
- ripperdoc/cli/commands/status_cmd.py +10 -0
- ripperdoc/cli/commands/tasks_cmd.py +6 -3
- ripperdoc/cli/commands/themes_cmd.py +139 -0
- ripperdoc/cli/ui/file_mention_completer.py +63 -13
- ripperdoc/cli/ui/helpers.py +6 -3
- ripperdoc/cli/ui/interrupt_handler.py +34 -0
- ripperdoc/cli/ui/panels.py +14 -8
- ripperdoc/cli/ui/rich_ui.py +737 -47
- ripperdoc/cli/ui/spinner.py +93 -18
- ripperdoc/cli/ui/thinking_spinner.py +1 -2
- ripperdoc/cli/ui/tool_renderers.py +10 -9
- ripperdoc/cli/ui/wizard.py +24 -19
- ripperdoc/core/agents.py +14 -3
- ripperdoc/core/config.py +238 -6
- ripperdoc/core/default_tools.py +91 -10
- ripperdoc/core/hooks/events.py +4 -0
- ripperdoc/core/hooks/llm_callback.py +58 -0
- ripperdoc/core/hooks/manager.py +6 -0
- ripperdoc/core/permissions.py +160 -9
- ripperdoc/core/providers/openai.py +84 -28
- ripperdoc/core/query.py +489 -87
- ripperdoc/core/query_utils.py +17 -14
- ripperdoc/core/skills.py +1 -0
- ripperdoc/core/theme.py +298 -0
- ripperdoc/core/tool.py +15 -5
- ripperdoc/protocol/__init__.py +14 -0
- ripperdoc/protocol/models.py +300 -0
- ripperdoc/protocol/stdio.py +1453 -0
- ripperdoc/tools/background_shell.py +354 -139
- ripperdoc/tools/bash_tool.py +117 -22
- ripperdoc/tools/file_edit_tool.py +228 -50
- ripperdoc/tools/file_read_tool.py +154 -3
- ripperdoc/tools/file_write_tool.py +53 -11
- ripperdoc/tools/grep_tool.py +98 -8
- ripperdoc/tools/lsp_tool.py +609 -0
- ripperdoc/tools/multi_edit_tool.py +26 -3
- ripperdoc/tools/skill_tool.py +52 -1
- ripperdoc/tools/task_tool.py +539 -65
- ripperdoc/utils/conversation_compaction.py +1 -1
- ripperdoc/utils/file_watch.py +216 -7
- ripperdoc/utils/image_utils.py +125 -0
- ripperdoc/utils/log.py +30 -3
- ripperdoc/utils/lsp.py +812 -0
- ripperdoc/utils/mcp.py +80 -18
- ripperdoc/utils/message_formatting.py +7 -4
- ripperdoc/utils/messages.py +198 -33
- ripperdoc/utils/pending_messages.py +50 -0
- ripperdoc/utils/permissions/shell_command_validation.py +3 -3
- ripperdoc/utils/permissions/tool_permission_utils.py +180 -15
- ripperdoc/utils/platform.py +198 -0
- ripperdoc/utils/session_heatmap.py +242 -0
- ripperdoc/utils/session_history.py +2 -2
- ripperdoc/utils/session_stats.py +294 -0
- ripperdoc/utils/shell_utils.py +8 -5
- ripperdoc/utils/todo.py +0 -6
- {ripperdoc-0.2.9.dist-info → ripperdoc-0.3.0.dist-info}/METADATA +55 -17
- ripperdoc-0.3.0.dist-info/RECORD +136 -0
- {ripperdoc-0.2.9.dist-info → ripperdoc-0.3.0.dist-info}/WHEEL +1 -1
- ripperdoc/sdk/__init__.py +0 -9
- ripperdoc/sdk/client.py +0 -333
- ripperdoc-0.2.9.dist-info/RECORD +0 -123
- {ripperdoc-0.2.9.dist-info → ripperdoc-0.3.0.dist-info}/entry_points.txt +0 -0
- {ripperdoc-0.2.9.dist-info → ripperdoc-0.3.0.dist-info}/licenses/LICENSE +0 -0
- {ripperdoc-0.2.9.dist-info → ripperdoc-0.3.0.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,1453 @@
|
|
|
1
|
+
"""Stdio command for SDK subprocess communication.
|
|
2
|
+
|
|
3
|
+
This module implements the stdio command that enables Ripperdoc to communicate
|
|
4
|
+
with SDKs via JSON Control Protocol over stdin/stdout, following Claude SDK's
|
|
5
|
+
elegant subprocess architecture patterns.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
from __future__ import annotations
|
|
9
|
+
|
|
10
|
+
import asyncio
|
|
11
|
+
import json
|
|
12
|
+
import logging
|
|
13
|
+
import os
|
|
14
|
+
import sys
|
|
15
|
+
import time
|
|
16
|
+
import traceback
|
|
17
|
+
import uuid
|
|
18
|
+
from collections.abc import AsyncIterator
|
|
19
|
+
from contextlib import asynccontextmanager
|
|
20
|
+
from pathlib import Path
|
|
21
|
+
from typing import Any, AsyncGenerator, Callable, TypeVar
|
|
22
|
+
|
|
23
|
+
import click
|
|
24
|
+
|
|
25
|
+
from ripperdoc.core.config import get_project_config, get_effective_model_profile
|
|
26
|
+
from ripperdoc.core.default_tools import get_default_tools
|
|
27
|
+
from ripperdoc.core.query import query, QueryContext
|
|
28
|
+
from ripperdoc.core.query_utils import resolve_model_profile
|
|
29
|
+
from ripperdoc.core.system_prompt import build_system_prompt
|
|
30
|
+
from ripperdoc.core.hooks.manager import hook_manager
|
|
31
|
+
from ripperdoc.core.hooks.llm_callback import build_hook_llm_callback
|
|
32
|
+
from ripperdoc.utils.messages import create_user_message
|
|
33
|
+
from ripperdoc.utils.memory import build_memory_instructions
|
|
34
|
+
from ripperdoc.core.permissions import make_permission_checker
|
|
35
|
+
from ripperdoc.utils.session_history import SessionHistory
|
|
36
|
+
from ripperdoc.utils.mcp import (
|
|
37
|
+
load_mcp_servers_async,
|
|
38
|
+
format_mcp_instructions,
|
|
39
|
+
shutdown_mcp_runtime,
|
|
40
|
+
)
|
|
41
|
+
from ripperdoc.utils.lsp import shutdown_lsp_manager
|
|
42
|
+
from ripperdoc.tools.background_shell import shutdown_background_shell
|
|
43
|
+
from ripperdoc.tools.mcp_tools import load_dynamic_mcp_tools_async, merge_tools_with_dynamic
|
|
44
|
+
from ripperdoc.protocol.models import (
|
|
45
|
+
ControlResponseMessage,
|
|
46
|
+
ControlResponseSuccess,
|
|
47
|
+
ControlResponseError,
|
|
48
|
+
AssistantStreamMessage,
|
|
49
|
+
UserStreamMessage,
|
|
50
|
+
AssistantMessageData,
|
|
51
|
+
UserMessageData,
|
|
52
|
+
ResultMessage,
|
|
53
|
+
UsageInfo,
|
|
54
|
+
MCPServerInfo,
|
|
55
|
+
InitializeResponseData,
|
|
56
|
+
PermissionResponseAllow,
|
|
57
|
+
PermissionResponseDeny,
|
|
58
|
+
model_to_dict,
|
|
59
|
+
)
|
|
60
|
+
|
|
61
|
+
logger = logging.getLogger(__name__)
|
|
62
|
+
|
|
63
|
+
# Timeout constants for stdio operations.
# Each is read once at import time and can be overridden via the named
# environment variable; values are seconds, parsed as float.
STDIO_READ_TIMEOUT_SEC = float(os.getenv("RIPPERDOC_STDIO_READ_TIMEOUT", "300"))  # 5 minutes default
STDIO_QUERY_TIMEOUT_SEC = float(os.getenv("RIPPERDOC_STDIO_QUERY_TIMEOUT", "600"))  # 10 minutes default
STDIO_WATCHDOG_INTERVAL_SEC = float(os.getenv("RIPPERDOC_STDIO_WATCHDOG_INTERVAL", "30"))  # 30 seconds
STDIO_TOOL_TIMEOUT_SEC = float(os.getenv("RIPPERDOC_STDIO_TOOL_TIMEOUT", "300"))  # 5 minutes per tool
STDIO_HOOK_TIMEOUT_SEC = float(os.getenv("RIPPERDOC_STDIO_HOOK_TIMEOUT", "30"))  # 30 seconds for hooks


# Generic type parameter (declared for use by helpers in this module).
T = TypeVar("T")
|
|
72
|
+
|
|
73
|
+
|
|
74
|
+
@asynccontextmanager
async def timeout_wrapper(
    timeout_sec: float,
    operation_name: str,
    on_timeout: Callable[[str], Any] | None = None,
) -> AsyncGenerator[None, None]:
    """Bound an async operation with a timeout, logging failures uniformly.

    Wraps the body in ``asyncio.timeout``; on expiry the optional
    ``on_timeout`` callback is invoked with a human-readable message
    (and awaited when it returns an awaitable) before the TimeoutError
    propagates.

    Args:
        timeout_sec: Maximum seconds to wait for the operation
        operation_name: Human-readable name for logging
        on_timeout: Optional callback called on timeout

    Yields:
        None

    Raises:
        asyncio.TimeoutError: If operation exceeds timeout
    """
    try:
        async with asyncio.timeout(timeout_sec):
            yield
    except asyncio.TimeoutError:
        detail = f"{operation_name} timed out after {timeout_sec:.1f}s"
        logger.error(f"[timeout] {detail}", exc_info=True)
        if on_timeout:
            outcome = on_timeout(detail)
            # The callback may be sync or async; await only awaitables.
            if inspect.isawaitable(outcome):
                await outcome
        raise
    except Exception as exc:
        logger.error(f"[timeout] {operation_name} failed: {type(exc).__name__}: {exc}", exc_info=True)
        raise
|
|
107
|
+
|
|
108
|
+
|
|
109
|
+
import inspect
|
|
110
|
+
|
|
111
|
+
|
|
112
|
+
class OperationWatchdog:
    """Watchdog that monitors long-running operations and triggers timeout if stuck.

    Used as an async context manager: ``__aenter__`` records the current
    task as the one being monitored and spawns a background loop; if no
    ``ping()`` arrives within ``timeout_sec``, that task is cancelled.
    """

    def __init__(self, timeout_sec: float, check_interval: float = 30.0):
        """Initialize watchdog.

        Args:
            timeout_sec: Maximum seconds allowed before watchdog triggers
            check_interval: Seconds between activity checks
        """
        self.timeout_sec = timeout_sec
        self.check_interval = check_interval
        self._last_activity: float = time.time()
        self._stopped = False
        # Background monitor loop; created in __aenter__.
        self._task: asyncio.Task[None] | None = None
        # The task under supervision (captured via asyncio.current_task()).
        self._monitoring_task: asyncio.Task[None] | None = None

    def _update_activity(self) -> None:
        """Update the last activity timestamp."""
        self._last_activity = time.time()

    def ping(self) -> None:
        """Update activity timestamp to prevent watchdog timeout."""
        # BUGFIX: measure the gap BEFORE refreshing the timestamp.  The
        # previous code updated first and then logged
        # time.time() - self._last_activity, which always printed ~0s.
        elapsed = time.time() - self._last_activity
        self._update_activity()
        logger.debug(
            f"[watchdog] Activity ping recorded, time since last: {elapsed:.1f}s"
        )

    async def _watchdog_loop(self) -> None:
        """Background task that monitors activity and triggers timeout if stuck."""
        while not self._stopped:
            try:
                await asyncio.sleep(self.check_interval)
            except asyncio.CancelledError:
                break

            time_since_activity = time.time() - self._last_activity
            if time_since_activity > self.timeout_sec:
                logger.error(
                    f"[watchdog] No activity for {time_since_activity:.1f}s "
                    f"(timeout={self.timeout_sec:.1f}s) - triggering cancellation"
                )
                # Cancel the task being monitored
                if self._monitoring_task and not self._monitoring_task.done():
                    self._monitoring_task.cancel()
                break

    async def __aenter__(self) -> "OperationWatchdog":
        """Start the watchdog."""
        self._stopped = False
        self._monitoring_task = asyncio.current_task()
        self._task = asyncio.create_task(self._watchdog_loop())
        logger.debug(
            f"[watchdog] Started with timeout={self.timeout_sec}s, check_interval={self.check_interval}s"
        )
        return self

    async def __aexit__(self, *args: Any) -> None:
        """Stop the watchdog."""
        self._stopped = True
        if self._task and not self._task.done():
            self._task.cancel()
            try:
                await self._task
            except asyncio.CancelledError:
                pass
        logger.debug("[watchdog] Stopped")
|
|
179
|
+
|
|
180
|
+
|
|
181
|
+
class StdioProtocolHandler:
|
|
182
|
+
"""Handler for stdio-based JSON Control Protocol.
|
|
183
|
+
|
|
184
|
+
This class manages bidirectional communication with the SDK:
|
|
185
|
+
- Reads JSON commands from stdin
|
|
186
|
+
- Parses control requests (initialize, query, etc.)
|
|
187
|
+
- Executes core query logic
|
|
188
|
+
- Writes JSON responses to stdout
|
|
189
|
+
|
|
190
|
+
Following Claude SDK's elegant patterns:
|
|
191
|
+
- JSON messages separated by newlines
|
|
192
|
+
- Control requests/responses for protocol management
|
|
193
|
+
- Message streaming for query results
|
|
194
|
+
"""
|
|
195
|
+
|
|
196
|
+
def __init__(self, input_format: str = "stream-json", output_format: str = "stream-json"):
|
|
197
|
+
"""Initialize the protocol handler.
|
|
198
|
+
|
|
199
|
+
Args:
|
|
200
|
+
input_format: Input format ("stream-json" or "auto")
|
|
201
|
+
output_format: Output format ("stream-json")
|
|
202
|
+
"""
|
|
203
|
+
self._input_format = input_format
|
|
204
|
+
self._output_format = output_format
|
|
205
|
+
self._initialized = False
|
|
206
|
+
self._session_id: str | None = None
|
|
207
|
+
self._project_path: Path = Path.cwd()
|
|
208
|
+
self._query_context: QueryContext | None = None
|
|
209
|
+
self._can_use_tool: Any | None = None
|
|
210
|
+
self._hooks: dict[str, list[dict[str, Any]]] = {}
|
|
211
|
+
self._pending_requests: dict[str, Any] = {}
|
|
212
|
+
|
|
213
|
+
# Conversation history for multi-turn queries
|
|
214
|
+
self._conversation_messages: list[Any] = []
|
|
215
|
+
|
|
216
|
+
async def _write_message(self, message: dict[str, Any]) -> None:
|
|
217
|
+
"""Write a JSON message to stdout.
|
|
218
|
+
|
|
219
|
+
Args:
|
|
220
|
+
message: The message dictionary to write.
|
|
221
|
+
"""
|
|
222
|
+
json_data = json.dumps(message, ensure_ascii=False)
|
|
223
|
+
msg_type = message.get("type", "unknown")
|
|
224
|
+
logger.debug(f"[stdio] Writing message: type={msg_type}, json_length={len(json_data)}")
|
|
225
|
+
sys.stdout.write(json_data + "\n")
|
|
226
|
+
sys.stdout.flush()
|
|
227
|
+
logger.debug(f"[stdio] Flushed message: type={msg_type}")
|
|
228
|
+
|
|
229
|
+
async def _write_control_response(
|
|
230
|
+
self,
|
|
231
|
+
request_id: str,
|
|
232
|
+
response: dict[str, Any] | None = None,
|
|
233
|
+
error: str | None = None,
|
|
234
|
+
) -> None:
|
|
235
|
+
"""Write a control response message.
|
|
236
|
+
|
|
237
|
+
Args:
|
|
238
|
+
request_id: The request ID this responds to.
|
|
239
|
+
response: The response data (for success).
|
|
240
|
+
error: The error message (for failure).
|
|
241
|
+
"""
|
|
242
|
+
if error:
|
|
243
|
+
response_data: ControlResponseSuccess | ControlResponseError = ControlResponseError( # type: ignore[assignment]
|
|
244
|
+
request_id=request_id,
|
|
245
|
+
error=error,
|
|
246
|
+
)
|
|
247
|
+
else:
|
|
248
|
+
response_data = ControlResponseSuccess(
|
|
249
|
+
request_id=request_id,
|
|
250
|
+
response=response,
|
|
251
|
+
)
|
|
252
|
+
|
|
253
|
+
message = ControlResponseMessage(response=response_data)
|
|
254
|
+
|
|
255
|
+
await self._write_message(model_to_dict(message))
|
|
256
|
+
|
|
257
|
+
async def _write_message_stream(
|
|
258
|
+
self,
|
|
259
|
+
message_dict: dict[str, Any],
|
|
260
|
+
) -> None:
|
|
261
|
+
"""Write a regular message to the output stream.
|
|
262
|
+
|
|
263
|
+
Args:
|
|
264
|
+
message_dict: The message dictionary to write.
|
|
265
|
+
"""
|
|
266
|
+
await self._write_message(message_dict)
|
|
267
|
+
|
|
268
|
+
async def _read_line(self) -> str | None:
|
|
269
|
+
"""Read a single line from stdin with timeout.
|
|
270
|
+
|
|
271
|
+
Returns:
|
|
272
|
+
The line content, or None if EOF or timeout.
|
|
273
|
+
"""
|
|
274
|
+
try:
|
|
275
|
+
# Wrap the blocking readline with timeout
|
|
276
|
+
line = await asyncio.wait_for(
|
|
277
|
+
asyncio.get_event_loop().run_in_executor(None, sys.stdin.readline),
|
|
278
|
+
timeout=STDIO_READ_TIMEOUT_SEC,
|
|
279
|
+
)
|
|
280
|
+
if not line:
|
|
281
|
+
return None
|
|
282
|
+
return line.rstrip("\n\r") # type: ignore[no-any-return]
|
|
283
|
+
except asyncio.TimeoutError:
|
|
284
|
+
logger.error(f"[stdio] stdin read timed out after {STDIO_READ_TIMEOUT_SEC}s")
|
|
285
|
+
# Signal EOF to allow graceful shutdown
|
|
286
|
+
return None
|
|
287
|
+
except (OSError, IOError) as e:
|
|
288
|
+
logger.error(f"Error reading from stdin: {e}")
|
|
289
|
+
return None
|
|
290
|
+
|
|
291
|
+
    async def _read_messages(self) -> AsyncIterator[dict[str, Any]]:
        """Read and parse JSON messages from stdin with comprehensive error handling.

        Lines are accumulated in a buffer so a JSON document that arrives
        split across several stdin reads can still be parsed once complete.
        Stops on EOF, on a long run of empty lines, or on cancellation.

        Yields:
            Parsed JSON message dictionaries.
        """
        json_buffer = ""
        consecutive_empty_lines = 0
        max_empty_lines = 100  # Prevent infinite loop on empty input

        try:
            while True:
                line = await self._read_line()
                if line is None:
                    # _read_line returns None on EOF, read timeout, or I/O error.
                    logger.debug("[stdio] EOF reached, stopping message reader")
                    break

                line = line.strip()
                if not line:
                    consecutive_empty_lines += 1
                    if consecutive_empty_lines > max_empty_lines:
                        logger.warning(
                            f"[stdio] Too many empty lines ({max_empty_lines}), stopping"
                        )
                        break
                    continue

                consecutive_empty_lines = 0  # Reset counter on non-empty line

                # Handle JSON that may span multiple lines
                # NOTE(review): _read_line strips trailing newlines, so this split
                # appears to always yield a single element — confirm before removing.
                json_lines = line.split("\n")
                for json_line in json_lines:
                    json_line = json_line.strip()
                    if not json_line:
                        continue

                    json_buffer += json_line

                    try:
                        data = json.loads(json_buffer)
                        # Parse succeeded: clear the buffer before yielding.
                        json_buffer = ""
                        logger.debug(
                            f"[stdio] Successfully parsed message, type={data.get('type', 'unknown')}"
                        )
                        yield data
                    except json.JSONDecodeError:
                        # Keep buffering - might be incomplete JSON
                        # But limit buffer size to prevent memory issues
                        # NOTE(review): JSON that is invalid (not merely incomplete)
                        # stays in the buffer, corrupting later messages until the
                        # 10MB cap resets it — confirm this is intended.
                        if len(json_buffer) > 10_000_000:  # 10MB limit
                            logger.error("[stdio] JSON buffer too large, resetting")
                            json_buffer = ""
                        continue

        except asyncio.CancelledError:
            logger.info("[stdio] Message reader cancelled")
            raise
        except Exception as e:
            logger.error(f"[stdio] Error in message reader: {type(e).__name__}: {e}", exc_info=True)
            raise
|
|
350
|
+
|
|
351
|
+
async def _handle_initialize(self, request: dict[str, Any], request_id: str) -> None:
|
|
352
|
+
"""Handle initialize request from SDK.
|
|
353
|
+
|
|
354
|
+
Args:
|
|
355
|
+
request: The initialize request data.
|
|
356
|
+
request_id: The request ID.
|
|
357
|
+
"""
|
|
358
|
+
if self._initialized:
|
|
359
|
+
await self._write_control_response(request_id, error="Already initialized")
|
|
360
|
+
return
|
|
361
|
+
|
|
362
|
+
try:
|
|
363
|
+
# Extract options from request
|
|
364
|
+
options = request.get("options", {})
|
|
365
|
+
self._session_id = options.get("session_id") or str(uuid.uuid4())
|
|
366
|
+
|
|
367
|
+
# Setup working directory
|
|
368
|
+
cwd = options.get("cwd")
|
|
369
|
+
if cwd:
|
|
370
|
+
self._project_path = Path(cwd)
|
|
371
|
+
else:
|
|
372
|
+
self._project_path = Path.cwd()
|
|
373
|
+
|
|
374
|
+
# Initialize project config
|
|
375
|
+
get_project_config(self._project_path)
|
|
376
|
+
|
|
377
|
+
# Parse tool options
|
|
378
|
+
allowed_tools = options.get("allowed_tools")
|
|
379
|
+
_disallowed_tools = options.get("disallowed_tools")
|
|
380
|
+
tools_list = options.get("tools")
|
|
381
|
+
|
|
382
|
+
# Get the tool list
|
|
383
|
+
if tools_list is not None:
|
|
384
|
+
# SDK provided explicit tool list
|
|
385
|
+
# For now, use default tools
|
|
386
|
+
tools = get_default_tools(allowed_tools=allowed_tools)
|
|
387
|
+
else:
|
|
388
|
+
tools = get_default_tools(allowed_tools=allowed_tools)
|
|
389
|
+
|
|
390
|
+
# Parse permission mode
|
|
391
|
+
permission_mode = options.get("permission_mode", "default")
|
|
392
|
+
yolo_mode = permission_mode == "bypassPermissions"
|
|
393
|
+
|
|
394
|
+
# Create permission checker
|
|
395
|
+
if not yolo_mode:
|
|
396
|
+
self._can_use_tool = make_permission_checker(self._project_path, yolo_mode=False)
|
|
397
|
+
|
|
398
|
+
# Setup model
|
|
399
|
+
model = options.get("model") or "main"
|
|
400
|
+
|
|
401
|
+
# 验证模型配置是否有效
|
|
402
|
+
model_profile = get_effective_model_profile(model)
|
|
403
|
+
if model_profile is None:
|
|
404
|
+
error_msg = (
|
|
405
|
+
f"No valid model configuration found for '{model}'. "
|
|
406
|
+
f"Please set RIPPERDOC_BASE_URL environment variable or complete onboarding."
|
|
407
|
+
)
|
|
408
|
+
logger.error(f"[stdio] {error_msg}")
|
|
409
|
+
await self._write_control_response(request_id, error=error_msg)
|
|
410
|
+
return
|
|
411
|
+
|
|
412
|
+
# Create query context
|
|
413
|
+
self._query_context = QueryContext(
|
|
414
|
+
tools=tools,
|
|
415
|
+
yolo_mode=yolo_mode,
|
|
416
|
+
verbose=options.get("verbose", False),
|
|
417
|
+
model=model,
|
|
418
|
+
)
|
|
419
|
+
|
|
420
|
+
# Initialize hook manager
|
|
421
|
+
hook_manager.set_project_dir(self._project_path)
|
|
422
|
+
hook_manager.set_session_id(self._session_id)
|
|
423
|
+
hook_manager.set_llm_callback(build_hook_llm_callback())
|
|
424
|
+
|
|
425
|
+
# Store hooks configuration
|
|
426
|
+
hooks = options.get("hooks", {})
|
|
427
|
+
self._hooks = hooks
|
|
428
|
+
|
|
429
|
+
# Load MCP servers and dynamic tools
|
|
430
|
+
servers = await load_mcp_servers_async(self._project_path)
|
|
431
|
+
dynamic_tools = await load_dynamic_mcp_tools_async(self._project_path)
|
|
432
|
+
if dynamic_tools:
|
|
433
|
+
tools = merge_tools_with_dynamic(tools, dynamic_tools)
|
|
434
|
+
self._query_context.tools = tools
|
|
435
|
+
|
|
436
|
+
mcp_instructions = format_mcp_instructions(servers)
|
|
437
|
+
|
|
438
|
+
# Build system prompt components
|
|
439
|
+
from ripperdoc.core.skills import load_all_skills, build_skill_summary
|
|
440
|
+
|
|
441
|
+
skill_result = load_all_skills(self._project_path)
|
|
442
|
+
skill_instructions = build_skill_summary(skill_result.skills)
|
|
443
|
+
|
|
444
|
+
additional_instructions: list[str] = []
|
|
445
|
+
if skill_instructions:
|
|
446
|
+
additional_instructions.append(skill_instructions)
|
|
447
|
+
|
|
448
|
+
memory_instructions = build_memory_instructions()
|
|
449
|
+
if memory_instructions:
|
|
450
|
+
additional_instructions.append(memory_instructions)
|
|
451
|
+
|
|
452
|
+
system_prompt = build_system_prompt(
|
|
453
|
+
tools,
|
|
454
|
+
"", # Will be set per query
|
|
455
|
+
{},
|
|
456
|
+
additional_instructions=additional_instructions or None,
|
|
457
|
+
mcp_instructions=mcp_instructions,
|
|
458
|
+
)
|
|
459
|
+
|
|
460
|
+
# Mark as initialized
|
|
461
|
+
self._initialized = True
|
|
462
|
+
|
|
463
|
+
# Send success response with available tools
|
|
464
|
+
# Use simple list format for Claude SDK compatibility
|
|
465
|
+
# Get skill info for agents list
|
|
466
|
+
from ripperdoc.core.skills import load_all_skills
|
|
467
|
+
|
|
468
|
+
skill_result = load_all_skills(self._project_path)
|
|
469
|
+
agent_names = [s.name for s in skill_result.skills] if skill_result.skills else []
|
|
470
|
+
|
|
471
|
+
init_response = InitializeResponseData(
|
|
472
|
+
session_id=self._session_id or "",
|
|
473
|
+
system_prompt=system_prompt,
|
|
474
|
+
tools=[t.name for t in tools],
|
|
475
|
+
mcp_servers=[MCPServerInfo(name=s.name) for s in servers] if servers else [],
|
|
476
|
+
slash_commands=[],
|
|
477
|
+
agents=agent_names,
|
|
478
|
+
skills=[],
|
|
479
|
+
plugins=[],
|
|
480
|
+
)
|
|
481
|
+
|
|
482
|
+
await self._write_control_response(
|
|
483
|
+
request_id,
|
|
484
|
+
response=model_to_dict(init_response),
|
|
485
|
+
)
|
|
486
|
+
|
|
487
|
+
except Exception as e:
|
|
488
|
+
logger.error(f"Initialize failed: {e}", exc_info=True)
|
|
489
|
+
await self._write_control_response(request_id, error=str(e))
|
|
490
|
+
|
|
491
|
+
async def _handle_query(self, request: dict[str, Any], request_id: str) -> None:
|
|
492
|
+
"""Handle query request from SDK with comprehensive timeout and error handling.
|
|
493
|
+
|
|
494
|
+
This method ensures ResultMessage is ALWAYS sent on any error/exception/timeout,
|
|
495
|
+
including detailed stack traces for debugging.
|
|
496
|
+
|
|
497
|
+
Args:
|
|
498
|
+
request: The query request data.
|
|
499
|
+
request_id: The request ID.
|
|
500
|
+
"""
|
|
501
|
+
if not self._initialized:
|
|
502
|
+
await self._write_control_response(request_id, error="Not initialized")
|
|
503
|
+
return
|
|
504
|
+
|
|
505
|
+
# Variables to track query state (using lists for mutable reference across async contexts)
|
|
506
|
+
num_turns = [0] # [int]
|
|
507
|
+
is_error = [False] # [bool]
|
|
508
|
+
final_result_text = [None] # [str | None]
|
|
509
|
+
|
|
510
|
+
# Track token usage
|
|
511
|
+
total_input_tokens = [0] # [int]
|
|
512
|
+
total_output_tokens = [0] # [int]
|
|
513
|
+
total_cache_read_tokens = [0] # [int]
|
|
514
|
+
total_cache_creation_tokens = [0] # [int]
|
|
515
|
+
|
|
516
|
+
start_time = time.time()
|
|
517
|
+
result_message_sent = [False] # [bool] - Track if we've sent ResultMessage
|
|
518
|
+
|
|
519
|
+
async def send_final_result(result: ResultMessage) -> None:
|
|
520
|
+
"""Send ResultMessage and mark as sent."""
|
|
521
|
+
if result_message_sent[0]:
|
|
522
|
+
logger.warning("[stdio] ResultMessage already sent, skipping duplicate")
|
|
523
|
+
return
|
|
524
|
+
logger.debug("[stdio] Sending ResultMessage")
|
|
525
|
+
try:
|
|
526
|
+
await self._write_message_stream(model_to_dict(result))
|
|
527
|
+
result_message_sent[0] = True
|
|
528
|
+
logger.debug("[stdio] ResultMessage sent successfully")
|
|
529
|
+
except Exception as e:
|
|
530
|
+
logger.error(f"[stdio] Failed to send ResultMessage: {e}", exc_info=True)
|
|
531
|
+
result_message_sent[0] = True # Mark as sent to avoid retries
|
|
532
|
+
|
|
533
|
+
async def send_error_result(error_msg: str, exc: Exception | None = None) -> None:
|
|
534
|
+
"""Send error ResultMessage with full stack trace."""
|
|
535
|
+
if exc:
|
|
536
|
+
tb_str = "".join(traceback.format_exception(type(exc), exc, exc.__traceback__))
|
|
537
|
+
error_detail = f"{type(exc).__name__}: {error_msg}\n\nStack trace:\n{tb_str}"
|
|
538
|
+
else:
|
|
539
|
+
error_detail = error_msg
|
|
540
|
+
|
|
541
|
+
result = ResultMessage(
|
|
542
|
+
duration_ms=int((time.time() - start_time) * 1000),
|
|
543
|
+
duration_api_ms=0,
|
|
544
|
+
is_error=True,
|
|
545
|
+
num_turns=num_turns[0],
|
|
546
|
+
session_id=self._session_id or "",
|
|
547
|
+
total_cost_usd=None,
|
|
548
|
+
usage=None,
|
|
549
|
+
result=error_detail[:50000] if len(error_detail) > 50000 else error_detail, # Limit size
|
|
550
|
+
structured_output=None,
|
|
551
|
+
)
|
|
552
|
+
await send_final_result(result)
|
|
553
|
+
|
|
554
|
+
try:
|
|
555
|
+
prompt = request.get("prompt", "")
|
|
556
|
+
if not prompt:
|
|
557
|
+
await self._write_control_response(request_id, error="Prompt is required")
|
|
558
|
+
return
|
|
559
|
+
|
|
560
|
+
logger.info(
|
|
561
|
+
"[stdio] Starting query handling",
|
|
562
|
+
extra={
|
|
563
|
+
"request_id": request_id,
|
|
564
|
+
"prompt_length": len(prompt),
|
|
565
|
+
"session_id": self._session_id,
|
|
566
|
+
"conversation_messages": len(self._conversation_messages),
|
|
567
|
+
"query_timeout": STDIO_QUERY_TIMEOUT_SEC,
|
|
568
|
+
},
|
|
569
|
+
)
|
|
570
|
+
|
|
571
|
+
# Create session history
|
|
572
|
+
session_history = SessionHistory(
|
|
573
|
+
self._project_path, self._session_id or str(uuid.uuid4())
|
|
574
|
+
)
|
|
575
|
+
hook_manager.set_transcript_path(str(session_history.path))
|
|
576
|
+
|
|
577
|
+
# Create initial user message
|
|
578
|
+
user_message = create_user_message(prompt)
|
|
579
|
+
self._conversation_messages.append(user_message)
|
|
580
|
+
session_history.append(user_message)
|
|
581
|
+
|
|
582
|
+
# Use the conversation history for messages
|
|
583
|
+
messages = list(self._conversation_messages)
|
|
584
|
+
|
|
585
|
+
# Build system prompt
|
|
586
|
+
additional_instructions: list[str] = []
|
|
587
|
+
|
|
588
|
+
# Run session start hooks with timeout
|
|
589
|
+
try:
|
|
590
|
+
async with asyncio.timeout(STDIO_HOOK_TIMEOUT_SEC):
|
|
591
|
+
session_start_result = await hook_manager.run_session_start_async("startup")
|
|
592
|
+
if hasattr(session_start_result, "system_message"):
|
|
593
|
+
if session_start_result.system_message:
|
|
594
|
+
additional_instructions.append(str(session_start_result.system_message))
|
|
595
|
+
if hasattr(session_start_result, "additional_context"):
|
|
596
|
+
if session_start_result.additional_context:
|
|
597
|
+
additional_instructions.append(str(session_start_result.additional_context))
|
|
598
|
+
except asyncio.TimeoutError:
|
|
599
|
+
logger.warning(f"[stdio] Session start hook timed out after {STDIO_HOOK_TIMEOUT_SEC}s")
|
|
600
|
+
except Exception as e:
|
|
601
|
+
logger.warning(f"[stdio] Session start hook failed: {e}")
|
|
602
|
+
|
|
603
|
+
# Run prompt submit hooks with timeout
|
|
604
|
+
try:
|
|
605
|
+
async with asyncio.timeout(STDIO_HOOK_TIMEOUT_SEC):
|
|
606
|
+
prompt_hook_result = await hook_manager.run_user_prompt_submit_async(prompt)
|
|
607
|
+
if hasattr(prompt_hook_result, "should_block") and prompt_hook_result.should_block:
|
|
608
|
+
reason = (
|
|
609
|
+
prompt_hook_result.block_reason
|
|
610
|
+
if hasattr(prompt_hook_result, "block_reason")
|
|
611
|
+
else "Prompt blocked by hook."
|
|
612
|
+
)
|
|
613
|
+
await self._write_control_response(request_id, error=str(reason))
|
|
614
|
+
return
|
|
615
|
+
if (
|
|
616
|
+
hasattr(prompt_hook_result, "system_message")
|
|
617
|
+
and prompt_hook_result.system_message
|
|
618
|
+
):
|
|
619
|
+
additional_instructions.append(str(prompt_hook_result.system_message))
|
|
620
|
+
if (
|
|
621
|
+
hasattr(prompt_hook_result, "additional_context")
|
|
622
|
+
and prompt_hook_result.additional_context
|
|
623
|
+
):
|
|
624
|
+
additional_instructions.append(str(prompt_hook_result.additional_context))
|
|
625
|
+
except asyncio.TimeoutError:
|
|
626
|
+
logger.warning(f"[stdio] Prompt submit hook timed out after {STDIO_HOOK_TIMEOUT_SEC}s")
|
|
627
|
+
except Exception as e:
|
|
628
|
+
logger.warning(f"[stdio] Prompt submit hook failed: {e}")
|
|
629
|
+
|
|
630
|
+
# Build final system prompt
|
|
631
|
+
servers = await load_mcp_servers_async(self._project_path)
|
|
632
|
+
mcp_instructions = format_mcp_instructions(servers)
|
|
633
|
+
|
|
634
|
+
system_prompt = build_system_prompt(
|
|
635
|
+
self._query_context.tools if self._query_context else [],
|
|
636
|
+
prompt,
|
|
637
|
+
{},
|
|
638
|
+
additional_instructions=additional_instructions or None,
|
|
639
|
+
mcp_instructions=mcp_instructions,
|
|
640
|
+
)
|
|
641
|
+
|
|
642
|
+
# Send acknowledgment that query is starting
|
|
643
|
+
await self._write_control_response(
|
|
644
|
+
request_id, response={"status": "querying", "session_id": self._session_id}
|
|
645
|
+
)
|
|
646
|
+
|
|
647
|
+
# Execute query with comprehensive timeout and error handling
|
|
648
|
+
try:
|
|
649
|
+
context: dict[str, Any] = {}
|
|
650
|
+
|
|
651
|
+
logger.debug(
|
|
652
|
+
"[stdio] Preparing query execution",
|
|
653
|
+
extra={
|
|
654
|
+
"messages_count": len(messages),
|
|
655
|
+
"system_prompt_length": len(system_prompt),
|
|
656
|
+
"query_timeout": STDIO_QUERY_TIMEOUT_SEC,
|
|
657
|
+
},
|
|
658
|
+
)
|
|
659
|
+
|
|
660
|
+
# Create watchdog for monitoring query progress
|
|
661
|
+
async with OperationWatchdog(
|
|
662
|
+
timeout_sec=STDIO_QUERY_TIMEOUT_SEC, check_interval=STDIO_WATCHDOG_INTERVAL_SEC
|
|
663
|
+
):
|
|
664
|
+
# Execute query with overall timeout
|
|
665
|
+
async with asyncio.timeout(STDIO_QUERY_TIMEOUT_SEC):
|
|
666
|
+
async for message in query(
|
|
667
|
+
messages,
|
|
668
|
+
system_prompt,
|
|
669
|
+
context,
|
|
670
|
+
self._query_context or {}, # type: ignore[arg-type]
|
|
671
|
+
self._can_use_tool,
|
|
672
|
+
):
|
|
673
|
+
msg_type = getattr(message, "type", None)
|
|
674
|
+
logger.debug(
|
|
675
|
+
f"[stdio] Received message of type: {msg_type}, "
|
|
676
|
+
f"num_turns={num_turns[0]}, "
|
|
677
|
+
f"elapsed_ms={int((time.time() - start_time) * 1000)}"
|
|
678
|
+
)
|
|
679
|
+
num_turns[0] += 1
|
|
680
|
+
|
|
681
|
+
# Handle progress messages
|
|
682
|
+
if msg_type == "progress":
|
|
683
|
+
# Check if this is a subagent message that should be forwarded to SDK
|
|
684
|
+
is_subagent_msg = getattr(message, "is_subagent_message", False)
|
|
685
|
+
if is_subagent_msg:
|
|
686
|
+
# Extract the subagent message from content
|
|
687
|
+
subagent_message = getattr(message, "content", None)
|
|
688
|
+
if subagent_message and hasattr(subagent_message, "type"):
|
|
689
|
+
logger.debug(
|
|
690
|
+
f"[stdio] Forwarding subagent message: type={getattr(subagent_message, 'type', 'unknown')}"
|
|
691
|
+
)
|
|
692
|
+
# Convert and forward the subagent message to SDK
|
|
693
|
+
message_dict = self._convert_message_to_sdk(subagent_message)
|
|
694
|
+
if message_dict:
|
|
695
|
+
await self._write_message_stream(message_dict)
|
|
696
|
+
|
|
697
|
+
# Add subagent messages to conversation history
|
|
698
|
+
subagent_msg_type = getattr(subagent_message, "type", "")
|
|
699
|
+
if subagent_msg_type == "assistant":
|
|
700
|
+
self._conversation_messages.append(subagent_message)
|
|
701
|
+
|
|
702
|
+
# Track token usage from subagent assistant messages
|
|
703
|
+
total_input_tokens[0] += getattr(subagent_message, "input_tokens", 0)
|
|
704
|
+
total_output_tokens[0] += getattr(subagent_message, "output_tokens", 0)
|
|
705
|
+
total_cache_read_tokens[0] += getattr(subagent_message, "cache_read_tokens", 0)
|
|
706
|
+
total_cache_creation_tokens[0] += getattr(
|
|
707
|
+
subagent_message, "cache_creation_tokens", 0
|
|
708
|
+
)
|
|
709
|
+
# Continue to filter out normal progress messages
|
|
710
|
+
continue
|
|
711
|
+
|
|
712
|
+
# Track token usage from assistant messages
|
|
713
|
+
if msg_type == "assistant":
|
|
714
|
+
total_input_tokens[0] += getattr(message, "input_tokens", 0)
|
|
715
|
+
total_output_tokens[0] += getattr(message, "output_tokens", 0)
|
|
716
|
+
total_cache_read_tokens[0] += getattr(message, "cache_read_tokens", 0)
|
|
717
|
+
total_cache_creation_tokens[0] += getattr(
|
|
718
|
+
message, "cache_creation_tokens", 0
|
|
719
|
+
)
|
|
720
|
+
|
|
721
|
+
msg_content = getattr(message, "message", None)
|
|
722
|
+
if msg_content:
|
|
723
|
+
content = getattr(msg_content, "content", None)
|
|
724
|
+
if content:
|
|
725
|
+
# Extract text blocks for result field
|
|
726
|
+
if isinstance(content, str):
|
|
727
|
+
final_result_text[0] = content
|
|
728
|
+
elif isinstance(content, list):
|
|
729
|
+
text_parts = []
|
|
730
|
+
for block in content:
|
|
731
|
+
if isinstance(block, dict):
|
|
732
|
+
if block.get("type") == "text":
|
|
733
|
+
text_parts.append(block.get("text", ""))
|
|
734
|
+
elif block.get("type") == "tool_use":
|
|
735
|
+
text_parts.clear()
|
|
736
|
+
elif hasattr(block, "type"):
|
|
737
|
+
if block.type == "text":
|
|
738
|
+
text_parts.append(getattr(block, "text", ""))
|
|
739
|
+
if text_parts:
|
|
740
|
+
final_result_text[0] = "\n".join(text_parts)
|
|
741
|
+
|
|
742
|
+
# Convert message to SDK format
|
|
743
|
+
message_dict = self._convert_message_to_sdk(message)
|
|
744
|
+
if message_dict is None:
|
|
745
|
+
continue
|
|
746
|
+
await self._write_message_stream(message_dict)
|
|
747
|
+
|
|
748
|
+
# Add to conversation history
|
|
749
|
+
if msg_type == "assistant":
|
|
750
|
+
self._conversation_messages.append(message)
|
|
751
|
+
|
|
752
|
+
# Add to local history and session history
|
|
753
|
+
messages.append(message) # type: ignore[arg-type]
|
|
754
|
+
session_history.append(message) # type: ignore[arg-type]
|
|
755
|
+
|
|
756
|
+
logger.debug("[stdio] Query loop ended successfully")
|
|
757
|
+
|
|
758
|
+
except asyncio.TimeoutError:
|
|
759
|
+
logger.error(f"[stdio] Query execution timed out after {STDIO_QUERY_TIMEOUT_SEC}s")
|
|
760
|
+
await send_error_result(f"Query timed out after {STDIO_QUERY_TIMEOUT_SEC}s")
|
|
761
|
+
except asyncio.CancelledError:
|
|
762
|
+
logger.warning("[stdio] Query was cancelled")
|
|
763
|
+
await send_error_result("Query was cancelled")
|
|
764
|
+
except Exception as query_error:
|
|
765
|
+
is_error[0] = True
|
|
766
|
+
logger.error(f"[stdio] Query execution error: {type(query_error).__name__}: {query_error}", exc_info=True)
|
|
767
|
+
await send_error_result(str(query_error), query_error)
|
|
768
|
+
|
|
769
|
+
# Build and send normal completion result if no error occurred
|
|
770
|
+
if not is_error[0] and not result_message_sent[0]:
|
|
771
|
+
logger.debug("[stdio] Building usage info")
|
|
772
|
+
|
|
773
|
+
# Calculate cost
|
|
774
|
+
cost_per_million_input = 3.0
|
|
775
|
+
cost_per_million_output = 15.0
|
|
776
|
+
total_cost_usd = (total_input_tokens[0] * cost_per_million_input / 1_000_000) + (
|
|
777
|
+
total_output_tokens[0] * cost_per_million_output / 1_000_000
|
|
778
|
+
)
|
|
779
|
+
|
|
780
|
+
# Build usage info
|
|
781
|
+
usage_info = None
|
|
782
|
+
if total_input_tokens[0] or total_output_tokens[0]:
|
|
783
|
+
usage_info = UsageInfo(
|
|
784
|
+
input_tokens=total_input_tokens[0],
|
|
785
|
+
cache_creation_input_tokens=total_cache_creation_tokens[0],
|
|
786
|
+
cache_read_input_tokens=total_cache_read_tokens[0],
|
|
787
|
+
output_tokens=total_output_tokens[0],
|
|
788
|
+
)
|
|
789
|
+
|
|
790
|
+
duration_ms = int((time.time() - start_time) * 1000)
|
|
791
|
+
duration_api_ms = duration_ms
|
|
792
|
+
|
|
793
|
+
result_message = ResultMessage(
|
|
794
|
+
duration_ms=duration_ms,
|
|
795
|
+
duration_api_ms=duration_api_ms,
|
|
796
|
+
is_error=is_error[0],
|
|
797
|
+
num_turns=num_turns[0],
|
|
798
|
+
session_id=self._session_id or "",
|
|
799
|
+
total_cost_usd=round(total_cost_usd, 8) if total_cost_usd > 0 else None,
|
|
800
|
+
usage=usage_info,
|
|
801
|
+
result=final_result_text[0],
|
|
802
|
+
structured_output=None,
|
|
803
|
+
)
|
|
804
|
+
await send_final_result(result_message)
|
|
805
|
+
|
|
806
|
+
# Run session end hooks with timeout
|
|
807
|
+
logger.debug("[stdio] Running session end hooks")
|
|
808
|
+
try:
|
|
809
|
+
duration = time.time() - start_time
|
|
810
|
+
async with asyncio.timeout(STDIO_HOOK_TIMEOUT_SEC):
|
|
811
|
+
await hook_manager.run_session_end_async(
|
|
812
|
+
"other",
|
|
813
|
+
duration_seconds=duration,
|
|
814
|
+
message_count=len(messages),
|
|
815
|
+
)
|
|
816
|
+
logger.debug("[stdio] Session end hooks completed")
|
|
817
|
+
except asyncio.TimeoutError:
|
|
818
|
+
logger.warning(f"[stdio] Session end hook timed out after {STDIO_HOOK_TIMEOUT_SEC}s")
|
|
819
|
+
except Exception as e:
|
|
820
|
+
logger.warning(f"[stdio] Session end hook failed: {e}")
|
|
821
|
+
|
|
822
|
+
logger.info(
|
|
823
|
+
"[stdio] Query completed",
|
|
824
|
+
extra={
|
|
825
|
+
"request_id": request_id,
|
|
826
|
+
"duration_ms": int((time.time() - start_time) * 1000),
|
|
827
|
+
"num_turns": num_turns[0],
|
|
828
|
+
"is_error": is_error[0],
|
|
829
|
+
"input_tokens": total_input_tokens[0],
|
|
830
|
+
"output_tokens": total_output_tokens[0],
|
|
831
|
+
"result_sent": result_message_sent[0],
|
|
832
|
+
},
|
|
833
|
+
)
|
|
834
|
+
|
|
835
|
+
except Exception as e:
|
|
836
|
+
logger.error(f"[stdio] Handle query failed: {type(e).__name__}: {e}", exc_info=True)
|
|
837
|
+
await self._write_control_response(request_id, error=str(e))
|
|
838
|
+
|
|
839
|
+
# Ensure ResultMessage is sent even if everything else fails
|
|
840
|
+
if not result_message_sent[0]:
|
|
841
|
+
try:
|
|
842
|
+
await send_error_result(str(e), e)
|
|
843
|
+
except Exception as send_error:
|
|
844
|
+
logger.error(
|
|
845
|
+
f"[stdio] Critical: Failed to send error ResultMessage: {send_error}",
|
|
846
|
+
exc_info=True,
|
|
847
|
+
)
|
|
848
|
+
|
|
849
|
+
def _convert_message_to_sdk(self, message: Any) -> dict[str, Any] | None:
    """Convert an internal message to SDK format.

    Args:
        message: The internal message object.

    Returns:
        A dictionary in SDK message format, or None if the message should
        be skipped (progress messages and unknown types).
    """
    msg_type = getattr(message, "type", None)

    # Filter out progress messages (internal implementation detail)
    if msg_type == "progress":
        return None
    if msg_type == "assistant":
        return self._assistant_to_sdk(message)
    if msg_type == "user":
        return self._user_to_sdk(message)
    # Unknown message type - return None to skip
    return None

def _assistant_to_sdk(self, message: Any) -> dict[str, Any]:
    """Build the SDK dict for an internal assistant message."""
    content_blocks: list[dict[str, Any]] = []
    msg_content = getattr(message, "message", None)
    content = getattr(msg_content, "content", None) if msg_content else None
    if content:
        if isinstance(content, str):
            content_blocks.append({"type": "text", "text": content})
        elif isinstance(content, list):
            for block in content:
                if isinstance(block, dict):
                    content_blocks.append(block)
                else:
                    # Convert MessageContent (Pydantic model) to dict
                    block_dict = self._convert_content_block(block)
                    if block_dict:
                        content_blocks.append(block_dict)

    # Resolve model pointer to actual model name for SDK.
    # The message may have model=None (unset), so fall back to
    # QueryContext.model, then resolve any pointer (e.g., "main") to the
    # actual model name.
    model_pointer = getattr(message, "model", None) or (
        self._query_context.model if self._query_context else None
    )
    model_profile = resolve_model_profile(
        str(model_pointer) if model_pointer else "claude-opus-4-5-20251101"
    )
    actual_model = (
        model_profile.model
        if model_profile
        else (model_pointer or "claude-opus-4-5-20251101")
    )

    assistant_message = AssistantStreamMessage(
        message=AssistantMessageData(
            content=content_blocks,
            model=actual_model,
        ),
        parent_tool_use_id=getattr(message, "parent_tool_use_id", None),
    )
    return model_to_dict(assistant_message)

def _user_to_sdk(self, message: Any) -> dict[str, Any]:
    """Build the SDK dict for an internal user message."""
    msg_content = getattr(message, "message", None)
    content = getattr(msg_content, "content", "") if msg_content else ""

    # If content is a list of MessageContent objects (e.g., tool results),
    # convert it to a string for the SDK format. For tool_result types the
    # structured data travels in tool_use_result, so the first text block
    # (if any) is enough; otherwise an empty string is used.
    if isinstance(content, list):
        content_str = ""
        for block in content:
            if isinstance(block, dict):
                if block.get("type", "") == "text" and block.get("text"):
                    content_str = block.get("text", "")
                    break
            elif hasattr(block, "type"):
                if getattr(block, "type", "") == "text" and getattr(block, "text", None):
                    content_str = getattr(block, "text", "")
                    break
        content = content_str

    user_message = UserStreamMessage(
        message=UserMessageData(content=content),
        uuid=getattr(message, "uuid", None),
        parent_tool_use_id=getattr(message, "parent_tool_use_id", None),
        tool_use_result=self._sanitize_for_json(getattr(message, "tool_use_result", None)),
    )
    return model_to_dict(user_message)
def _sanitize_for_json(self, obj: Any) -> Any:
|
|
945
|
+
"""Recursively convert objects to JSON-serializable types.
|
|
946
|
+
|
|
947
|
+
This function ensures Pydantic models and other objects are converted
|
|
948
|
+
to dictionaries/lists/primitives that can be JSON serialized.
|
|
949
|
+
|
|
950
|
+
Args:
|
|
951
|
+
obj: The object to sanitize.
|
|
952
|
+
|
|
953
|
+
Returns:
|
|
954
|
+
A JSON-serializable version of the object.
|
|
955
|
+
"""
|
|
956
|
+
# None values
|
|
957
|
+
if obj is None:
|
|
958
|
+
return None
|
|
959
|
+
|
|
960
|
+
# Primitives
|
|
961
|
+
elif isinstance(obj, (str, int, float, bool)):
|
|
962
|
+
return obj
|
|
963
|
+
|
|
964
|
+
# Lists and tuples
|
|
965
|
+
elif isinstance(obj, (list, tuple)):
|
|
966
|
+
return [self._sanitize_for_json(item) for item in obj]
|
|
967
|
+
|
|
968
|
+
# Dictionaries
|
|
969
|
+
elif isinstance(obj, dict):
|
|
970
|
+
return {key: self._sanitize_for_json(value) for key, value in obj.items()}
|
|
971
|
+
|
|
972
|
+
# Pydantic models
|
|
973
|
+
elif hasattr(obj, "model_dump"):
|
|
974
|
+
try:
|
|
975
|
+
dumped = obj.model_dump(exclude_none=True)
|
|
976
|
+
return self._sanitize_for_json(dumped)
|
|
977
|
+
except Exception:
|
|
978
|
+
pass
|
|
979
|
+
|
|
980
|
+
# Objects with dict() method
|
|
981
|
+
elif hasattr(obj, "dict"):
|
|
982
|
+
try:
|
|
983
|
+
dumped = obj.dict(exclude_none=True)
|
|
984
|
+
return self._sanitize_for_json(dumped)
|
|
985
|
+
except Exception:
|
|
986
|
+
pass
|
|
987
|
+
|
|
988
|
+
# Fallback: try to convert to string
|
|
989
|
+
else:
|
|
990
|
+
try:
|
|
991
|
+
return str(obj)
|
|
992
|
+
except Exception:
|
|
993
|
+
return None
|
|
994
|
+
|
|
995
|
+
def _convert_content_block(self, block: Any) -> dict[str, Any] | None:
|
|
996
|
+
"""Convert a MessageContent block to dictionary.
|
|
997
|
+
|
|
998
|
+
Uses the same logic as _content_block_to_api in messages.py
|
|
999
|
+
to ensure consistency and proper field mapping.
|
|
1000
|
+
|
|
1001
|
+
Args:
|
|
1002
|
+
block: The MessageContent object.
|
|
1003
|
+
|
|
1004
|
+
Returns:
|
|
1005
|
+
A dictionary representation of the block.
|
|
1006
|
+
"""
|
|
1007
|
+
block_type = getattr(block, "type", None)
|
|
1008
|
+
|
|
1009
|
+
if block_type == "text":
|
|
1010
|
+
return {
|
|
1011
|
+
"type": "text",
|
|
1012
|
+
"text": getattr(block, "text", None) or "",
|
|
1013
|
+
}
|
|
1014
|
+
|
|
1015
|
+
elif block_type == "thinking":
|
|
1016
|
+
return {
|
|
1017
|
+
"type": "thinking",
|
|
1018
|
+
"thinking": getattr(block, "thinking", None) or getattr(block, "text", None) or "",
|
|
1019
|
+
"signature": getattr(block, "signature", None),
|
|
1020
|
+
}
|
|
1021
|
+
|
|
1022
|
+
elif block_type == "tool_use":
|
|
1023
|
+
# Use the same id extraction logic as _content_block_to_api
|
|
1024
|
+
# Try id first, then tool_use_id, then empty string
|
|
1025
|
+
tool_id = getattr(block, "id", None) or getattr(block, "tool_use_id", None) or ""
|
|
1026
|
+
return {
|
|
1027
|
+
"type": "tool_use",
|
|
1028
|
+
"id": tool_id,
|
|
1029
|
+
"name": getattr(block, "name", None) or "",
|
|
1030
|
+
"input": getattr(block, "input", None) or {},
|
|
1031
|
+
}
|
|
1032
|
+
|
|
1033
|
+
elif block_type == "tool_result":
|
|
1034
|
+
return {
|
|
1035
|
+
"type": "tool_result",
|
|
1036
|
+
"tool_use_id": getattr(block, "tool_use_id", None)
|
|
1037
|
+
or getattr(block, "id", None)
|
|
1038
|
+
or "",
|
|
1039
|
+
"content": getattr(block, "text", None) or getattr(block, "content", None) or "",
|
|
1040
|
+
"is_error": getattr(block, "is_error", None),
|
|
1041
|
+
}
|
|
1042
|
+
|
|
1043
|
+
elif block_type == "image":
|
|
1044
|
+
return {
|
|
1045
|
+
"type": "image",
|
|
1046
|
+
"source": {
|
|
1047
|
+
"type": getattr(block, "source_type", None) or "base64",
|
|
1048
|
+
"media_type": getattr(block, "media_type", None) or "image/jpeg",
|
|
1049
|
+
"data": getattr(block, "image_data", None) or "",
|
|
1050
|
+
},
|
|
1051
|
+
}
|
|
1052
|
+
|
|
1053
|
+
else:
|
|
1054
|
+
# Unknown block type - try to convert with generic approach
|
|
1055
|
+
block_dict = {}
|
|
1056
|
+
if hasattr(block, "type"):
|
|
1057
|
+
block_dict["type"] = block.type
|
|
1058
|
+
if hasattr(block, "text"):
|
|
1059
|
+
block_dict["text"] = block.text
|
|
1060
|
+
if hasattr(block, "id"):
|
|
1061
|
+
block_dict["id"] = block.id
|
|
1062
|
+
if hasattr(block, "name"):
|
|
1063
|
+
block_dict["name"] = block.name
|
|
1064
|
+
if hasattr(block, "input"):
|
|
1065
|
+
block_dict["input"] = block.input
|
|
1066
|
+
if hasattr(block, "content"):
|
|
1067
|
+
block_dict["content"] = block.content
|
|
1068
|
+
if hasattr(block, "is_error"):
|
|
1069
|
+
block_dict["is_error"] = block.is_error
|
|
1070
|
+
return block_dict if block_dict else None
|
|
1071
|
+
|
|
1072
|
+
async def _handle_control_request(self, message: dict[str, Any]) -> None:
    """Handle a control request from the SDK.

    Dispatches on the request subtype to the matching handler coroutine.

    Args:
        message: The control request message.
    """
    request = message.get("request", {})
    request_id = message.get("request_id", "")
    request_subtype = request.get("subtype", "")

    # Map each supported subtype to its handler; every handler shares the
    # (request, request_id) signature.
    dispatch = {
        "initialize": self._handle_initialize,
        "query": self._handle_query,
        "set_permission_mode": self._handle_set_permission_mode,
        "set_model": self._handle_set_model,
        "rewind_files": self._handle_rewind_files,
        "hook_callback": self._handle_hook_callback,
        "can_use_tool": self._handle_can_use_tool,
    }

    try:
        handler = dispatch.get(request_subtype)
        if handler is not None:
            await handler(request, request_id)
        else:
            await self._write_control_response(
                request_id, error=f"Unknown request subtype: {request_subtype}"
            )
    except Exception as e:
        logger.error(f"Error handling control request: {e}", exc_info=True)
        await self._write_control_response(request_id, error=str(e))
async def _handle_set_permission_mode(self, request: dict[str, Any], request_id: str) -> None:
    """Handle set_permission_mode request from SDK.

    Args:
        request: The set_permission_mode request data.
        request_id: The request ID.
    """
    mode = request.get("mode", "default")
    ctx = self._query_context
    if ctx:
        # yolo_mode (skip all permission prompts) is only enabled by the
        # "bypassPermissions" mode; every other mode keeps normal checks.
        ctx.yolo_mode = mode == "bypassPermissions"
    await self._write_control_response(
        request_id, response={"status": "permission_mode_set", "mode": mode}
    )
async def _handle_set_model(self, request: dict[str, Any], request_id: str) -> None:
    """Handle set_model request from SDK.

    Args:
        request: The set_model request data.
        request_id: The request ID.
    """
    model = request.get("model")
    ctx = self._query_context
    if ctx:
        # Fall back to the "main" model pointer when no model was supplied.
        ctx.model = model or "main"
    await self._write_control_response(
        request_id, response={"status": "model_set", "model": model}
    )
async def _handle_rewind_files(self, _request: dict[str, Any], request_id: str) -> None:
    """Handle rewind_files request from SDK.

    Note: File checkpointing is not currently supported.
    This method exists for Claude SDK API compatibility.

    Args:
        _request: The rewind_files request data (unused).
        request_id: The request ID.
    """
    # Always report the feature as unsupported so the SDK surfaces a clear
    # error instead of silently no-op'ing.
    await self._write_control_response(
        request_id, error="File checkpointing and rewind_files are not currently supported"
    )
async def _handle_hook_callback(self, request: dict[str, Any], request_id: str) -> None:
    """Handle hook_callback request from SDK.

    Args:
        request: The hook_callback request data.
        request_id: The request ID.
    """
    # The callback payload is extracted but intentionally unused for now;
    # full hook support would require integration with hook_manager.
    _callback_id, _input_data, _tool_use_id = (
        request.get("callback_id"),
        request.get("input", {}),
        request.get("tool_use_id"),
    )

    # Basic response: always let the hook chain continue.
    await self._write_control_response(request_id, response={"continue": True})
async def _handle_can_use_tool(self, request: dict[str, Any], request_id: str) -> None:
    """Handle can_use_tool request from SDK.

    Runs the configured permission checker (if any) and replies with an
    allow/deny permission response.

    Args:
        request: The can_use_tool request data.
        request_id: The request ID.
    """
    tool_name = request.get("tool_name", "")
    tool_input = request.get("input", {})

    if not self._can_use_tool:
        # No permission checker configured: allow by default.
        allow = PermissionResponseAllow(updatedInput=tool_input)
        await self._write_control_response(request_id, response=model_to_dict(allow))
        return

    try:
        from ripperdoc_agent_sdk.types import (  # type: ignore[import-not-found]
            PermissionResultAllow,
            ToolPermissionContext,
        )

        context = ToolPermissionContext(
            signal=None,
            suggestions=[],
        )

        result = await self._can_use_tool(tool_name, tool_input, context)

        # Build a single response object instead of duplicating the
        # write call per branch (avoids the annotated-variable
        # redefinition that previously needed type: ignore).
        perm_response: PermissionResponseAllow | PermissionResponseDeny
        if isinstance(result, PermissionResultAllow):
            perm_response = PermissionResponseAllow(
                updatedInput=result.updated_input or tool_input,
            )
        else:
            perm_response = PermissionResponseDeny(
                message=result.message if hasattr(result, "message") else "",
            )
        await self._write_control_response(
            request_id,
            response=model_to_dict(perm_response),
        )
    except Exception as e:
        logger.error(f"Error in permission check: {e}")
        await self._write_control_response(request_id, error=str(e))
async def run(self) -> None:
    """Main run loop for the stdio protocol handler with graceful shutdown.

    Reads messages from stdin and handles them until EOF, then shuts down
    MCP, LSP, and background-shell resources with bounded timeouts.
    """
    logger.info("Stdio protocol handler starting")

    try:
        async for message in self._read_messages():
            msg_type = message.get("type")

            if msg_type == "control_request":
                await self._handle_control_request(message)
            else:
                # Unknown message type
                logger.warning(f"Unknown message type: {msg_type}")

    # IOError has been an alias of OSError since Python 3.3, so OSError
    # alone covers both.
    except (OSError, json.JSONDecodeError) as e:
        logger.error(f"Error in stdio loop: {e}", exc_info=True)
    except asyncio.CancelledError:
        logger.info("Stdio protocol handler cancelled")
    except Exception as e:
        logger.error(f"Unexpected error in stdio loop: {type(e).__name__}: {e}", exc_info=True)
    finally:
        await self._shutdown()

async def _shutdown(self) -> None:
    """Run all cleanup tasks concurrently, each with a bounded timeout."""

    async def shutdown_with_timeout(name: str, shutdown_coro: Any) -> None:
        # Each async subsystem gets its own 10s budget so one hung
        # shutdown cannot starve the others.
        try:
            async with asyncio.timeout(10):
                await shutdown_coro()
        except asyncio.TimeoutError:
            logger.warning(f"[cleanup] {name} shutdown timed out")
        except Exception as e:
            logger.error(f"[cleanup] Error shutting down {name}: {e}")

    async def cleanup_shell() -> None:
        try:
            shutdown_background_shell(force=True)
        except Exception:
            pass  # Background shell cleanup is best-effort

    logger.info("Stdio protocol handler shutting down...")

    cleanup_tasks = [
        asyncio.create_task(shutdown_with_timeout("MCP runtime", shutdown_mcp_runtime)),
        asyncio.create_task(shutdown_with_timeout("LSP manager", shutdown_lsp_manager)),
        asyncio.create_task(cleanup_shell()),
    ]

    # Wait for all cleanup tasks with an overall timeout.
    try:
        async with asyncio.timeout(30):
            results = await asyncio.gather(*cleanup_tasks, return_exceptions=True)
            # Check for any exceptions that occurred
            for i, result in enumerate(results):
                if isinstance(result, Exception):
                    logger.error(f"[cleanup] Task {i} failed: {result}")
    except asyncio.TimeoutError:
        logger.warning("[cleanup] Cleanup tasks timed out after 30s")
    except Exception as e:
        logger.error(f"[cleanup] Error during cleanup: {e}")

    logger.info("Stdio protocol handler shutdown complete")
@click.command(name="stdio")
@click.option(
    "--input-format",
    type=click.Choice(["stream-json", "auto"]),
    default="stream-json",
    help="Input format for messages.",
)
@click.option(
    "--output-format",
    type=click.Choice(["stream-json"]),
    default="stream-json",
    help="Output format for messages.",
)
@click.option(
    "--model",
    type=str,
    default=None,
    help="Model profile for the current session.",
)
@click.option(
    "--permission-mode",
    type=click.Choice(["default", "acceptEdits", "plan", "bypassPermissions"]),
    default="default",
    help="Permission mode for tool usage.",
)
@click.option(
    "--max-turns",
    type=int,
    default=None,
    help="Maximum number of conversation turns.",
)
@click.option(
    "--system-prompt",
    type=str,
    default=None,
    help="System prompt to use for the session.",
)
@click.option(
    "--print",
    "-p",
    is_flag=True,
    help="Print mode (for single prompt queries).",
)
# NOTE(review): "--" is an unusual click option name and may be rejected at
# import time — it looks like this was meant to be a positional argument for
# the prompt; confirm how click registers it.
@click.option(
    "--",
    "prompt",
    type=str,
    default=None,
    help="Direct prompt (for print mode).",
)
def stdio_cmd(
    input_format: str,
    output_format: str,
    model: str | None,
    permission_mode: str,
    max_turns: int | None,
    system_prompt: str | None,
    print: bool,  # noqa: A002 - shadows the builtin; name is bound by the --print flag
    prompt: str | None,
) -> None:
    """Stdio mode for SDK subprocess communication.

    This command enables Ripperdoc to communicate with SDKs via JSON Control
    Protocol over stdin/stdout. It's designed for subprocess architecture where
    the SDK manages the CLI process.

    The protocol supports:
    - control_request/control_response for protocol management
    - Message streaming for query results
    - Bidirectional communication for hooks and permissions

    Example:
        ripperdoc stdio --output-format stream-json
    """
    # Set up async event loop; all protocol work happens inside _run_stdio.
    asyncio.run(
        _run_stdio(
            input_format=input_format,
            output_format=output_format,
            model=model,
            permission_mode=permission_mode,
            max_turns=max_turns,
            system_prompt=system_prompt,
            print_mode=print,
            prompt=prompt,
        )
    )
async def _run_stdio(
    input_format: str,
    output_format: str,
    model: str | None,
    permission_mode: str,
    max_turns: int | None,
    system_prompt: str | None,
    print_mode: bool,
    prompt: str | None,
) -> None:
    """Async entry point for stdio command."""
    handler = StdioProtocolHandler(
        input_format=input_format,
        output_format=output_format,
    )

    # Print mode with a prompt is a single-shot query: initialize the
    # handler from the CLI options, run one query, and return.
    if print_mode and prompt:
        init_request = {
            "options": {
                "model": model,
                "permission_mode": permission_mode,
                "max_turns": max_turns,
                "system_prompt": system_prompt,
            },
            "prompt": prompt,
        }

        # Synthetic request id, since there is no SDK driving the protocol.
        base_request_id = "print_query"

        await handler._handle_initialize(init_request, base_request_id)
        await handler._handle_query({"prompt": prompt}, f"{base_request_id}_query")
        return

    # Otherwise, serve the full stdio protocol loop until EOF.
    await handler.run()
|
|
1453
|
+
__all__ = ["stdio_cmd", "StdioProtocolHandler"]
|