ripperdoc 0.2.8__py3-none-any.whl → 0.2.10__py3-none-any.whl
This diff compares the contents of two publicly released package versions as they appear in their public registry. It is provided for informational purposes only.
- ripperdoc/__init__.py +1 -1
- ripperdoc/cli/cli.py +257 -123
- ripperdoc/cli/commands/__init__.py +2 -1
- ripperdoc/cli/commands/agents_cmd.py +138 -8
- ripperdoc/cli/commands/clear_cmd.py +9 -4
- ripperdoc/cli/commands/config_cmd.py +1 -1
- ripperdoc/cli/commands/context_cmd.py +3 -2
- ripperdoc/cli/commands/doctor_cmd.py +18 -4
- ripperdoc/cli/commands/exit_cmd.py +1 -0
- ripperdoc/cli/commands/hooks_cmd.py +27 -53
- ripperdoc/cli/commands/models_cmd.py +27 -10
- ripperdoc/cli/commands/permissions_cmd.py +27 -9
- ripperdoc/cli/commands/resume_cmd.py +9 -3
- ripperdoc/cli/commands/stats_cmd.py +244 -0
- ripperdoc/cli/commands/status_cmd.py +4 -4
- ripperdoc/cli/commands/tasks_cmd.py +8 -4
- ripperdoc/cli/ui/file_mention_completer.py +2 -1
- ripperdoc/cli/ui/interrupt_handler.py +2 -3
- ripperdoc/cli/ui/message_display.py +4 -2
- ripperdoc/cli/ui/panels.py +1 -0
- ripperdoc/cli/ui/provider_options.py +247 -0
- ripperdoc/cli/ui/rich_ui.py +403 -81
- ripperdoc/cli/ui/spinner.py +54 -18
- ripperdoc/cli/ui/thinking_spinner.py +1 -2
- ripperdoc/cli/ui/tool_renderers.py +8 -2
- ripperdoc/cli/ui/wizard.py +213 -0
- ripperdoc/core/agents.py +19 -6
- ripperdoc/core/config.py +51 -17
- ripperdoc/core/custom_commands.py +7 -6
- ripperdoc/core/default_tools.py +101 -12
- ripperdoc/core/hooks/config.py +1 -3
- ripperdoc/core/hooks/events.py +27 -28
- ripperdoc/core/hooks/executor.py +4 -6
- ripperdoc/core/hooks/integration.py +12 -21
- ripperdoc/core/hooks/llm_callback.py +59 -0
- ripperdoc/core/hooks/manager.py +40 -15
- ripperdoc/core/permissions.py +118 -12
- ripperdoc/core/providers/anthropic.py +109 -36
- ripperdoc/core/providers/gemini.py +70 -5
- ripperdoc/core/providers/openai.py +89 -24
- ripperdoc/core/query.py +273 -68
- ripperdoc/core/query_utils.py +2 -0
- ripperdoc/core/skills.py +9 -3
- ripperdoc/core/system_prompt.py +4 -2
- ripperdoc/core/tool.py +17 -8
- ripperdoc/sdk/client.py +79 -4
- ripperdoc/tools/ask_user_question_tool.py +5 -3
- ripperdoc/tools/background_shell.py +307 -135
- ripperdoc/tools/bash_output_tool.py +1 -1
- ripperdoc/tools/bash_tool.py +63 -24
- ripperdoc/tools/dynamic_mcp_tool.py +29 -8
- ripperdoc/tools/enter_plan_mode_tool.py +1 -1
- ripperdoc/tools/exit_plan_mode_tool.py +1 -1
- ripperdoc/tools/file_edit_tool.py +167 -54
- ripperdoc/tools/file_read_tool.py +28 -4
- ripperdoc/tools/file_write_tool.py +13 -10
- ripperdoc/tools/glob_tool.py +3 -2
- ripperdoc/tools/grep_tool.py +3 -2
- ripperdoc/tools/kill_bash_tool.py +1 -1
- ripperdoc/tools/ls_tool.py +1 -1
- ripperdoc/tools/lsp_tool.py +615 -0
- ripperdoc/tools/mcp_tools.py +13 -10
- ripperdoc/tools/multi_edit_tool.py +8 -7
- ripperdoc/tools/notebook_edit_tool.py +7 -4
- ripperdoc/tools/skill_tool.py +1 -1
- ripperdoc/tools/task_tool.py +519 -69
- ripperdoc/tools/todo_tool.py +2 -2
- ripperdoc/tools/tool_search_tool.py +3 -2
- ripperdoc/utils/conversation_compaction.py +9 -5
- ripperdoc/utils/file_watch.py +214 -5
- ripperdoc/utils/json_utils.py +2 -1
- ripperdoc/utils/lsp.py +806 -0
- ripperdoc/utils/mcp.py +11 -3
- ripperdoc/utils/memory.py +4 -2
- ripperdoc/utils/message_compaction.py +21 -7
- ripperdoc/utils/message_formatting.py +14 -7
- ripperdoc/utils/messages.py +126 -67
- ripperdoc/utils/path_ignore.py +35 -8
- ripperdoc/utils/permissions/path_validation_utils.py +2 -1
- ripperdoc/utils/permissions/shell_command_validation.py +427 -91
- ripperdoc/utils/permissions/tool_permission_utils.py +174 -15
- ripperdoc/utils/safe_get_cwd.py +2 -1
- ripperdoc/utils/session_heatmap.py +244 -0
- ripperdoc/utils/session_history.py +13 -6
- ripperdoc/utils/session_stats.py +293 -0
- ripperdoc/utils/todo.py +2 -1
- ripperdoc/utils/token_estimation.py +6 -1
- {ripperdoc-0.2.8.dist-info → ripperdoc-0.2.10.dist-info}/METADATA +8 -2
- ripperdoc-0.2.10.dist-info/RECORD +129 -0
- ripperdoc-0.2.8.dist-info/RECORD +0 -121
- {ripperdoc-0.2.8.dist-info → ripperdoc-0.2.10.dist-info}/WHEEL +0 -0
- {ripperdoc-0.2.8.dist-info → ripperdoc-0.2.10.dist-info}/entry_points.txt +0 -0
- {ripperdoc-0.2.8.dist-info → ripperdoc-0.2.10.dist-info}/licenses/LICENSE +0 -0
- {ripperdoc-0.2.8.dist-info → ripperdoc-0.2.10.dist-info}/top_level.txt +0 -0
ripperdoc/core/hooks/manager.py
CHANGED

```diff
@@ -6,7 +6,7 @@ throughout the application lifecycle.
 
 import os
 from pathlib import Path
-from typing import Any,
+from typing import Any, Dict, List, Optional
 
 from ripperdoc.core.hooks.config import (
     HooksConfig,
@@ -47,10 +47,7 @@ class HookResult:
     @property
     def should_block(self) -> bool:
         """Check if any hook returned a blocking decision."""
-        return any(
-            o.decision in (HookDecision.DENY, HookDecision.BLOCK)
-            for o in self.outputs
-        )
+        return any(o.decision in (HookDecision.DENY, HookDecision.BLOCK) for o in self.outputs)
 
     @property
     def should_allow(self) -> bool:
@@ -431,9 +428,7 @@ class HookManager:
 
     # --- Notification ---
 
-    def run_notification(
-        self, message: str, notification_type: str = "info"
-    ) -> HookResult:
+    def run_notification(self, message: str, notification_type: str = "info") -> HookResult:
         """Run Notification hooks synchronously.
 
         Args:
@@ -478,11 +473,18 @@
 
     # --- Stop ---
 
-    def run_stop(
+    def run_stop(
+        self,
+        stop_hook_active: bool = False,
+        reason: Optional[str] = None,
+        stop_sequence: Optional[str] = None,
+    ) -> HookResult:
         """Run Stop hooks synchronously.
 
         Args:
             stop_hook_active: True if already continuing from a stop hook
+            reason: Reason for stopping
+            stop_sequence: Stop sequence that triggered the stop
         """
         hooks = self._get_hooks(HookEvent.STOP)
         if not hooks:
@@ -490,6 +492,8 @@
 
         input_data = StopInput(
             stop_hook_active=stop_hook_active,
+            reason=reason,
+            stop_sequence=stop_sequence,
             session_id=self.session_id,
             transcript_path=self.transcript_path,
             cwd=self._get_cwd(),
@@ -499,7 +503,12 @@
         outputs = self.executor.execute_hooks_sync(hooks, input_data)
         return HookResult(outputs)
 
-    async def run_stop_async(
+    async def run_stop_async(
+        self,
+        stop_hook_active: bool = False,
+        reason: Optional[str] = None,
+        stop_sequence: Optional[str] = None,
+    ) -> HookResult:
         """Run Stop hooks asynchronously."""
         hooks = self._get_hooks(HookEvent.STOP)
         if not hooks:
@@ -507,6 +516,8 @@
 
         input_data = StopInput(
             stop_hook_active=stop_hook_active,
+            reason=reason,
+            stop_sequence=stop_sequence,
             session_id=self.session_id,
             transcript_path=self.transcript_path,
             cwd=self._get_cwd(),
@@ -558,9 +569,7 @@
 
     # --- Pre Compact ---
 
-    def run_pre_compact(
-        self, trigger: str, custom_instructions: str = ""
-    ) -> HookResult:
+    def run_pre_compact(self, trigger: str, custom_instructions: str = "") -> HookResult:
         """Run PreCompact hooks synchronously.
 
         Args:
@@ -645,11 +654,18 @@
 
     # --- Session End ---
 
-    def run_session_end(
+    def run_session_end(
+        self,
+        reason: str,
+        duration_seconds: Optional[float] = None,
+        message_count: Optional[int] = None,
+    ) -> HookResult:
         """Run SessionEnd hooks synchronously.
 
         Args:
            reason: "clear", "logout", "prompt_input_exit", or "other"
+            duration_seconds: How long the session lasted
+            message_count: Number of messages in the session
         """
         hooks = self._get_hooks(HookEvent.SESSION_END)
         if not hooks:
@@ -657,6 +673,8 @@
 
         input_data = SessionEndInput(
             reason=reason,
+            duration_seconds=duration_seconds,
+            message_count=message_count,
             session_id=self.session_id,
             transcript_path=self.transcript_path,
             cwd=self._get_cwd(),
@@ -666,7 +684,12 @@
         outputs = self.executor.execute_hooks_sync(hooks, input_data)
         return HookResult(outputs)
 
-    async def run_session_end_async(
+    async def run_session_end_async(
+        self,
+        reason: str,
+        duration_seconds: Optional[float] = None,
+        message_count: Optional[int] = None,
+    ) -> HookResult:
         """Run SessionEnd hooks asynchronously."""
         hooks = self._get_hooks(HookEvent.SESSION_END)
         if not hooks:
@@ -674,6 +697,8 @@
 
         input_data = SessionEndInput(
             reason=reason,
+            duration_seconds=duration_seconds,
+            message_count=message_count,
            session_id=self.session_id,
             transcript_path=self.transcript_path,
             cwd=self._get_cwd(),
```
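For context, a minimal sketch of how the extended Stop and SessionEnd entry points might be invoked after this change. The method and property names come from the diff above; the `hook_manager` instance and all argument values are illustrative assumptions, not code from the package.

```python
# Sketch only: assumes a configured HookManager instance named `hook_manager`.
result = hook_manager.run_stop(
    stop_hook_active=False,
    reason="end_turn",      # hypothetical reason string
    stop_sequence=None,
)
if result.should_block:
    ...  # some Stop hook returned DENY/BLOCK

hook_manager.run_session_end(
    reason="clear",
    duration_seconds=42.5,  # illustrative values
    message_count=12,
)
```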
ripperdoc/core/permissions.py
CHANGED

```diff
@@ -9,6 +9,7 @@ from pathlib import Path
 from typing import Any, Awaitable, Callable, Optional, Set
 
 from ripperdoc.core.config import config_manager
+from ripperdoc.core.hooks.manager import hook_manager
 from ripperdoc.core.tool import Tool
 from ripperdoc.utils.permissions import PermissionDecision, ToolRule
 from ripperdoc.utils.log import get_logger
@@ -26,8 +27,29 @@ class PermissionResult:
     decision: Optional[PermissionDecision] = None
 
 
-def _format_input_preview(parsed_input: Any) -> str:
-    """Create a
+def _format_input_preview(parsed_input: Any, tool_name: Optional[str] = None) -> str:
+    """Create a human-friendly preview for prompts.
+
+    For Bash commands, shows full details for security review.
+    For other tools, shows a concise preview.
+    """
+    # For Bash tool, show full command details for security review
+    if tool_name == "Bash" and hasattr(parsed_input, "command"):
+        lines = [f"Command: {getattr(parsed_input, 'command')}"]
+
+        # Add other relevant parameters
+        if hasattr(parsed_input, "timeout") and parsed_input.timeout:
+            lines.append(f"Timeout: {parsed_input.timeout}ms")
+        if hasattr(parsed_input, "sandbox"):
+            lines.append(f"Sandbox: {parsed_input.sandbox}")
+        if hasattr(parsed_input, "run_in_background"):
+            lines.append(f"Background: {parsed_input.run_in_background}")
+        if hasattr(parsed_input, "shell_executable") and parsed_input.shell_executable:
+            lines.append(f"Shell: {parsed_input.shell_executable}")
+
+        return "\n ".join(lines)
+
+    # For other tools with commands, show concise preview
     if hasattr(parsed_input, "command"):
         return f"command='{getattr(parsed_input, 'command')}'"
     if hasattr(parsed_input, "file_path"):
@@ -94,10 +116,13 @@ def _rule_strings(rule_suggestions: Optional[Any]) -> list[str]:
 
 def make_permission_checker(
     project_path: Path,
-
+    yolo_mode: bool,
     prompt_fn: Optional[Callable[[str], str]] = None,
 ) -> Callable[[Tool[Any, Any], Any], Awaitable[PermissionResult]]:
-    """Create a permission checking function for the current project.
+    """Create a permission checking function for the current project.
+
+    In yolo mode, all tool calls are allowed without prompting.
+    """
 
     project_path = project_path.resolve()
     config_manager.get_project_config(project_path)
@@ -120,17 +145,22 @@
         """Check and optionally persist permission for a tool invocation."""
         config = config_manager.get_project_config(project_path)
 
-        if
+        if yolo_mode:
             return PermissionResult(result=True)
 
         try:
-
-
+            needs_permission = True
+            if hasattr(tool, "needs_permissions"):
+                needs_permission = tool.needs_permissions(parsed_input)
         except (TypeError, AttributeError, ValueError) as exc:
             # Tool implementation error - log and deny for safety
             logger.warning(
                 "[permissions] Tool needs_permissions check failed",
-                extra={
+                extra={
+                    "tool": getattr(tool, "name", None),
+                    "error": str(exc),
+                    "error_type": type(exc).__name__,
+                },
             )
             return PermissionResult(
                 result=False,
@@ -138,10 +168,25 @@
             )
 
         allowed_tools = set(config.allowed_tools or [])
+
+        global_config = config_manager.get_global_config()
+        local_config = config_manager.get_project_local_config(project_path)
+
         allow_rules = {
-            "Bash":
+            "Bash": (
+                set(config.bash_allow_rules or [])
+                | set(global_config.user_allow_rules or [])
+                | set(local_config.local_allow_rules or [])
+                | session_tool_rules.get("Bash", set())
+            )
+        }
+        deny_rules = {
+            "Bash": (
+                set(config.bash_deny_rules or [])
+                | set(global_config.user_deny_rules or [])
+                | set(local_config.local_deny_rules or [])
+            )
         }
-        deny_rules = {"Bash": set(config.bash_deny_rules or [])}
         allowed_working_dirs = {
             str(project_path.resolve()),
             *[str(Path(p).resolve()) for p in config.working_directories or []],
@@ -172,7 +217,11 @@
             # Tool implementation error - fall back to asking user
             logger.warning(
                 "[permissions] Tool check_permissions failed",
-                extra={
+                extra={
+                    "tool": getattr(tool, "name", None),
+                    "error": str(exc),
+                    "error_type": type(exc).__name__,
+                },
             )
             decision = PermissionDecision(
                 behavior="ask",
@@ -187,6 +236,22 @@
             rule_suggestions=[ToolRule(tool_name=tool.name, rule_content=tool.name)],
         )
 
+        # If tool doesn't normally require permission (e.g., read-only Bash),
+        # enforce deny rules but otherwise skip prompting.
+        if not needs_permission:
+            if decision.behavior == "deny":
+                return PermissionResult(
+                    result=False,
+                    message=decision.message or f"Permission denied for tool '{tool.name}'.",
+                    decision=decision,
+                )
+            return PermissionResult(
+                result=True,
+                message=decision.message,
+                updated_input=decision.updated_input,
+                decision=decision,
+            )
+
         if decision.behavior == "allow":
             return PermissionResult(
                 result=True,
@@ -203,7 +268,48 @@
             )
 
         # Ask/passthrough flows prompt the user.
-
+        tool_input_dict = (
+            parsed_input.model_dump()
+            if hasattr(parsed_input, "model_dump")
+            else dict(parsed_input)
+            if isinstance(parsed_input, dict)
+            else {}
+        )
+        try:
+            hook_result = await hook_manager.run_permission_request_async(
+                tool.name, tool_input_dict
+            )
+            if hook_result.outputs:
+                updated_input = hook_result.updated_input or decision.updated_input
+                if hook_result.should_allow:
+                    return PermissionResult(
+                        result=True,
+                        message=decision.message,
+                        updated_input=updated_input,
+                        decision=decision,
+                    )
+                if hook_result.should_block or not hook_result.should_continue:
+                    reason = (
+                        hook_result.block_reason
+                        or hook_result.stop_reason
+                        or decision.message
+                        or f"Permission denied for tool '{tool.name}'."
+                    )
+                    return PermissionResult(
+                        result=False,
+                        message=reason,
+                        updated_input=updated_input,
+                        decision=decision,
+                    )
+        except (RuntimeError, ValueError, TypeError, OSError) as exc:
+            logger.warning(
+                "[permissions] PermissionRequest hook failed: %s: %s",
+                type(exc).__name__,
+                exc,
+                extra={"tool": getattr(tool, "name", None)},
+            )
+
+        input_preview = _format_input_preview(parsed_input, tool_name=tool.name)
         prompt_lines = [
             f"{tool.name}",
             "",
```
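A minimal usage sketch of the updated checker factory, assuming call sites now pass the new `yolo_mode` flag. `make_permission_checker` and `PermissionResult` are taken from the diff above; `my_tool` and `parsed_input` are hypothetical stand-ins for a real `Tool` and its parsed input.

```python
from pathlib import Path

# Sketch only, under the assumptions stated above.
async def demo(my_tool, parsed_input):
    check = make_permission_checker(
        project_path=Path.cwd(),
        yolo_mode=False,  # True short-circuits to PermissionResult(result=True)
        prompt_fn=input,
    )
    result = await check(my_tool, parsed_input)
    if not result.result:
        print(result.message)
```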
ripperdoc/core/providers/anthropic.py
CHANGED

```diff
@@ -3,6 +3,7 @@
 from __future__ import annotations
 
 import asyncio
+import json
 import time
 from typing import Any, Awaitable, Callable, Dict, List, Optional
 from uuid import uuid4
@@ -73,17 +74,21 @@ def _content_blocks_from_stream_state(
 
     # Add thinking block if present
     if collected_thinking:
-        blocks.append(
-
-
-
+        blocks.append(
+            {
+                "type": "thinking",
+                "thinking": "".join(collected_thinking),
+            }
+        )
 
     # Add text block if present
     if collected_text:
-        blocks.append(
-
-
-
+        blocks.append(
+            {
+                "type": "text",
+                "text": "".join(collected_text),
+            }
+        )
 
     # Add tool_use blocks
     for idx in sorted(collected_tool_calls.keys()):
@@ -92,12 +97,14 @@ def _content_blocks_from_stream_state(
         if not name:
             continue
         tool_use_id = call.get("id") or str(uuid4())
-        blocks.append(
-
-
-
-
-
+        blocks.append(
+            {
+                "type": "tool_use",
+                "tool_use_id": tool_use_id,
+                "name": name,
+                "input": call.get("input", {}),
+            }
+        )
 
     return blocks
 
@@ -110,25 +117,31 @@ def _content_blocks_from_response(response: Any) -> List[Dict[str, Any]]:
         if btype == "text":
             blocks.append({"type": "text", "text": getattr(block, "text", "")})
         elif btype == "thinking":
-            blocks.append(
-
-
-
-
+            blocks.append(
+                {
+                    "type": "thinking",
+                    "thinking": getattr(block, "thinking", None) or "",
+                    "signature": getattr(block, "signature", None),
+                }
+            )
         elif btype == "redacted_thinking":
-            blocks.append(
-
-
-
-
+            blocks.append(
+                {
+                    "type": "redacted_thinking",
+                    "data": getattr(block, "data", None),
+                    "signature": getattr(block, "signature", None),
+                }
+            )
         elif btype == "tool_use":
             raw_input = getattr(block, "input", {}) or {}
-            blocks.append(
-
-
-
-
-
+            blocks.append(
+                {
+                    "type": "tool_use",
+                    "tool_use_id": getattr(block, "id", None) or str(uuid4()),
+                    "name": getattr(block, "name", None),
+                    "input": raw_input if isinstance(raw_input, dict) else {},
+                }
+            )
     return blocks
 
 
@@ -188,6 +201,15 @@ class AnthropicClient(ProviderClient):
         except Exception as exc:
             duration_ms = (time.time() - start_time) * 1000
             error_code, error_message = _classify_anthropic_error(exc)
+            logger.debug(
+                "[anthropic_client] Exception details",
+                extra={
+                    "model": model_profile.model,
+                    "exception_type": type(exc).__name__,
+                    "exception_str": str(exc),
+                    "error_code": error_code,
+                },
+            )
             logger.error(
                 "[anthropic_client] API call failed",
                 extra={
@@ -222,6 +244,17 @@ class AnthropicClient(ProviderClient):
         tool_schemas = await build_anthropic_tool_schemas(tools)
         response_metadata: Dict[str, Any] = {}
 
+        logger.debug(
+            "[anthropic_client] Preparing request",
+            extra={
+                "model": model_profile.model,
+                "tool_mode": tool_mode,
+                "stream": stream,
+                "max_thinking_tokens": max_thinking_tokens,
+                "num_tools": len(tool_schemas),
+            },
+        )
+
         anthropic_kwargs: Dict[str, Any] = {}
         if model_profile.api_base:
             anthropic_kwargs["base_url"] = model_profile.api_base
@@ -239,9 +272,9 @@ class AnthropicClient(ProviderClient):
             # The read timeout applies to waiting for each chunk from the server
             timeout_config = httpx.Timeout(
                 connect=60.0,  # 60 seconds to establish connection
-                read=600.0,
-                write=60.0,
-                pool=60.0,
+                read=600.0,  # 10 minutes to wait for each chunk (model may be thinking)
+                write=60.0,  # 60 seconds to send request
+                pool=60.0,  # 60 seconds to get connection from pool
             )
             anthropic_kwargs["timeout"] = timeout_config
         elif request_timeout and request_timeout > 0:
@@ -267,6 +300,21 @@ class AnthropicClient(ProviderClient):
         if thinking_payload:
             request_kwargs["thinking"] = thinking_payload
 
+        logger.debug(
+            "[anthropic_client] Request parameters",
+            extra={
+                "model": model_profile.model,
+                "request_kwargs": json.dumps(
+                    {k: v for k, v in request_kwargs.items() if k != "messages"},
+                    ensure_ascii=False,
+                    default=str,
+                )[:1000],
+                "thinking_payload": json.dumps(thinking_payload, ensure_ascii=False)
+                if thinking_payload
+                else None,
+            },
+        )
+
         async with await self._client(anthropic_kwargs) as client:
             if stream:
                 # Streaming mode: use event-based streaming with per-token timeout
@@ -294,6 +342,16 @@ class AnthropicClient(ProviderClient):
                     model_profile.model, duration_ms=duration_ms, cost_usd=cost_usd, **usage_tokens
                 )
 
+                logger.debug(
+                    "[anthropic_client] Response content blocks",
+                    extra={
+                        "model": model_profile.model,
+                        "content_blocks": json.dumps(content_blocks, ensure_ascii=False)[:1000],
+                        "usage_tokens": json.dumps(usage_tokens, ensure_ascii=False),
+                        "metadata": json.dumps(response_metadata, ensure_ascii=False)[:500],
+                    },
+                )
+
                 logger.info(
                     "[anthropic_client] Response received",
                     extra={
@@ -354,6 +412,13 @@ class AnthropicClient(ProviderClient):
         event_count = 0
         message_stop_received = False
 
+        logger.debug(
+            "[anthropic_client] Initiating stream request",
+            extra={
+                "model": request_kwargs.get("model"),
+            },
+        )
+
         # Create the stream - this initiates the connection
         stream = client.messages.stream(**request_kwargs)
 
@@ -448,7 +513,12 @@ class AnthropicClient(ProviderClient):
             else:
                 raise
 
-        if
+        if (
+            last_error
+            and not collected_text
+            and not collected_thinking
+            and not collected_tool_calls
+        ):
             raise RuntimeError(f"Stream failed after {attempts} attempts") from last_error
 
         # Store reasoning content in metadata
@@ -542,7 +612,8 @@ class AnthropicClient(ProviderClient):
                    except (RuntimeError, ValueError, TypeError, OSError) as cb_exc:
                         logger.warning(
                             "[anthropic_client] Progress callback failed: %s: %s",
-                            type(cb_exc).__name__,
+                            type(cb_exc).__name__,
+                            cb_exc,
                         )
 
                 elif delta_type == "text_delta":
@@ -556,7 +627,8 @@ class AnthropicClient(ProviderClient):
                     except (RuntimeError, ValueError, TypeError, OSError) as cb_exc:
                         logger.warning(
                             "[anthropic_client] Progress callback failed: %s: %s",
-                            type(cb_exc).__name__,
+                            type(cb_exc).__name__,
+                            cb_exc,
                         )
 
                 elif delta_type == "input_json_delta":
@@ -599,6 +671,7 @@ class AnthropicClient(ProviderClient):
                 # Parse accumulated JSON for tool calls
                 if index in collected_tool_calls:
                     import json
+
                     json_str = collected_tool_calls[index].get("input_json", "")
                     if json_str:
                         try:
```