ripperdoc 0.2.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (107) hide show
  1. ripperdoc/__init__.py +3 -0
  2. ripperdoc/__main__.py +20 -0
  3. ripperdoc/cli/__init__.py +1 -0
  4. ripperdoc/cli/cli.py +405 -0
  5. ripperdoc/cli/commands/__init__.py +82 -0
  6. ripperdoc/cli/commands/agents_cmd.py +263 -0
  7. ripperdoc/cli/commands/base.py +19 -0
  8. ripperdoc/cli/commands/clear_cmd.py +18 -0
  9. ripperdoc/cli/commands/compact_cmd.py +23 -0
  10. ripperdoc/cli/commands/config_cmd.py +31 -0
  11. ripperdoc/cli/commands/context_cmd.py +144 -0
  12. ripperdoc/cli/commands/cost_cmd.py +82 -0
  13. ripperdoc/cli/commands/doctor_cmd.py +221 -0
  14. ripperdoc/cli/commands/exit_cmd.py +19 -0
  15. ripperdoc/cli/commands/help_cmd.py +20 -0
  16. ripperdoc/cli/commands/mcp_cmd.py +70 -0
  17. ripperdoc/cli/commands/memory_cmd.py +202 -0
  18. ripperdoc/cli/commands/models_cmd.py +413 -0
  19. ripperdoc/cli/commands/permissions_cmd.py +302 -0
  20. ripperdoc/cli/commands/resume_cmd.py +98 -0
  21. ripperdoc/cli/commands/status_cmd.py +167 -0
  22. ripperdoc/cli/commands/tasks_cmd.py +278 -0
  23. ripperdoc/cli/commands/todos_cmd.py +69 -0
  24. ripperdoc/cli/commands/tools_cmd.py +19 -0
  25. ripperdoc/cli/ui/__init__.py +1 -0
  26. ripperdoc/cli/ui/context_display.py +298 -0
  27. ripperdoc/cli/ui/helpers.py +22 -0
  28. ripperdoc/cli/ui/rich_ui.py +1557 -0
  29. ripperdoc/cli/ui/spinner.py +49 -0
  30. ripperdoc/cli/ui/thinking_spinner.py +128 -0
  31. ripperdoc/cli/ui/tool_renderers.py +298 -0
  32. ripperdoc/core/__init__.py +1 -0
  33. ripperdoc/core/agents.py +486 -0
  34. ripperdoc/core/commands.py +33 -0
  35. ripperdoc/core/config.py +559 -0
  36. ripperdoc/core/default_tools.py +88 -0
  37. ripperdoc/core/permissions.py +252 -0
  38. ripperdoc/core/providers/__init__.py +47 -0
  39. ripperdoc/core/providers/anthropic.py +250 -0
  40. ripperdoc/core/providers/base.py +265 -0
  41. ripperdoc/core/providers/gemini.py +615 -0
  42. ripperdoc/core/providers/openai.py +487 -0
  43. ripperdoc/core/query.py +1058 -0
  44. ripperdoc/core/query_utils.py +622 -0
  45. ripperdoc/core/skills.py +295 -0
  46. ripperdoc/core/system_prompt.py +431 -0
  47. ripperdoc/core/tool.py +240 -0
  48. ripperdoc/sdk/__init__.py +9 -0
  49. ripperdoc/sdk/client.py +333 -0
  50. ripperdoc/tools/__init__.py +1 -0
  51. ripperdoc/tools/ask_user_question_tool.py +431 -0
  52. ripperdoc/tools/background_shell.py +389 -0
  53. ripperdoc/tools/bash_output_tool.py +98 -0
  54. ripperdoc/tools/bash_tool.py +1016 -0
  55. ripperdoc/tools/dynamic_mcp_tool.py +428 -0
  56. ripperdoc/tools/enter_plan_mode_tool.py +226 -0
  57. ripperdoc/tools/exit_plan_mode_tool.py +153 -0
  58. ripperdoc/tools/file_edit_tool.py +346 -0
  59. ripperdoc/tools/file_read_tool.py +203 -0
  60. ripperdoc/tools/file_write_tool.py +205 -0
  61. ripperdoc/tools/glob_tool.py +179 -0
  62. ripperdoc/tools/grep_tool.py +370 -0
  63. ripperdoc/tools/kill_bash_tool.py +136 -0
  64. ripperdoc/tools/ls_tool.py +471 -0
  65. ripperdoc/tools/mcp_tools.py +591 -0
  66. ripperdoc/tools/multi_edit_tool.py +456 -0
  67. ripperdoc/tools/notebook_edit_tool.py +386 -0
  68. ripperdoc/tools/skill_tool.py +205 -0
  69. ripperdoc/tools/task_tool.py +379 -0
  70. ripperdoc/tools/todo_tool.py +494 -0
  71. ripperdoc/tools/tool_search_tool.py +380 -0
  72. ripperdoc/utils/__init__.py +1 -0
  73. ripperdoc/utils/bash_constants.py +51 -0
  74. ripperdoc/utils/bash_output_utils.py +43 -0
  75. ripperdoc/utils/coerce.py +34 -0
  76. ripperdoc/utils/context_length_errors.py +252 -0
  77. ripperdoc/utils/exit_code_handlers.py +241 -0
  78. ripperdoc/utils/file_watch.py +135 -0
  79. ripperdoc/utils/git_utils.py +274 -0
  80. ripperdoc/utils/json_utils.py +27 -0
  81. ripperdoc/utils/log.py +176 -0
  82. ripperdoc/utils/mcp.py +560 -0
  83. ripperdoc/utils/memory.py +253 -0
  84. ripperdoc/utils/message_compaction.py +676 -0
  85. ripperdoc/utils/messages.py +519 -0
  86. ripperdoc/utils/output_utils.py +258 -0
  87. ripperdoc/utils/path_ignore.py +677 -0
  88. ripperdoc/utils/path_utils.py +46 -0
  89. ripperdoc/utils/permissions/__init__.py +27 -0
  90. ripperdoc/utils/permissions/path_validation_utils.py +174 -0
  91. ripperdoc/utils/permissions/shell_command_validation.py +552 -0
  92. ripperdoc/utils/permissions/tool_permission_utils.py +279 -0
  93. ripperdoc/utils/prompt.py +17 -0
  94. ripperdoc/utils/safe_get_cwd.py +31 -0
  95. ripperdoc/utils/sandbox_utils.py +38 -0
  96. ripperdoc/utils/session_history.py +260 -0
  97. ripperdoc/utils/session_usage.py +117 -0
  98. ripperdoc/utils/shell_token_utils.py +95 -0
  99. ripperdoc/utils/shell_utils.py +159 -0
  100. ripperdoc/utils/todo.py +203 -0
  101. ripperdoc/utils/token_estimation.py +34 -0
  102. ripperdoc-0.2.6.dist-info/METADATA +193 -0
  103. ripperdoc-0.2.6.dist-info/RECORD +107 -0
  104. ripperdoc-0.2.6.dist-info/WHEEL +5 -0
  105. ripperdoc-0.2.6.dist-info/entry_points.txt +3 -0
  106. ripperdoc-0.2.6.dist-info/licenses/LICENSE +53 -0
  107. ripperdoc-0.2.6.dist-info/top_level.txt +1 -0
@@ -0,0 +1,252 @@
1
+ """Permission handling for tool execution."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import asyncio
6
+ from collections import defaultdict
7
+ from dataclasses import dataclass
8
+ from pathlib import Path
9
+ from typing import Any, Awaitable, Callable, Optional, Set
10
+
11
+ from ripperdoc.core.config import config_manager
12
+ from ripperdoc.core.tool import Tool
13
+ from ripperdoc.utils.permissions import PermissionDecision, ToolRule
14
+ from ripperdoc.utils.log import get_logger
15
+
16
+ logger = get_logger()
17
+
18
+
19
@dataclass
class PermissionResult:
    """Result of a permission check.

    Returned by the checker created in ``make_permission_checker``; the
    caller uses ``result`` to gate tool execution and may substitute
    ``updated_input`` for the originally parsed input.
    """

    # True when the tool invocation is allowed to proceed.
    result: bool
    # Optional human-readable explanation (set mostly on denial/errors).
    message: Optional[str] = None
    # Replacement input suggested by the tool's permission hook, if any.
    updated_input: Any = None
    # The underlying PermissionDecision that produced this result, when one exists.
    decision: Optional[PermissionDecision] = None
27
+
28
+
29
+ def _format_input_preview(parsed_input: Any) -> str:
30
+ """Create a short, human-friendly preview for prompts."""
31
+ if hasattr(parsed_input, "command"):
32
+ return f"command='{getattr(parsed_input, 'command')}'"
33
+ if hasattr(parsed_input, "file_path"):
34
+ return f"file='{getattr(parsed_input, 'file_path')}'"
35
+ if hasattr(parsed_input, "path"):
36
+ return f"path='{getattr(parsed_input, 'path')}'"
37
+
38
+ preview = str(parsed_input)
39
+ if len(preview) > 140:
40
+ return preview[:137] + "..."
41
+ return preview
42
+
43
+
44
def permission_key(tool: Tool[Any, Any], parsed_input: Any) -> str:
    """Build a stable permission key for persistence."""
    # Commands key on their exact text; no normalization is attempted.
    if hasattr(parsed_input, "command"):
        return f"{tool.name}::command::{parsed_input.command}"

    # Path-like inputs key on the resolved absolute path so that different
    # spellings of the same file map to the same permission entry.
    for attr in ("file_path", "path"):
        if not hasattr(parsed_input, attr):
            continue
        raw_value = getattr(parsed_input, attr)
        try:
            resolved = Path(raw_value).resolve()
        except (OSError, RuntimeError) as exc:
            # Resolution can fail (e.g. filesystem loops); fall back to the
            # raw value so a key is still produced.
            logger.warning(
                f"[permissions] Failed to resolve {attr} for permission key",
                extra={"tool": getattr(tool, "name", None), "error": str(exc)},
            )
            return f"{tool.name}::path::{raw_value}"
        return f"{tool.name}::path::{resolved}"

    # No recognizable input attribute: key on the tool name alone.
    return tool.name
67
+
68
+
69
+ def _render_options_prompt(prompt: str, options: list[tuple[str, str]]) -> str:
70
+ """Render a simple numbered prompt."""
71
+ border = "─" * 120
72
+ lines = [border, prompt, ""]
73
+ for idx, (_, label) in enumerate(options, start=1):
74
+ prefix = "❯" if idx == 1 else " "
75
+ lines.append(f"{prefix} {idx}. {label}")
76
+ numeric_choices = "/".join(str(i) for i in range(1, len(options) + 1))
77
+ shortcut_choices = "/".join(opt[0] for opt in options)
78
+ lines.append(f"Choice ({numeric_choices} or {shortcut_choices}): ")
79
+ return "\n".join(lines)
80
+
81
+
82
def _rule_strings(rule_suggestions: Optional[Any]) -> list[str]:
    """Normalize rule suggestions to simple strings."""
    # None or empty input yields no rules.
    if not rule_suggestions:
        return []
    # ToolRule instances contribute their rule_content; anything else is
    # coerced with str(). Empty/falsy strings are dropped afterwards.
    normalized = [
        suggestion.rule_content if isinstance(suggestion, ToolRule) else str(suggestion)
        for suggestion in rule_suggestions
    ]
    return [rule for rule in normalized if rule]
93
+
94
+
95
def make_permission_checker(
    project_path: Path,
    safe_mode: bool,
    prompt_fn: Optional[Callable[[str], str]] = None,
) -> Callable[[Tool[Any, Any], Any], Awaitable[PermissionResult]]:
    """Create a permission checking function for the current project.

    The returned async callable checks a (tool, parsed_input) pair against
    persisted project config, per-session approvals, and the tool's own
    permission hooks, prompting the user interactively when needed.

    Args:
        project_path: Project root; resolved once and used for config lookup.
        safe_mode: When False, every check short-circuits to "allowed".
        prompt_fn: Optional synchronous prompt function (defaults to input);
            it is run in an executor so the event loop is never blocked.
    """

    project_path = project_path.resolve()
    # Warm the config cache for this project; the result is re-fetched on
    # every check so on-disk changes are picked up.
    config_manager.get_project_config(project_path)

    # Session-scoped state, captured by the closure: tools approved for the
    # whole session, and per-tool rule strings (currently only "Bash").
    session_allowed_tools: Set[str] = set()
    session_tool_rules: dict[str, Set[str]] = defaultdict(set)

    async def _prompt_user(prompt: str, options: list[tuple[str, str]]) -> str:
        """Prompt the user without blocking the event loop."""
        loop = asyncio.get_running_loop()
        responder = prompt_fn or input

        def _ask() -> str:
            rendered = _render_options_prompt(prompt, options)
            return responder(rendered)

        # Blocking I/O (stdin) runs in the default executor.
        return await loop.run_in_executor(None, _ask)

    async def can_use_tool(tool: Tool[Any, Any], parsed_input: Any) -> PermissionResult:
        """Check and optionally persist permission for a tool invocation."""
        config = config_manager.get_project_config(project_path)

        # Outside safe mode everything is allowed without prompting.
        if not safe_mode:
            return PermissionResult(result=True)

        # A tool may declare that this particular input needs no permission.
        try:
            if hasattr(tool, "needs_permissions") and not tool.needs_permissions(parsed_input):
                return PermissionResult(result=True)
        except (TypeError, AttributeError, ValueError) as exc:
            # Tool implementation error - log and deny for safety
            logger.warning(
                "[permissions] Tool needs_permissions check failed",
                extra={"tool": getattr(tool, "name", None), "error": str(exc), "error_type": type(exc).__name__},
            )
            return PermissionResult(
                result=False,
                message=f"Permission check failed: {type(exc).__name__}: {exc}",
            )

        # Assemble the effective rule sets: persisted config plus any rules
        # approved earlier in this session. Only Bash has rule support here.
        allowed_tools = set(config.allowed_tools or [])
        allow_rules = {
            "Bash": set(config.bash_allow_rules or []) | session_tool_rules.get("Bash", set())
        }
        deny_rules = {"Bash": set(config.bash_deny_rules or [])}
        allowed_working_dirs = {
            str(project_path.resolve()),
            *[str(Path(p).resolve()) for p in config.working_directories or []],
        }

        # Persisted approvals
        if tool.name in allowed_tools or tool.name in session_allowed_tools:
            return PermissionResult(result=True)

        # Let the tool evaluate its own permission policy, if it has one.
        # The hook may be sync or async and may return a dict instead of a
        # PermissionDecision instance.
        decision: Optional[PermissionDecision] = None
        if hasattr(tool, "check_permissions"):
            try:
                maybe_decision = tool.check_permissions(
                    parsed_input,
                    {
                        "allowed_rules": allow_rules.get(tool.name, set()),
                        "denied_rules": deny_rules.get(tool.name, set()),
                        "allowed_working_directories": allowed_working_dirs,
                    },
                )
                decision = (
                    await maybe_decision if asyncio.iscoroutine(maybe_decision) else maybe_decision
                )
                # Allow tools to return a plain dict shaped like PermissionDecision.
                if isinstance(decision, dict) and "behavior" in decision:
                    decision = PermissionDecision(**decision)
            except (TypeError, AttributeError, ValueError, KeyError) as exc:
                # Tool implementation error - fall back to asking user
                logger.warning(
                    "[permissions] Tool check_permissions failed",
                    extra={"tool": getattr(tool, "name", None), "error": str(exc), "error_type": type(exc).__name__},
                )
                decision = PermissionDecision(
                    behavior="ask",
                    message=f"Error checking permissions: {type(exc).__name__}",
                    rule_suggestions=None,
                )

        # No hook (or hook returned None): default to asking the user, and
        # suggest a rule that allows the tool by name.
        if decision is None:
            decision = PermissionDecision(
                behavior="passthrough",
                message=f"Allow tool '{tool.name}'?",
                rule_suggestions=[ToolRule(tool_name=tool.name, rule_content=tool.name)],
            )

        if decision.behavior == "allow":
            return PermissionResult(
                result=True,
                message=decision.message,
                updated_input=decision.updated_input,
                decision=decision,
            )

        if decision.behavior == "deny":
            return PermissionResult(
                result=False,
                message=decision.message or f"Permission denied for tool '{tool.name}'.",
                decision=decision,
            )

        # Ask/passthrough flows prompt the user.
        input_preview = _format_input_preview(parsed_input)
        prompt_lines = [
            f"{tool.name}",
            "",
            f" {input_preview}",
        ]
        if decision.message:
            prompt_lines.append(f" {decision.message}")
        prompt_lines.append(" Do you want to proceed?")
        prompt = "\n".join(prompt_lines)

        options = [
            ("y", "Yes"),
            ("s", "Yes, for this session"),
            ("n", "No"),
        ]

        answer = (await _prompt_user(prompt, options=options)).strip().lower()
        logger.debug(
            "[permissions] User answer for permission prompt",
            extra={"answer": answer, "tool": getattr(tool, "name", None)},
        )
        # Rules to persist for "yes, for this session": prefer the tool's own
        # suggestions, fall back to a key derived from the input.
        rule_suggestions = _rule_strings(decision.rule_suggestions) or [
            permission_key(tool, parsed_input)
        ]

        # One-shot approval.
        if answer in ("1", "y", "yes"):
            return PermissionResult(
                result=True, updated_input=decision.updated_input, decision=decision
            )

        # Session-wide approval: Bash stores rule strings, every other tool
        # is simply allow-listed by name for the rest of the session.
        if answer in ("2", "s", "session", "a"):
            if tool.name == "Bash":
                session_tool_rules["Bash"].update(rule_suggestions)
            else:
                session_allowed_tools.add(tool.name)
            return PermissionResult(
                result=True, updated_input=decision.updated_input, decision=decision
            )

        # Anything else (including "n"/"no" or garbage) is a denial.
        return PermissionResult(
            result=False,
            message=decision.message or f"Permission denied for tool '{tool.name}'.",
            decision=decision,
        )

    return can_use_tool
@@ -0,0 +1,47 @@
1
+ """Provider client registry with optional dependencies."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import importlib
6
+ from typing import Optional, TYPE_CHECKING, Type, cast
7
+
8
+ from ripperdoc.core.config import ProviderType
9
+ from ripperdoc.core.providers.base import ProviderClient
10
+ from ripperdoc.utils.log import get_logger
11
+
12
+ if TYPE_CHECKING: # pragma: no cover - type checking only
13
+ from ripperdoc.core.providers.anthropic import AnthropicClient # noqa: F401
14
+ from ripperdoc.core.providers.gemini import GeminiClient # noqa: F401
15
+ from ripperdoc.core.providers.openai import OpenAIClient # noqa: F401
16
+
17
+ logger = get_logger()
18
+
19
+
20
+ def _load_client(module: str, cls: str, extra: str) -> Type[ProviderClient]:
21
+ """Dynamically import a provider client, pointing users to the right extra."""
22
+ try:
23
+ mod = importlib.import_module(f"ripperdoc.core.providers.{module}")
24
+ client_cls = cast(Type[ProviderClient], getattr(mod, cls, None))
25
+ if client_cls is None:
26
+ raise ImportError(f"{cls} not found in {module}")
27
+ return client_cls
28
+ except ImportError as exc:
29
+ raise RuntimeError(
30
+ f"{cls} requires optional dependency group '{extra}'. "
31
+ f"Install with `pip install ripperdoc[{extra}]`."
32
+ ) from exc
33
+
34
+
35
def get_provider_client(provider: ProviderType) -> Optional[ProviderClient]:
    """Return a provider client for the given protocol."""
    # Map each supported protocol to its (module, class, pip extra) triple so
    # the heavy SDK import happens lazily via _load_client.
    registry = {
        ProviderType.ANTHROPIC: ("anthropic", "AnthropicClient", "anthropic"),
        ProviderType.OPENAI_COMPATIBLE: ("openai", "OpenAIClient", "openai"),
        ProviderType.GEMINI: ("gemini", "GeminiClient", "gemini"),
    }
    entry = registry.get(provider)
    if entry is None:
        logger.warning("[providers] Unsupported provider", extra={"provider": provider})
        return None
    module_name, class_name, extra = entry
    return _load_client(module_name, class_name, extra)()
45
+
46
+
47
# Public API of the providers package.
__all__ = ["ProviderClient", "get_provider_client"]
@@ -0,0 +1,250 @@
1
+ """Anthropic provider client."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import asyncio
6
+ import time
7
+ from typing import Any, Awaitable, Callable, Dict, List, Optional
8
+
9
+ import anthropic
10
+ from anthropic import AsyncAnthropic
11
+
12
+ from ripperdoc.core.config import ModelProfile
13
+ from ripperdoc.core.providers.base import (
14
+ ProgressCallback,
15
+ ProviderClient,
16
+ ProviderResponse,
17
+ call_with_timeout_and_retries,
18
+ iter_with_timeout,
19
+ sanitize_tool_history,
20
+ )
21
+ from ripperdoc.core.query_utils import (
22
+ anthropic_usage_tokens,
23
+ build_anthropic_tool_schemas,
24
+ content_blocks_from_anthropic_response,
25
+ estimate_cost_usd,
26
+ )
27
+ from ripperdoc.core.tool import Tool
28
+ from ripperdoc.utils.log import get_logger
29
+ from ripperdoc.utils.session_usage import record_usage
30
+
31
+ logger = get_logger()
32
+
33
+
34
def _classify_anthropic_error(exc: Exception) -> tuple[str, str]:
    """Classify an Anthropic exception into error code and user-friendly message."""
    name = type(exc).__name__
    detail = str(exc)
    lowered = detail.lower()

    # Order matters: several SDK exceptions share the APIStatusError base
    # class, so the specific types must be tested before the generic one.
    if isinstance(exc, anthropic.AuthenticationError):
        return "authentication_error", f"Authentication failed: {detail}"
    if isinstance(exc, anthropic.PermissionDeniedError):
        if "balance" in lowered or "insufficient" in lowered:
            return "insufficient_balance", f"Insufficient balance: {detail}"
        return "permission_denied", f"Permission denied: {detail}"
    if isinstance(exc, anthropic.NotFoundError):
        return "model_not_found", f"Model not found: {detail}"
    if isinstance(exc, anthropic.BadRequestError):
        # Sub-classify 400s by message text: context overflow vs. policy.
        if "context" in lowered or "token" in lowered:
            return "context_length_exceeded", f"Context length exceeded: {detail}"
        if "content" in lowered and "policy" in lowered:
            return "content_policy_violation", f"Content policy violation: {detail}"
        return "bad_request", f"Invalid request: {detail}"
    if isinstance(exc, anthropic.RateLimitError):
        return "rate_limit", f"Rate limit exceeded: {detail}"
    if isinstance(exc, anthropic.APIConnectionError):
        return "connection_error", f"Connection error: {detail}"
    if isinstance(exc, anthropic.APIStatusError):
        status = getattr(exc, "status_code", "unknown")
        return "api_error", f"API error ({status}): {detail}"
    if isinstance(exc, asyncio.TimeoutError):
        return "timeout", f"Request timed out: {detail}"

    return "unknown_error", f"Unexpected error ({name}): {detail}"
64
+
65
+
66
class AnthropicClient(ProviderClient):
    """Anthropic client with streaming and non-streaming support."""

    def __init__(self, client_factory: Optional[Callable[[], Awaitable[AsyncAnthropic]]] = None):
        # Optional async factory for the SDK client; lets tests inject a stub
        # instead of constructing a real AsyncAnthropic.
        self._client_factory = client_factory

    async def _client(self, kwargs: Dict[str, Any]) -> AsyncAnthropic:
        """Return an AsyncAnthropic client, preferring the injected factory."""
        if self._client_factory:
            return await self._client_factory()
        return AsyncAnthropic(**kwargs)

    async def call(
        self,
        *,
        model_profile: ModelProfile,
        system_prompt: str,
        normalized_messages: Any,
        tools: List[Tool[Any, Any]],
        tool_mode: str,
        stream: bool,
        progress_callback: Optional[ProgressCallback],
        request_timeout: Optional[float],
        max_retries: int,
        max_thinking_tokens: int,
    ) -> ProviderResponse:
        """Run one model call; failures become an error ProviderResponse.

        Thin error-handling wrapper around ``_call_impl``: cancellation is
        re-raised, every other exception is classified and converted into a
        ProviderResponse error object so callers never see raw SDK errors.
        """
        start_time = time.time()

        try:
            return await self._call_impl(
                model_profile=model_profile,
                system_prompt=system_prompt,
                normalized_messages=normalized_messages,
                tools=tools,
                tool_mode=tool_mode,
                stream=stream,
                progress_callback=progress_callback,
                request_timeout=request_timeout,
                max_retries=max_retries,
                max_thinking_tokens=max_thinking_tokens,
                start_time=start_time,
            )
        except asyncio.CancelledError:
            raise  # Don't suppress task cancellation
        except Exception as exc:
            duration_ms = (time.time() - start_time) * 1000
            error_code, error_message = _classify_anthropic_error(exc)
            logger.error(
                "[anthropic_client] API call failed",
                extra={
                    "model": model_profile.model,
                    "error_code": error_code,
                    "error_message": error_message,
                    "duration_ms": round(duration_ms, 2),
                },
            )
            return ProviderResponse.create_error(
                error_code=error_code,
                error_message=error_message,
                duration_ms=duration_ms,
            )

    async def _call_impl(
        self,
        *,
        model_profile: ModelProfile,
        system_prompt: str,
        normalized_messages: Any,
        tools: List[Tool[Any, Any]],
        tool_mode: str,
        stream: bool,
        progress_callback: Optional[ProgressCallback],
        request_timeout: Optional[float],
        max_retries: int,
        max_thinking_tokens: int,
        start_time: float,
    ) -> ProviderResponse:
        """Internal implementation of call, may raise exceptions."""
        tool_schemas = await build_anthropic_tool_schemas(tools)
        collected_text: List[str] = []
        reasoning_parts: List[str] = []
        response_metadata: Dict[str, Any] = {}

        # Build SDK constructor kwargs; api_key and auth_token are only
        # passed when present so the SDK can fall back to its env defaults.
        anthropic_kwargs = {"base_url": model_profile.api_base}
        if model_profile.api_key:
            anthropic_kwargs["api_key"] = model_profile.api_key
        auth_token = getattr(model_profile, "auth_token", None)
        if auth_token:
            anthropic_kwargs["auth_token"] = auth_token

        # Drop/repair malformed tool_use / tool_result pairs before sending.
        normalized_messages = sanitize_tool_history(list(normalized_messages))

        # Extended-thinking payload is only attached when a budget is set.
        thinking_payload: Optional[Dict[str, Any]] = None
        if max_thinking_tokens > 0:
            thinking_payload = {"type": "enabled", "budget_tokens": max_thinking_tokens}

        async with await self._client(anthropic_kwargs) as client:

            async def _stream_request() -> Any:
                # Manual __aenter__/__aexit__ instead of `async with` so the
                # initial connection can be wrapped in wait_for when a
                # request_timeout is configured.
                stream_cm = client.messages.stream(
                    model=model_profile.model,
                    max_tokens=model_profile.max_tokens,
                    system=system_prompt,
                    messages=normalized_messages,  # type: ignore[arg-type]
                    tools=tool_schemas if tool_schemas else None,  # type: ignore
                    temperature=model_profile.temperature,
                    thinking=thinking_payload,  # type: ignore[arg-type]
                )
                stream_resp = (
                    await asyncio.wait_for(stream_cm.__aenter__(), timeout=request_timeout)
                    if request_timeout and request_timeout > 0
                    else await stream_cm.__aenter__()
                )
                try:
                    # Per-chunk timeout applies to each iteration, not the
                    # whole stream.
                    async for text in iter_with_timeout(stream_resp.text_stream, request_timeout):
                        if text:
                            collected_text.append(text)
                            if progress_callback:
                                try:
                                    await progress_callback(text)
                                except (RuntimeError, ValueError, TypeError, OSError) as cb_exc:
                                    # A broken progress callback must not
                                    # abort the stream.
                                    logger.warning(
                                        "[anthropic_client] Stream callback failed: %s: %s",
                                        type(cb_exc).__name__, cb_exc,
                                    )
                    # SDK versions differ in the accessor name; try both.
                    getter = getattr(stream_resp, "get_final_response", None) or getattr(
                        stream_resp, "get_final_message", None
                    )
                    if getter:
                        return await getter()
                    return None
                finally:
                    await stream_cm.__aexit__(None, None, None)

            async def _non_stream_request() -> Any:
                return await client.messages.create(
                    model=model_profile.model,
                    max_tokens=model_profile.max_tokens,
                    system=system_prompt,
                    messages=normalized_messages,  # type: ignore[arg-type]
                    tools=tool_schemas if tool_schemas else None,  # type: ignore
                    temperature=model_profile.temperature,
                    thinking=thinking_payload,  # type: ignore[arg-type]
                )

            # Streaming handles its own per-chunk timeouts above, so the
            # outer timeout is only applied to non-streaming calls.
            timeout_for_call = None if stream else request_timeout
            response = await call_with_timeout_and_retries(
                _stream_request if stream else _non_stream_request,
                timeout_for_call,
                max_retries,
            )

            # Record token usage and estimated cost for this session.
            duration_ms = (time.time() - start_time) * 1000
            usage_tokens = anthropic_usage_tokens(getattr(response, "usage", None))
            cost_usd = estimate_cost_usd(model_profile, usage_tokens)
            record_usage(
                model_profile.model, duration_ms=duration_ms, cost_usd=cost_usd, **usage_tokens
            )

            # Collect any "thinking" blocks into a single reasoning string
            # exposed via response metadata.
            content_blocks = content_blocks_from_anthropic_response(response, tool_mode)
            for blk in content_blocks:
                if blk.get("type") == "thinking":
                    thinking_text = blk.get("thinking") or blk.get("text") or ""
                    if thinking_text:
                        reasoning_parts.append(str(thinking_text))
            if reasoning_parts:
                response_metadata["reasoning_content"] = "\n".join(reasoning_parts)
            # Streaming progress is handled via text_stream; final content retains thinking blocks.

            logger.info(
                "[anthropic_client] Response received",
                extra={
                    "model": model_profile.model,
                    "duration_ms": round(duration_ms, 2),
                    "tool_mode": tool_mode,
                    "tool_schemas": len(tool_schemas),
                },
            )

            return ProviderResponse(
                content_blocks=content_blocks,
                usage_tokens=usage_tokens,
                cost_usd=cost_usd,
                duration_ms=duration_ms,
                metadata=response_metadata,
            )