ripperdoc 0.2.6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ripperdoc/__init__.py +3 -0
- ripperdoc/__main__.py +20 -0
- ripperdoc/cli/__init__.py +1 -0
- ripperdoc/cli/cli.py +405 -0
- ripperdoc/cli/commands/__init__.py +82 -0
- ripperdoc/cli/commands/agents_cmd.py +263 -0
- ripperdoc/cli/commands/base.py +19 -0
- ripperdoc/cli/commands/clear_cmd.py +18 -0
- ripperdoc/cli/commands/compact_cmd.py +23 -0
- ripperdoc/cli/commands/config_cmd.py +31 -0
- ripperdoc/cli/commands/context_cmd.py +144 -0
- ripperdoc/cli/commands/cost_cmd.py +82 -0
- ripperdoc/cli/commands/doctor_cmd.py +221 -0
- ripperdoc/cli/commands/exit_cmd.py +19 -0
- ripperdoc/cli/commands/help_cmd.py +20 -0
- ripperdoc/cli/commands/mcp_cmd.py +70 -0
- ripperdoc/cli/commands/memory_cmd.py +202 -0
- ripperdoc/cli/commands/models_cmd.py +413 -0
- ripperdoc/cli/commands/permissions_cmd.py +302 -0
- ripperdoc/cli/commands/resume_cmd.py +98 -0
- ripperdoc/cli/commands/status_cmd.py +167 -0
- ripperdoc/cli/commands/tasks_cmd.py +278 -0
- ripperdoc/cli/commands/todos_cmd.py +69 -0
- ripperdoc/cli/commands/tools_cmd.py +19 -0
- ripperdoc/cli/ui/__init__.py +1 -0
- ripperdoc/cli/ui/context_display.py +298 -0
- ripperdoc/cli/ui/helpers.py +22 -0
- ripperdoc/cli/ui/rich_ui.py +1557 -0
- ripperdoc/cli/ui/spinner.py +49 -0
- ripperdoc/cli/ui/thinking_spinner.py +128 -0
- ripperdoc/cli/ui/tool_renderers.py +298 -0
- ripperdoc/core/__init__.py +1 -0
- ripperdoc/core/agents.py +486 -0
- ripperdoc/core/commands.py +33 -0
- ripperdoc/core/config.py +559 -0
- ripperdoc/core/default_tools.py +88 -0
- ripperdoc/core/permissions.py +252 -0
- ripperdoc/core/providers/__init__.py +47 -0
- ripperdoc/core/providers/anthropic.py +250 -0
- ripperdoc/core/providers/base.py +265 -0
- ripperdoc/core/providers/gemini.py +615 -0
- ripperdoc/core/providers/openai.py +487 -0
- ripperdoc/core/query.py +1058 -0
- ripperdoc/core/query_utils.py +622 -0
- ripperdoc/core/skills.py +295 -0
- ripperdoc/core/system_prompt.py +431 -0
- ripperdoc/core/tool.py +240 -0
- ripperdoc/sdk/__init__.py +9 -0
- ripperdoc/sdk/client.py +333 -0
- ripperdoc/tools/__init__.py +1 -0
- ripperdoc/tools/ask_user_question_tool.py +431 -0
- ripperdoc/tools/background_shell.py +389 -0
- ripperdoc/tools/bash_output_tool.py +98 -0
- ripperdoc/tools/bash_tool.py +1016 -0
- ripperdoc/tools/dynamic_mcp_tool.py +428 -0
- ripperdoc/tools/enter_plan_mode_tool.py +226 -0
- ripperdoc/tools/exit_plan_mode_tool.py +153 -0
- ripperdoc/tools/file_edit_tool.py +346 -0
- ripperdoc/tools/file_read_tool.py +203 -0
- ripperdoc/tools/file_write_tool.py +205 -0
- ripperdoc/tools/glob_tool.py +179 -0
- ripperdoc/tools/grep_tool.py +370 -0
- ripperdoc/tools/kill_bash_tool.py +136 -0
- ripperdoc/tools/ls_tool.py +471 -0
- ripperdoc/tools/mcp_tools.py +591 -0
- ripperdoc/tools/multi_edit_tool.py +456 -0
- ripperdoc/tools/notebook_edit_tool.py +386 -0
- ripperdoc/tools/skill_tool.py +205 -0
- ripperdoc/tools/task_tool.py +379 -0
- ripperdoc/tools/todo_tool.py +494 -0
- ripperdoc/tools/tool_search_tool.py +380 -0
- ripperdoc/utils/__init__.py +1 -0
- ripperdoc/utils/bash_constants.py +51 -0
- ripperdoc/utils/bash_output_utils.py +43 -0
- ripperdoc/utils/coerce.py +34 -0
- ripperdoc/utils/context_length_errors.py +252 -0
- ripperdoc/utils/exit_code_handlers.py +241 -0
- ripperdoc/utils/file_watch.py +135 -0
- ripperdoc/utils/git_utils.py +274 -0
- ripperdoc/utils/json_utils.py +27 -0
- ripperdoc/utils/log.py +176 -0
- ripperdoc/utils/mcp.py +560 -0
- ripperdoc/utils/memory.py +253 -0
- ripperdoc/utils/message_compaction.py +676 -0
- ripperdoc/utils/messages.py +519 -0
- ripperdoc/utils/output_utils.py +258 -0
- ripperdoc/utils/path_ignore.py +677 -0
- ripperdoc/utils/path_utils.py +46 -0
- ripperdoc/utils/permissions/__init__.py +27 -0
- ripperdoc/utils/permissions/path_validation_utils.py +174 -0
- ripperdoc/utils/permissions/shell_command_validation.py +552 -0
- ripperdoc/utils/permissions/tool_permission_utils.py +279 -0
- ripperdoc/utils/prompt.py +17 -0
- ripperdoc/utils/safe_get_cwd.py +31 -0
- ripperdoc/utils/sandbox_utils.py +38 -0
- ripperdoc/utils/session_history.py +260 -0
- ripperdoc/utils/session_usage.py +117 -0
- ripperdoc/utils/shell_token_utils.py +95 -0
- ripperdoc/utils/shell_utils.py +159 -0
- ripperdoc/utils/todo.py +203 -0
- ripperdoc/utils/token_estimation.py +34 -0
- ripperdoc-0.2.6.dist-info/METADATA +193 -0
- ripperdoc-0.2.6.dist-info/RECORD +107 -0
- ripperdoc-0.2.6.dist-info/WHEEL +5 -0
- ripperdoc-0.2.6.dist-info/entry_points.txt +3 -0
- ripperdoc-0.2.6.dist-info/licenses/LICENSE +53 -0
- ripperdoc-0.2.6.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,519 @@
|
|
|
1
|
+
"""Message handling and formatting for Ripperdoc.
|
|
2
|
+
|
|
3
|
+
This module provides utilities for creating and normalizing messages
|
|
4
|
+
for communication with AI models.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
import json
|
|
8
|
+
from typing import Any, Dict, List, Optional, Union
|
|
9
|
+
from pydantic import BaseModel, ConfigDict, Field
|
|
10
|
+
from uuid import uuid4
|
|
11
|
+
from enum import Enum
|
|
12
|
+
from ripperdoc.utils.log import get_logger
|
|
13
|
+
|
|
14
|
+
# Module-level logger shared by every helper in this file.
logger = get_logger()
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
class MessageRole(str, Enum):
    """Message roles in a conversation.

    Inherits from ``str`` so members compare equal to their literal values
    (e.g. ``MessageRole.USER == "user"``) and serialize as plain strings.
    """

    USER = "user"
    ASSISTANT = "assistant"
    SYSTEM = "system"
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
class MessageContent(BaseModel):
    """Content of a message.

    One content block within a message. ``type`` selects which optional
    fields are meaningful (the converters below handle "text", "thinking",
    "redacted_thinking", "tool_use" and "tool_result" blocks).
    """

    type: str
    # Plain text payload; also used as a fallback by the block converters.
    text: Optional[str] = None
    # Reasoning text for "thinking" blocks.
    thinking: Optional[str] = None
    # Provider-issued signature carried alongside thinking blocks.
    signature: Optional[str] = None
    # Opaque payload for "redacted_thinking" blocks.
    data: Optional[str] = None
    # Some providers return tool_use IDs as "id", others as "tool_use_id"
    id: Optional[str] = None
    tool_use_id: Optional[str] = None
    # Tool name for "tool_use" blocks.
    name: Optional[str] = None
    # Tool-call arguments for "tool_use" blocks.
    input: Optional[Dict[str, object]] = None
    # Marks a "tool_result" block as an error result when set.
    is_error: Optional[bool] = None
|
|
39
|
+
|
|
40
|
+
|
|
41
|
+
def _content_block_to_api(block: MessageContent) -> Dict[str, Any]:
    """Translate one content block into the Anthropic-style wire format.

    Dispatches on the block's ``type``; unrecognized types degrade to a
    plain text block. All field access goes through ``getattr`` so loosely
    shaped objects are tolerated.
    """
    kind = getattr(block, "type", None)

    if kind == "thinking":
        body = getattr(block, "thinking", None) or getattr(block, "text", None) or ""
        return {
            "type": "thinking",
            "thinking": body,
            "signature": getattr(block, "signature", None),
        }

    if kind == "redacted_thinking":
        payload = getattr(block, "data", None) or getattr(block, "text", None) or ""
        return {
            "type": "redacted_thinking",
            "data": payload,
            "signature": getattr(block, "signature", None),
        }

    if kind == "tool_use":
        # Providers disagree on the ID field name; accept either spelling.
        use_id = getattr(block, "id", None) or getattr(block, "tool_use_id", "") or ""
        return {
            "type": "tool_use",
            "id": use_id,
            "name": getattr(block, "name", None) or "",
            "input": getattr(block, "input", None) or {},
        }

    if kind == "tool_result":
        text_body = getattr(block, "text", None) or getattr(block, "content", None) or ""
        out: Dict[str, Any] = {
            "type": "tool_result",
            "tool_use_id": getattr(block, "tool_use_id", None) or getattr(block, "id", None) or "",
            "content": [{"type": "text", "text": text_body}],
        }
        # Only attach is_error when the block carries an explicit flag.
        err = getattr(block, "is_error", None)
        if err is not None:
            out["is_error"] = err
        return out

    # Default to text block
    return {
        "type": "text",
        "text": getattr(block, "text", None) or getattr(block, "content", None) or str(block),
    }
|
|
82
|
+
|
|
83
|
+
|
|
84
|
+
def _content_block_to_openai(block: MessageContent) -> Dict[str, Any]:
    """Convert a MessageContent block to OpenAI chat-completions tool call format.

    Returns an OpenAI-style message dict:
    - "tool_use" blocks become an assistant message carrying ``tool_calls``;
    - "tool_result" blocks become a ``role="tool"`` response, or an empty
      dict when no tool_call_id can be recovered (callers skip empty dicts);
    - anything else falls back to a plain assistant text message.
    """
    block_type = getattr(block, "type", None)
    if block_type == "tool_use":
        # Uses the module-level ``json`` import (the previous function-local
        # ``import json`` shadowed it redundantly).
        args = getattr(block, "input", None) or {}
        try:
            args_str = json.dumps(args)
        except (TypeError, ValueError) as exc:
            # Non-serializable arguments: log and send an empty object rather
            # than failing the whole request.
            logger.warning(
                "[_content_block_to_openai] Failed to serialize tool arguments: %s: %s",
                type(exc).__name__, exc,
            )
            args_str = "{}"
        # Accept either ID spelling; mint a UUID as a last resort so the
        # tool call is still well-formed.
        tool_call_id = (
            getattr(block, "id", None) or getattr(block, "tool_use_id", "") or str(uuid4())
        )
        return {
            "role": "assistant",
            "content": None,
            "tool_calls": [
                {
                    "id": tool_call_id,
                    "type": "function",
                    "function": {
                        "name": getattr(block, "name", None) or "",
                        "arguments": args_str,
                    },
                }
            ],
        }
    if block_type == "tool_result":
        # OpenAI expects role=tool messages after a tool call
        tool_call_id = getattr(block, "tool_use_id", None) or getattr(block, "id", None) or ""
        if not tool_call_id:
            # Without an ID the result cannot be paired with a call; drop it.
            logger.debug("[_content_block_to_openai] Skipping tool_result without tool_call_id")
            return {}
        return {
            "role": "tool",
            "tool_call_id": tool_call_id,
            "content": getattr(block, "text", None) or getattr(block, "content", None) or "",
        }
    # Fallback text message
    return {
        "role": "assistant",
        "content": getattr(block, "text", None) or getattr(block, "content", None) or str(block),
    }
|
|
132
|
+
|
|
133
|
+
|
|
134
|
+
class Message(BaseModel):
    """A single conversation turn exchanged with a model provider."""

    # Who produced the turn (user / assistant / system).
    role: MessageRole
    # Plain text, or a list of structured content blocks.
    content: Union[str, List[MessageContent]]
    # Optional provider-specific reasoning payload attached to the turn.
    reasoning: Optional[Any] = None
    # Free-form metadata propagated alongside the message.
    metadata: Dict[str, Any] = Field(default_factory=dict)
    uuid: str = ""

    def __init__(self, **data: object) -> None:
        # Backfill a fresh UUID whenever the caller omitted one or passed a
        # falsy value (e.g. "" or None).
        if not data.get("uuid"):
            data["uuid"] = str(uuid4())
        super().__init__(**data)
|
|
147
|
+
|
|
148
|
+
|
|
149
|
+
class UserMessage(BaseModel):
    """User message with tool results."""

    type: str = "user"
    # The underlying conversation turn.
    message: Message
    uuid: str = ""
    # Optional structured output of the tool call this message responds to.
    tool_use_result: Optional[object] = None

    def __init__(self, **data: object) -> None:
        # Mint a UUID when the caller did not supply a truthy one.
        if not data.get("uuid"):
            data["uuid"] = str(uuid4())
        super().__init__(**data)
|
|
161
|
+
|
|
162
|
+
|
|
163
|
+
class AssistantMessage(BaseModel):
    """Assistant message with metadata."""

    type: str = "assistant"
    # The underlying conversation turn.
    message: Message
    uuid: str = ""
    # Accounting for this response.
    cost_usd: float = 0.0
    duration_ms: float = 0.0
    # True when this message wraps a provider/API error rather than content.
    is_api_error_message: bool = False

    def __init__(self, **data: object) -> None:
        # Mint a UUID when the caller did not supply a truthy one.
        if not data.get("uuid"):
            data["uuid"] = str(uuid4())
        super().__init__(**data)
|
|
177
|
+
|
|
178
|
+
|
|
179
|
+
class ProgressMessage(BaseModel):
    """Progress message during tool execution."""

    type: str = "progress"
    uuid: str = ""
    # The tool invocation this progress update belongs to.
    tool_use_id: str
    # Arbitrary progress payload.
    content: Any
    # Messages already normalized for display alongside this update.
    normalized_messages: List[Message] = Field(default_factory=list)
    # IDs of tool calls running in the same batch as tool_use_id.
    sibling_tool_use_ids: set[str] = Field(default_factory=set)
    # Permit field types pydantic does not natively validate.
    model_config = ConfigDict(arbitrary_types_allowed=True)

    def __init__(self, **data: object) -> None:
        # Mint a UUID when the caller did not supply a truthy one.
        if not data.get("uuid"):
            data["uuid"] = str(uuid4())
        super().__init__(**data)
|
|
194
|
+
|
|
195
|
+
|
|
196
|
+
def create_user_message(
    content: Union[str, List[Dict[str, Any]]], tool_use_result: Optional[object] = None
) -> UserMessage:
    """Create a user message.

    Args:
        content: Plain text, or a list of raw content-block dicts that are
            coerced into ``MessageContent`` models.
        tool_use_result: Optional tool output to attach. Pydantic models are
            normalized to plain dicts via ``model_dump``.

    Returns:
        A ``UserMessage`` wrapping the constructed ``Message``.
    """
    if isinstance(content, str):
        message_content: Union[str, List[MessageContent]] = content
    else:
        message_content = [MessageContent(**item) for item in content]

    # Normalize tool_use_result to a dict if it's a Pydantic model
    if tool_use_result is not None:
        try:
            if hasattr(tool_use_result, "model_dump"):
                tool_use_result = tool_use_result.model_dump()
        except (AttributeError, TypeError, ValueError) as exc:
            # Fallback: keep as-is if conversion fails
            logger.warning(
                "[create_user_message] Failed to normalize tool_use_result: %s: %s",
                type(exc).__name__, exc,
            )

    message = Message(role=MessageRole.USER, content=message_content)

    # Debug: record tool_result shaping. Lazy %-style args so the formatting
    # work is skipped entirely when DEBUG logging is disabled (the previous
    # f-string was rendered eagerly on every call).
    if isinstance(message_content, list):
        tool_result_blocks = [
            blk for blk in message_content if getattr(blk, "type", None) == "tool_result"
        ]
        if tool_result_blocks:
            logger.debug(
                "[create_user_message] tool_result blocks=%d ids=%s",
                len(tool_result_blocks),
                [getattr(b, "tool_use_id", None) for b in tool_result_blocks],
            )

    return UserMessage(message=message, tool_use_result=tool_use_result)
|
|
231
|
+
|
|
232
|
+
|
|
233
|
+
def create_assistant_message(
    content: Union[str, List[Dict[str, Any]]],
    cost_usd: float = 0.0,
    duration_ms: float = 0.0,
    reasoning: Optional[Any] = None,
    metadata: Optional[Dict[str, Any]] = None,
) -> AssistantMessage:
    """Create an assistant message.

    ``content`` may be plain text or a list of raw content-block dicts,
    which are coerced into ``MessageContent`` models. Cost and duration are
    recorded on the resulting ``AssistantMessage``.
    """
    normalized_content: Union[str, List[MessageContent]]
    if isinstance(content, str):
        normalized_content = content
    else:
        normalized_content = [MessageContent(**raw_block) for raw_block in content]

    inner = Message(
        role=MessageRole.ASSISTANT,
        content=normalized_content,
        reasoning=reasoning,
        metadata=metadata or {},
    )
    return AssistantMessage(message=inner, cost_usd=cost_usd, duration_ms=duration_ms)
|
|
254
|
+
|
|
255
|
+
|
|
256
|
+
def create_progress_message(
    tool_use_id: str,
    sibling_tool_use_ids: set[str],
    content: Any,
    normalized_messages: Optional[List[Message]] = None,
) -> ProgressMessage:
    """Wrap streaming tool-execution output in a ProgressMessage."""
    return ProgressMessage(
        tool_use_id=tool_use_id,
        sibling_tool_use_ids=sibling_tool_use_ids,
        content=content,
        # Default to an empty list when no normalized messages were supplied.
        normalized_messages=[] if not normalized_messages else normalized_messages,
    )
|
|
269
|
+
|
|
270
|
+
|
|
271
|
+
def normalize_messages_for_api(
    messages: List[Union[UserMessage, AssistantMessage, ProgressMessage]],
    protocol: str = "anthropic",
    tool_mode: str = "native",
) -> List[Dict[str, Any]]:
    """Normalize messages for API submission.

    Progress messages are filtered out as they are not sent to the API.

    Accepts either the wrapper models above or plain dicts of the same
    shape. For ``protocol="openai"`` the Anthropic-style content blocks are
    re-shaped into chat-completions messages (``tool_calls`` / role=tool),
    and tool_use blocks with no later tool_result are dropped, since OpenAI
    rejects dangling tool calls.

    NOTE(review): ``tool_mode`` only influences the debug log line below;
    ``_flatten_blocks_to_text`` is defined but never called here —
    presumably both belong to an unfinished "text" tool mode. Confirm
    before removing.
    """

    # --- Accessors tolerant of both model objects and plain dicts ---------

    def _msg_type(msg: Any) -> Optional[str]:
        # "user" / "assistant" / "progress", or None when undetectable.
        if hasattr(msg, "type"):
            return getattr(msg, "type", None)
        if isinstance(msg, dict):
            return msg.get("type")
        return None

    def _msg_content(msg: Any) -> Any:
        # Inner Message content; dicts may nest it under "message" or hold
        # a top-level "content" key.
        if hasattr(msg, "message"):
            return getattr(getattr(msg, "message", None), "content", None)
        if isinstance(msg, dict):
            message_payload = msg.get("message")
            if isinstance(message_payload, dict):
                return message_payload.get("content")
            if "content" in msg:
                return msg.get("content")
        return None

    def _msg_metadata(msg: Any) -> Dict[str, Any]:
        # Copy of the inner message's metadata dict, with any separate
        # "reasoning" value folded in under the "reasoning" key.
        message_obj = getattr(msg, "message", None)
        if message_obj is not None and hasattr(message_obj, "metadata"):
            try:
                meta = getattr(message_obj, "metadata", {}) or {}
                meta_dict = dict(meta) if isinstance(meta, dict) else {}
            except (TypeError, ValueError):
                meta_dict = {}
            reasoning_val = getattr(message_obj, "reasoning", None)
            if reasoning_val is not None and "reasoning" not in meta_dict:
                meta_dict["reasoning"] = reasoning_val
            return meta_dict
        if isinstance(msg, dict):
            message_payload = msg.get("message")
            if isinstance(message_payload, dict):
                meta = message_payload.get("metadata") or {}
                meta_dict = dict(meta) if isinstance(meta, dict) else {}
                if "reasoning" not in meta_dict and "reasoning" in message_payload:
                    meta_dict["reasoning"] = message_payload.get("reasoning")
                return meta_dict
        return {}

    def _block_type(block: Any) -> Optional[str]:
        # Block "type" for both model objects and dict blocks.
        if hasattr(block, "type"):
            return getattr(block, "type", None)
        if isinstance(block, dict):
            return block.get("type")
        return None

    def _block_attr(block: Any, attr: str, default: Any = None) -> Any:
        # Generic attribute/key lookup for both block representations.
        # NOTE(review): the main loops below use bare getattr() instead of
        # this helper, so dict-shaped blocks would not be recognized there —
        # verify whether dict blocks can actually reach this function.
        if hasattr(block, attr):
            return getattr(block, attr, default)
        if isinstance(block, dict):
            return block.get(attr, default)
        return default

    def _flatten_blocks_to_text(blocks: List[Any]) -> str:
        # Render a block list as one human-readable text blob, labeling tool
        # calls and results. NOTE(review): currently unused in this function.
        parts: List[str] = []
        for blk in blocks:
            btype = _block_type(blk)
            if btype == "text":
                text = _block_attr(blk, "text") or _block_attr(blk, "content") or ""
                if text:
                    parts.append(str(text))
            elif btype == "tool_result":
                text = _block_attr(blk, "text") or _block_attr(blk, "content") or ""
                tool_id = _block_attr(blk, "tool_use_id") or _block_attr(blk, "id")
                prefix = "Tool error" if _block_attr(blk, "is_error") else "Tool result"
                label = f"{prefix}{f' ({tool_id})' if tool_id else ''}"
                parts.append(f"{label}: {text}" if text else label)
            elif btype == "tool_use":
                name = _block_attr(blk, "name") or ""
                input_data = _block_attr(blk, "input")
                input_preview = ""
                if input_data not in (None, {}):
                    try:
                        input_preview = json.dumps(input_data)
                    except (TypeError, ValueError):
                        input_preview = str(input_data)
                tool_id = _block_attr(blk, "tool_use_id") or _block_attr(blk, "id")
                desc = "Tool call"
                if name:
                    desc += f" {name}"
                if tool_id:
                    desc += f" ({tool_id})"
                if input_preview:
                    desc += f": {input_preview}"
                parts.append(desc)
            else:
                # Unknown block type: keep any text it carries.
                text = _block_attr(blk, "text") or _block_attr(blk, "content") or ""
                if text:
                    parts.append(str(text))
        return "\n".join(p for p in parts if p)

    # Unknown tool modes collapse to "native"; only used for logging below.
    effective_tool_mode = (tool_mode or "native").lower()
    if effective_tool_mode not in {"native", "text"}:
        effective_tool_mode = "native"

    normalized: List[Dict[str, Any]] = []
    tool_results_seen = 0
    tool_uses_seen = 0

    # Precompute tool_result positions so we can drop dangling tool_calls that
    # lack a following tool response (which OpenAI rejects).
    tool_result_positions: Dict[str, int] = {}
    skipped_tool_uses_no_result = 0
    skipped_tool_uses_no_id = 0
    if protocol == "openai":
        # Map tool_use_id -> index of the first user message answering it.
        for idx, msg in enumerate(messages):
            if _msg_type(msg) != "user":
                continue
            content = _msg_content(msg)
            if not isinstance(content, list):
                continue
            for block in content:
                if getattr(block, "type", None) == "tool_result":
                    tool_id = getattr(block, "tool_use_id", None) or getattr(block, "id", None)
                    if tool_id and tool_id not in tool_result_positions:
                        tool_result_positions[tool_id] = idx

    for msg_index, msg in enumerate(messages):
        msg_type = _msg_type(msg)
        if msg_type == "progress":
            # Skip progress messages
            continue
        if msg_type is None:
            # Unrecognizable payloads are dropped silently.
            continue

        if msg_type == "user":
            user_content = _msg_content(msg)
            meta = _msg_metadata(msg)  # only consumed on the OpenAI path
            if isinstance(user_content, list):
                if protocol == "openai":
                    # Map each block to an OpenAI-style message
                    openai_msgs: List[Dict[str, Any]] = []
                    for block in user_content:
                        if getattr(block, "type", None) == "tool_result":
                            tool_results_seen += 1
                        mapped = _content_block_to_openai(block)
                        if mapped:
                            openai_msgs.append(mapped)
                    if meta and openai_msgs:
                        # Propagate any reasoning fields onto every mapped message.
                        for candidate in openai_msgs:
                            for key in ("reasoning_content", "reasoning_details", "reasoning"):
                                if key in meta and meta[key] is not None:
                                    candidate[key] = meta[key]
                    normalized.extend(openai_msgs)
                    continue
                # Anthropic-style: keep one user message with block content.
                api_blocks = []
                for block in user_content:
                    if getattr(block, "type", None) == "tool_result":
                        tool_results_seen += 1
                    api_blocks.append(_content_block_to_api(block))
                normalized.append({"role": "user", "content": api_blocks})
            else:
                # String (or other non-list) content passes through untouched.
                normalized.append({"role": "user", "content": user_content})  # type: ignore
        elif msg_type == "assistant":
            asst_content = _msg_content(msg)
            meta = _msg_metadata(msg)
            if isinstance(asst_content, list):
                if protocol == "openai":
                    # Split blocks into text, tool calls, and everything else.
                    assistant_openai_msgs: List[Dict[str, Any]] = []
                    tool_calls: List[Dict[str, Any]] = []
                    text_parts: List[str] = []
                    for block in asst_content:
                        if getattr(block, "type", None) == "tool_use":
                            tool_uses_seen += 1
                            tool_id = getattr(block, "tool_use_id", None) or getattr(
                                block, "id", None
                            )
                            if not tool_id:
                                skipped_tool_uses_no_id += 1
                                continue
                            # Skip tool_use blocks that are not followed by a tool_result
                            result_pos = tool_result_positions.get(tool_id)
                            if result_pos is None:
                                skipped_tool_uses_no_result += 1
                                continue
                            if result_pos <= msg_index:
                                # Result appeared before this call: also dangling.
                                skipped_tool_uses_no_result += 1
                                continue
                            mapped = _content_block_to_openai(block)
                            if mapped.get("tool_calls"):
                                tool_calls.extend(mapped["tool_calls"])
                        elif getattr(block, "type", None) == "text":
                            text_parts.append(getattr(block, "text", "") or "")
                        else:
                            mapped = _content_block_to_openai(block)
                            if mapped:
                                assistant_openai_msgs.append(mapped)
                    if text_parts:
                        assistant_openai_msgs.append(
                            {"role": "assistant", "content": "\n".join(text_parts)}
                        )
                    if tool_calls:
                        assistant_openai_msgs.append(
                            {
                                "role": "assistant",
                                "content": None,
                                "tool_calls": tool_calls,
                            }
                        )
                    if meta and assistant_openai_msgs:
                        # Reasoning fields go on the last emitted message only.
                        for key in ("reasoning_content", "reasoning_details", "reasoning"):
                            if key in meta and meta[key] is not None:
                                assistant_openai_msgs[-1][key] = meta[key]
                    normalized.extend(assistant_openai_msgs)
                    continue
                # Anthropic-style assistant message with block content.
                api_blocks = []
                for block in asst_content:
                    if getattr(block, "type", None) == "tool_use":
                        tool_uses_seen += 1
                    api_blocks.append(_content_block_to_api(block))
                normalized.append({"role": "assistant", "content": api_blocks})
            else:
                normalized.append({"role": "assistant", "content": asst_content})  # type: ignore

    logger.debug(
        f"[normalize_messages_for_api] protocol={protocol} tool_mode={effective_tool_mode} "
        f"input_msgs={len(messages)} normalized={len(normalized)} "
        f"tool_results_seen={tool_results_seen} tool_uses_seen={tool_uses_seen} "
        f"tool_result_positions={len(tool_result_positions)} "
        f"skipped_tool_uses_no_result={skipped_tool_uses_no_result} "
        f"skipped_tool_uses_no_id={skipped_tool_uses_no_id}"
    )
    return normalized
|
|
505
|
+
|
|
506
|
+
|
|
507
|
+
# Special interrupt messages
# Canonical strings surfaced when the user cancels an in-flight request or a
# running tool; create_tool_result_stop_message reuses the tool-use variant.
INTERRUPT_MESSAGE = "Request was interrupted by user."
INTERRUPT_MESSAGE_FOR_TOOL_USE = "Tool execution was interrupted by user."
|
|
510
|
+
|
|
511
|
+
|
|
512
|
+
def create_tool_result_stop_message(tool_use_id: str) -> Dict[str, Any]:
    """Build the synthetic tool_result block recorded when a tool run is
    interrupted by the user; always flagged as an error."""
    stop_block: Dict[str, Any] = {
        "type": "tool_result",
        "tool_use_id": tool_use_id,
        "text": INTERRUPT_MESSAGE_FOR_TOOL_USE,
        "is_error": True,
    }
    return stop_block
|