ripperdoc 0.2.8__py3-none-any.whl → 0.2.10__py3-none-any.whl

This diff compares publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
Files changed (94)
  1. ripperdoc/__init__.py +1 -1
  2. ripperdoc/cli/cli.py +257 -123
  3. ripperdoc/cli/commands/__init__.py +2 -1
  4. ripperdoc/cli/commands/agents_cmd.py +138 -8
  5. ripperdoc/cli/commands/clear_cmd.py +9 -4
  6. ripperdoc/cli/commands/config_cmd.py +1 -1
  7. ripperdoc/cli/commands/context_cmd.py +3 -2
  8. ripperdoc/cli/commands/doctor_cmd.py +18 -4
  9. ripperdoc/cli/commands/exit_cmd.py +1 -0
  10. ripperdoc/cli/commands/hooks_cmd.py +27 -53
  11. ripperdoc/cli/commands/models_cmd.py +27 -10
  12. ripperdoc/cli/commands/permissions_cmd.py +27 -9
  13. ripperdoc/cli/commands/resume_cmd.py +9 -3
  14. ripperdoc/cli/commands/stats_cmd.py +244 -0
  15. ripperdoc/cli/commands/status_cmd.py +4 -4
  16. ripperdoc/cli/commands/tasks_cmd.py +8 -4
  17. ripperdoc/cli/ui/file_mention_completer.py +2 -1
  18. ripperdoc/cli/ui/interrupt_handler.py +2 -3
  19. ripperdoc/cli/ui/message_display.py +4 -2
  20. ripperdoc/cli/ui/panels.py +1 -0
  21. ripperdoc/cli/ui/provider_options.py +247 -0
  22. ripperdoc/cli/ui/rich_ui.py +403 -81
  23. ripperdoc/cli/ui/spinner.py +54 -18
  24. ripperdoc/cli/ui/thinking_spinner.py +1 -2
  25. ripperdoc/cli/ui/tool_renderers.py +8 -2
  26. ripperdoc/cli/ui/wizard.py +213 -0
  27. ripperdoc/core/agents.py +19 -6
  28. ripperdoc/core/config.py +51 -17
  29. ripperdoc/core/custom_commands.py +7 -6
  30. ripperdoc/core/default_tools.py +101 -12
  31. ripperdoc/core/hooks/config.py +1 -3
  32. ripperdoc/core/hooks/events.py +27 -28
  33. ripperdoc/core/hooks/executor.py +4 -6
  34. ripperdoc/core/hooks/integration.py +12 -21
  35. ripperdoc/core/hooks/llm_callback.py +59 -0
  36. ripperdoc/core/hooks/manager.py +40 -15
  37. ripperdoc/core/permissions.py +118 -12
  38. ripperdoc/core/providers/anthropic.py +109 -36
  39. ripperdoc/core/providers/gemini.py +70 -5
  40. ripperdoc/core/providers/openai.py +89 -24
  41. ripperdoc/core/query.py +273 -68
  42. ripperdoc/core/query_utils.py +2 -0
  43. ripperdoc/core/skills.py +9 -3
  44. ripperdoc/core/system_prompt.py +4 -2
  45. ripperdoc/core/tool.py +17 -8
  46. ripperdoc/sdk/client.py +79 -4
  47. ripperdoc/tools/ask_user_question_tool.py +5 -3
  48. ripperdoc/tools/background_shell.py +307 -135
  49. ripperdoc/tools/bash_output_tool.py +1 -1
  50. ripperdoc/tools/bash_tool.py +63 -24
  51. ripperdoc/tools/dynamic_mcp_tool.py +29 -8
  52. ripperdoc/tools/enter_plan_mode_tool.py +1 -1
  53. ripperdoc/tools/exit_plan_mode_tool.py +1 -1
  54. ripperdoc/tools/file_edit_tool.py +167 -54
  55. ripperdoc/tools/file_read_tool.py +28 -4
  56. ripperdoc/tools/file_write_tool.py +13 -10
  57. ripperdoc/tools/glob_tool.py +3 -2
  58. ripperdoc/tools/grep_tool.py +3 -2
  59. ripperdoc/tools/kill_bash_tool.py +1 -1
  60. ripperdoc/tools/ls_tool.py +1 -1
  61. ripperdoc/tools/lsp_tool.py +615 -0
  62. ripperdoc/tools/mcp_tools.py +13 -10
  63. ripperdoc/tools/multi_edit_tool.py +8 -7
  64. ripperdoc/tools/notebook_edit_tool.py +7 -4
  65. ripperdoc/tools/skill_tool.py +1 -1
  66. ripperdoc/tools/task_tool.py +519 -69
  67. ripperdoc/tools/todo_tool.py +2 -2
  68. ripperdoc/tools/tool_search_tool.py +3 -2
  69. ripperdoc/utils/conversation_compaction.py +9 -5
  70. ripperdoc/utils/file_watch.py +214 -5
  71. ripperdoc/utils/json_utils.py +2 -1
  72. ripperdoc/utils/lsp.py +806 -0
  73. ripperdoc/utils/mcp.py +11 -3
  74. ripperdoc/utils/memory.py +4 -2
  75. ripperdoc/utils/message_compaction.py +21 -7
  76. ripperdoc/utils/message_formatting.py +14 -7
  77. ripperdoc/utils/messages.py +126 -67
  78. ripperdoc/utils/path_ignore.py +35 -8
  79. ripperdoc/utils/permissions/path_validation_utils.py +2 -1
  80. ripperdoc/utils/permissions/shell_command_validation.py +427 -91
  81. ripperdoc/utils/permissions/tool_permission_utils.py +174 -15
  82. ripperdoc/utils/safe_get_cwd.py +2 -1
  83. ripperdoc/utils/session_heatmap.py +244 -0
  84. ripperdoc/utils/session_history.py +13 -6
  85. ripperdoc/utils/session_stats.py +293 -0
  86. ripperdoc/utils/todo.py +2 -1
  87. ripperdoc/utils/token_estimation.py +6 -1
  88. {ripperdoc-0.2.8.dist-info → ripperdoc-0.2.10.dist-info}/METADATA +8 -2
  89. ripperdoc-0.2.10.dist-info/RECORD +129 -0
  90. ripperdoc-0.2.8.dist-info/RECORD +0 -121
  91. {ripperdoc-0.2.8.dist-info → ripperdoc-0.2.10.dist-info}/WHEEL +0 -0
  92. {ripperdoc-0.2.8.dist-info → ripperdoc-0.2.10.dist-info}/entry_points.txt +0 -0
  93. {ripperdoc-0.2.8.dist-info → ripperdoc-0.2.10.dist-info}/licenses/LICENSE +0 -0
  94. {ripperdoc-0.2.8.dist-info → ripperdoc-0.2.10.dist-info}/top_level.txt +0 -0
ripperdoc/utils/mcp.py CHANGED
@@ -92,7 +92,8 @@ def _ensure_str_dict(raw: object) -> Dict[str, str]:
         except (TypeError, ValueError) as exc:
             logger.warning(
                 "[mcp] Failed to coerce env/header value to string: %s: %s",
-                type(exc).__name__, exc,
+                type(exc).__name__,
+                exc,
                 extra={"key": key},
             )
             continue
@@ -365,10 +366,17 @@ class McpRuntime:
                     "capabilities": list(info.capabilities.keys()),
                 },
             )
-        except (OSError, RuntimeError, ConnectionError, ValueError, TimeoutError) as exc:  # pragma: no cover - network/process errors
+        except (
+            OSError,
+            RuntimeError,
+            ConnectionError,
+            ValueError,
+            TimeoutError,
+        ) as exc:  # pragma: no cover - network/process errors
            logger.warning(
                "Failed to connect to MCP server: %s: %s",
-               type(exc).__name__, exc,
+               type(exc).__name__,
+               exc,
                extra={"server": config.name},
            )
            info.status = "failed"
ripperdoc/utils/memory.py CHANGED
@@ -48,7 +48,8 @@ def _is_path_under_directory(path: Path, directory: Path) -> bool:
     except (ValueError, OSError) as exc:
         logger.warning(
             "[memory] Failed to compare path containment: %s: %s",
-            type(exc).__name__, exc,
+            type(exc).__name__,
+            exc,
             extra={"path": str(path), "directory": str(directory)},
         )
         return False
@@ -126,7 +127,8 @@ def _collect_files(
         except (OSError, ValueError) as exc:
             logger.warning(
                 "[memory] Failed to resolve memory file path: %s: %s",
-                type(exc).__name__, exc,
+                type(exc).__name__,
+                exc,
                 extra={"path": str(resolved_path)},
             )
ripperdoc/utils/message_compaction.py CHANGED
@@ -27,7 +27,7 @@ MAX_TOKENS_SOFT = 20_000
 MAX_TOKENS_HARD = 40_000
 MAX_TOOL_USES_TO_PRESERVE = 3
 IMAGE_TOKEN_COST = 2_000
-AUTO_COMPACT_BUFFER = 13_000
+AUTO_COMPACT_BUFFER = 24_000
 WARNING_THRESHOLD = 20_000
 ERROR_THRESHOLD = 20_000
 MICRO_PLACEHOLDER = "[Old tool result content cleared]"
@@ -270,7 +270,9 @@ def get_remaining_context_tokens(
     """Context window minus configured output tokens."""
     context_limit = max(get_model_context_limit(model_profile, explicit_limit), MIN_CONTEXT_TOKENS)
     try:
-        max_output_tokens = int(getattr(model_profile, "max_tokens", 0) or 0) if model_profile else 0
+        max_output_tokens = (
+            int(getattr(model_profile, "max_tokens", 0) or 0) if model_profile else 0
+        )
     except (TypeError, ValueError):
         max_output_tokens = 0
     return max(MIN_CONTEXT_TOKENS, context_limit - max(0, max_output_tokens))
@@ -298,7 +300,9 @@ def get_context_usage_status(
     )

     tokens_left = max(effective_limit - used_tokens, 0)
-    percent_left = 0.0 if effective_limit <= 0 else min(100.0, (tokens_left / effective_limit) * 100)
+    percent_left = (
+        0.0 if effective_limit <= 0 else min(100.0, (tokens_left / effective_limit) * 100)
+    )
     percent_used = 100.0 - percent_left

     warning_limit = max(0, effective_limit - WARNING_THRESHOLD)
@@ -419,7 +423,9 @@ def _estimate_message_tokens(content_block: Any) -> int:
     if isinstance(content, list):
         total = 0
         for part in content:
-            part_type = getattr(part, "type", None) or (part.get("type") if isinstance(part, dict) else None)
+            part_type = getattr(part, "type", None) or (
+                part.get("type") if isinstance(part, dict) else None
+            )
             if part_type == "text":
                 text_val = getattr(part, "text", None) if hasattr(part, "text") else None
                 if text_val is None and isinstance(part, dict):
@@ -501,7 +507,9 @@ def micro_compact_messages(
             token_counts_by_tool_use_id[tool_use_id] = token_count

     latest_tool_use_ids = (
-        tool_use_ids_to_compact[-MAX_TOOL_USES_TO_PRESERVE:] if MAX_TOOL_USES_TO_PRESERVE > 0 else []
+        tool_use_ids_to_compact[-MAX_TOOL_USES_TO_PRESERVE:]
+        if MAX_TOOL_USES_TO_PRESERVE > 0
+        else []
     )
     total_token_count = sum(token_counts_by_tool_use_id.values())
@@ -525,7 +533,9 @@ def micro_compact_messages(
         messages, protocol=protocol, precomputed_total_tokens=tokens_before
     )
     status = get_context_usage_status(
-        usage_tokens, max_context_tokens=context_limit, auto_compact_enabled=resolved_auto_compact
+        usage_tokens,
+        max_context_tokens=context_limit,
+        auto_compact_enabled=resolved_auto_compact,
     )
     if not status.is_above_warning_threshold or total_tokens_removed < MAX_TOKENS_SOFT:
         ids_to_remove.clear()
@@ -571,7 +581,11 @@ def micro_compact_messages(
                 new_block = content_item.model_copy()
                 new_block.text = MICRO_PLACEHOLDER
             else:
-                block_dict = dict(content_item) if isinstance(content_item, dict) else {"type": "tool_result"}
+                block_dict = (
+                    dict(content_item)
+                    if isinstance(content_item, dict)
+                    else {"type": "tool_result"}
+                )
                 block_dict["text"] = MICRO_PLACEHOLDER
                 block_dict["tool_use_id"] = tool_use_id
                 new_block = MessageContent(**block_dict)
ripperdoc/utils/message_formatting.py CHANGED
@@ -12,9 +12,7 @@ from ripperdoc.utils.messages import UserMessage, AssistantMessage, ProgressMessage
 ConversationMessage = Union[UserMessage, AssistantMessage, ProgressMessage]


-def stringify_message_content(
-    content: Any, *, include_tool_details: bool = False
-) -> str:
+def stringify_message_content(content: Any, *, include_tool_details: bool = False) -> str:
     """Convert message content to plain string.

     Args:
@@ -128,14 +126,16 @@ def format_tool_result_detail(result_text: str, is_error: bool = False) -> str:
     return f"{prefix}: {result_preview}"


-def format_reasoning_preview(reasoning: Any) -> str:
+def format_reasoning_preview(reasoning: Any, show_full_thinking: bool = False) -> str:
     """Return a short preview of reasoning/thinking content.

     Args:
         reasoning: The reasoning content (string, list, or other).
+        show_full_thinking: If True, return full reasoning content without truncation.
+            If False, return a truncated preview (max 250 chars).

     Returns:
-        A short preview string (max ~80 chars with ellipsis).
+        A short preview string or full reasoning content.
     """
     if reasoning is None:
         return ""
@@ -151,11 +151,18 @@ def format_reasoning_preview(reasoning: Any) -> str:
         text = "\n".join(p for p in parts if p)
     else:
         text = str(reasoning)
+
+    if show_full_thinking:
+        return text
+
     lines = text.strip().splitlines()
     if not lines:
         return ""
-    preview = lines[0][:80]
-    if len(lines) > 1 or len(lines[0]) > 80:
+    first_line = lines[0]
+    if not first_line:
+        return "..." if len(lines) > 1 else ""
+    preview = first_line[:250]
+    if len(lines) > 1 or len(first_line) > 250:
         preview += "..."
     return preview
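
Usage sketch for the updated helper, assuming the import path matches the file list above (ripperdoc/utils/message_formatting.py):

from ripperdoc.utils.message_formatting import format_reasoning_preview

reasoning = "Step 1: inspect the failing test.\nStep 2: patch the fixture."
# Default: first line only, capped at 250 chars, "..." appended for multi-line input.
print(format_reasoning_preview(reasoning))
# New in 0.2.10: pass show_full_thinking=True to get the untruncated text.
print(format_reasoning_preview(reasoning, show_full_thinking=True))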
ripperdoc/utils/messages.py CHANGED
@@ -4,7 +4,6 @@ This module provides utilities for creating and normalizing messages
 for communication with AI models.
 """

-import json
 from typing import Any, Dict, List, Optional, Union
 from pydantic import BaseModel, ConfigDict, Field
 from uuid import uuid4
@@ -93,7 +92,8 @@ def _content_block_to_openai(block: MessageContent) -> Dict[str, Any]:
     except (TypeError, ValueError) as exc:
         logger.warning(
             "[_content_block_to_openai] Failed to serialize tool arguments: %s: %s",
-            type(exc).__name__, exc,
+            type(exc).__name__,
+            exc,
         )
         args_str = "{}"
     tool_call_id = (
@@ -169,6 +169,12 @@ class AssistantMessage(BaseModel):
     cost_usd: float = 0.0
     duration_ms: float = 0.0
     is_api_error_message: bool = False
+    # Model and token usage information
+    model: Optional[str] = None
+    input_tokens: int = 0
+    output_tokens: int = 0
+    cache_read_tokens: int = 0
+    cache_creation_tokens: int = 0

     def __init__(self, **data: object) -> None:
         if "uuid" not in data or not data["uuid"]:
@@ -211,7 +217,8 @@ def create_user_message(
             # Fallback: keep as-is if conversion fails
             logger.warning(
                 "[create_user_message] Failed to normalize tool_use_result: %s: %s",
-                type(exc).__name__, exc,
+                type(exc).__name__,
+                exc,
             )

     message = Message(role=MessageRole.USER, content=message_content)
@@ -236,6 +243,11 @@ def create_assistant_message(
     duration_ms: float = 0.0,
     reasoning: Optional[Any] = None,
     metadata: Optional[Dict[str, Any]] = None,
+    model: Optional[str] = None,
+    input_tokens: int = 0,
+    output_tokens: int = 0,
+    cache_read_tokens: int = 0,
+    cache_creation_tokens: int = 0,
 ) -> AssistantMessage:
     """Create an assistant message."""
     if isinstance(content, str):
@@ -250,7 +262,16 @@ def create_assistant_message(
         metadata=metadata or {},
     )

-    return AssistantMessage(message=message, cost_usd=cost_usd, duration_ms=duration_ms)
+    return AssistantMessage(
+        message=message,
+        cost_usd=cost_usd,
+        duration_ms=duration_ms,
+        model=model,
+        input_tokens=input_tokens,
+        output_tokens=output_tokens,
+        cache_read_tokens=cache_read_tokens,
+        cache_creation_tokens=cache_creation_tokens,
+    )


 def create_progress_message(
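
The new usage fields flow straight from the factory into AssistantMessage. A sketch of the expanded call, using only parameters visible in this diff (the literal values and model id are invented):

from ripperdoc.utils.messages import create_assistant_message

msg = create_assistant_message(
    "Done - updated the failing test.",
    cost_usd=0.0042,
    duration_ms=1850.0,
    model="example-model-id",  # hypothetical id
    input_tokens=12_345,
    output_tokens=678,
    cache_read_tokens=9_000,
    cache_creation_tokens=0,
)
assert msg.input_tokens == 12_345 and msg.model == "example-model-id"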
@@ -268,14 +289,80 @@ def create_progress_message(
     )


+def _apply_deepseek_reasoning_content(
+    normalized: List[Dict[str, Any]],
+    is_new_turn: bool = False,
+) -> List[Dict[str, Any]]:
+    """Apply DeepSeek reasoning_content handling to normalized messages.
+
+    DeepSeek thinking mode requires special handling for tool calls:
+    1. During a tool call loop (same turn), reasoning_content MUST be preserved
+       in assistant messages that contain tool_calls.
+    2. When a new user turn starts, we can optionally clear previous
+       reasoning_content to save bandwidth (the API will ignore them anyway).
+
+    According to DeepSeek docs, an assistant message with tool_calls should look like:
+        {
+            'role': 'assistant',
+            'content': response.choices[0].message.content,
+            'reasoning_content': response.choices[0].message.reasoning_content,
+            'tool_calls': response.choices[0].message.tool_calls,
+        }
+
+    Args:
+        normalized: The normalized messages list.
+        is_new_turn: If True, clear reasoning_content from historical messages
+            to save network bandwidth.
+
+    Returns:
+        The processed messages list.
+    """
+    if not normalized:
+        return normalized
+
+    # Find the last user message index to determine the current turn boundary
+    last_user_idx = -1
+    for idx in range(len(normalized) - 1, -1, -1):
+        if normalized[idx].get("role") == "user":
+            last_user_idx = idx
+            break
+
+    if is_new_turn and last_user_idx > 0:
+        # Clear reasoning_content from messages before the last user message.
+        # This is optional but recommended by DeepSeek to save bandwidth.
+        for idx in range(last_user_idx):
+            msg = normalized[idx]
+            if msg.get("role") == "assistant" and "reasoning_content" in msg:
+                # Set to None instead of deleting to match DeepSeek's example
+                msg["reasoning_content"] = None
+
+    # Validate: ensure all assistant messages with tool_calls have
+    # reasoning_content within the current turn (after last_user_idx)
+    for idx in range(max(0, last_user_idx), len(normalized)):
+        msg = normalized[idx]
+        if msg.get("role") == "assistant" and msg.get("tool_calls"):
+            if "reasoning_content" not in msg:
+                # This is a problem - DeepSeek requires reasoning_content for tool_calls
+                logger.warning(
+                    f"[deepseek] Assistant message at index {idx} has tool_calls "
+                    f"but missing reasoning_content - this may cause API errors"
+                )
+
+    return normalized
+
+
 def normalize_messages_for_api(
     messages: List[Union[UserMessage, AssistantMessage, ProgressMessage]],
     protocol: str = "anthropic",
     tool_mode: str = "native",
+    thinking_mode: Optional[str] = None,
 ) -> List[Dict[str, Any]]:
     """Normalize messages for API submission.

     Progress messages are filtered out as they are not sent to the API.
+
+    For DeepSeek thinking mode, this function ensures reasoning_content is properly
+    included in assistant messages that contain tool_calls, as required by the API.
     """

     def _msg_type(msg: Any) -> Optional[str]:
@@ -318,58 +405,6 @@ def normalize_messages_for_api(
             return meta_dict
         return {}

-    def _block_type(block: Any) -> Optional[str]:
-        if hasattr(block, "type"):
-            return getattr(block, "type", None)
-        if isinstance(block, dict):
-            return block.get("type")
-        return None
-
-    def _block_attr(block: Any, attr: str, default: Any = None) -> Any:
-        if hasattr(block, attr):
-            return getattr(block, attr, default)
-        if isinstance(block, dict):
-            return block.get(attr, default)
-        return default
-
-    def _flatten_blocks_to_text(blocks: List[Any]) -> str:
-        parts: List[str] = []
-        for blk in blocks:
-            btype = _block_type(blk)
-            if btype == "text":
-                text = _block_attr(blk, "text") or _block_attr(blk, "content") or ""
-                if text:
-                    parts.append(str(text))
-            elif btype == "tool_result":
-                text = _block_attr(blk, "text") or _block_attr(blk, "content") or ""
-                tool_id = _block_attr(blk, "tool_use_id") or _block_attr(blk, "id")
-                prefix = "Tool error" if _block_attr(blk, "is_error") else "Tool result"
-                label = f"{prefix}{f' ({tool_id})' if tool_id else ''}"
-                parts.append(f"{label}: {text}" if text else label)
-            elif btype == "tool_use":
-                name = _block_attr(blk, "name") or ""
-                input_data = _block_attr(blk, "input")
-                input_preview = ""
-                if input_data not in (None, {}):
-                    try:
-                        input_preview = json.dumps(input_data)
-                    except (TypeError, ValueError):
-                        input_preview = str(input_data)
-                tool_id = _block_attr(blk, "tool_use_id") or _block_attr(blk, "id")
-                desc = "Tool call"
-                if name:
-                    desc += f" {name}"
-                if tool_id:
-                    desc += f" ({tool_id})"
-                if input_preview:
-                    desc += f": {input_preview}"
-                parts.append(desc)
-            else:
-                text = _block_attr(blk, "text") or _block_attr(blk, "content") or ""
-                if text:
-                    parts.append(str(text))
-        return "\n".join(p for p in parts if p)
-
     effective_tool_mode = (tool_mode or "native").lower()
     if effective_tool_mode not in {"native", "text"}:
         effective_tool_mode = "native"
@@ -426,7 +461,9 @@ def normalize_messages_for_api(
                 if block_type == "tool_result":
                     tool_results_seen += 1
                     # Skip tool_result blocks that lack a preceding tool_use
-                    tool_id = getattr(block, "tool_use_id", None) or getattr(block, "id", None)
+                    tool_id = getattr(block, "tool_use_id", None) or getattr(
+                        block, "id", None
+                    )
                     if not tool_id:
                         skipped_tool_results_no_call += 1
                         continue
@@ -486,19 +523,35 @@ def normalize_messages_for_api(
                     mapped = _content_block_to_openai(block)
                     if mapped:
                         assistant_openai_msgs.append(mapped)
-            if text_parts:
-                assistant_openai_msgs.append(
-                    {"role": "assistant", "content": "\n".join(text_parts)}
-                )
             if tool_calls:
+                # For DeepSeek thinking mode, we must include reasoning_content
+                # in the assistant message that contains tool_calls
+                tool_call_msg: Dict[str, Any] = {
+                    "role": "assistant",
+                    "content": "\n".join(text_parts) if text_parts else None,
+                    "tool_calls": tool_calls,
+                }
+                # Add reasoning_content if present (required for DeepSeek thinking mode)
+                reasoning_content = meta.get("reasoning_content") if meta else None
+                if reasoning_content is not None:
+                    tool_call_msg["reasoning_content"] = reasoning_content
+                    logger.debug(
+                        f"[normalize_messages_for_api] Added reasoning_content to "
+                        f"tool_call message (len={len(str(reasoning_content))})"
+                    )
+                elif thinking_mode == "deepseek":
+                    logger.warning(
+                        f"[normalize_messages_for_api] DeepSeek mode: assistant "
+                        f"message with tool_calls but no reasoning_content in metadata. "
+                        f"meta_keys={list(meta.keys()) if meta else []}"
+                    )
+                assistant_openai_msgs.append(tool_call_msg)
+            elif text_parts:
                 assistant_openai_msgs.append(
-                    {
-                        "role": "assistant",
-                        "content": None,
-                        "tool_calls": tool_calls,
-                    }
+                    {"role": "assistant", "content": "\n".join(text_parts)}
                 )
-            if meta and assistant_openai_msgs:
+            # For non-tool-call messages, add reasoning metadata to the last message
+            if meta and assistant_openai_msgs and not tool_calls:
                 for key in ("reasoning_content", "reasoning_details", "reasoning"):
                     if key in meta and meta[key] is not None:
                         assistant_openai_msgs[-1][key] = meta[key]
@@ -515,6 +568,7 @@ def normalize_messages_for_api(

     logger.debug(
         f"[normalize_messages_for_api] protocol={protocol} tool_mode={effective_tool_mode} "
+        f"thinking_mode={thinking_mode} "
         f"input_msgs={len(messages)} normalized={len(normalized)} "
         f"tool_results_seen={tool_results_seen} tool_uses_seen={tool_uses_seen} "
         f"tool_result_positions={len(tool_result_positions)} "
@@ -523,6 +577,11 @@ def normalize_messages_for_api(
         f"skipped_tool_uses_no_id={skipped_tool_uses_no_id} "
         f"skipped_tool_results_no_call={skipped_tool_results_no_call}"
     )
+
+    # Apply DeepSeek-specific reasoning_content handling
+    if thinking_mode == "deepseek":
+        normalized = _apply_deepseek_reasoning_content(normalized, is_new_turn=False)
+
     return normalized
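
For context on the DeepSeek changes in messages.py: the docstring of _apply_deepseek_reasoning_content quotes the message shape DeepSeek documents for tool calls. A hedged sketch of that round trip against DeepSeek's OpenAI-compatible endpoint (tool definitions, actual tool execution, and error handling are elided; treat names and values as illustrative):

from openai import OpenAI

client = OpenAI(base_url="https://api.deepseek.com", api_key="...")
messages = [{"role": "user", "content": "What's the weather in Berlin?"}]

resp = client.chat.completions.create(
    model="deepseek-reasoner", messages=messages, tools=tools  # `tools` assumed defined
)
choice = resp.choices[0].message

# Same turn, tool call in flight: keep reasoning_content next to tool_calls,
# the shape normalize_messages_for_api(thinking_mode="deepseek") preserves.
messages.append(
    {
        "role": "assistant",
        "content": choice.content,
        "reasoning_content": choice.reasoning_content,
        "tool_calls": choice.tool_calls,
    }
)
messages.append(
    {"role": "tool", "tool_call_id": choice.tool_calls[0].id, "content": "12 C, clear"}
)
# The follow-up request then satisfies DeepSeek's requirement; once a new user
# turn starts, older reasoning_content may be nulled out to save bandwidth.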
ripperdoc/utils/path_ignore.py CHANGED
@@ -286,7 +286,7 @@ def _compile_pattern(pattern: str) -> re.Pattern[str]:
             while j < len(pattern) and pattern[j] != "]":
                 j += 1
             if j < len(pattern):
-                regex += pattern[i:j + 1]
+                regex += pattern[i : j + 1]
             i = j
         else:
             regex += re.escape(c)
@@ -383,7 +383,9 @@ class IgnoreFilter:
 # =============================================================================


-def parse_ignore_pattern(pattern: str, settings_path: Optional[Path] = None) -> Tuple[str, Optional[Path]]:
+def parse_ignore_pattern(
+    pattern: str, settings_path: Optional[Path] = None
+) -> Tuple[str, Optional[Path]]:
     """Parse an ignore pattern and return (relative_pattern, root_path).

     Supports prefixes:
@@ -503,6 +505,7 @@ def is_path_ignored(
     file_path = Path(file_path)
     if not file_path.is_absolute():
         from ripperdoc.utils.safe_get_cwd import safe_get_cwd
+
         file_path = Path(safe_get_cwd()) / file_path

     file_path = file_path.resolve()
@@ -512,6 +515,7 @@ def is_path_ignored(
     root_path = get_git_root(file_path.parent)
     if root_path is None:
         from ripperdoc.utils.safe_get_cwd import safe_get_cwd
+
         root_path = Path(safe_get_cwd())

     root_path = root_path.resolve()
@@ -628,12 +632,35 @@ def check_path_for_tool(
     # Check if it's a binary/media file
     suffix = file_path.suffix.lower()
     binary_extensions = {
-        ".png", ".jpg", ".jpeg", ".gif", ".bmp", ".ico", ".webp",
-        ".mp4", ".avi", ".mkv", ".mov", ".mp3", ".wav", ".flac",
-        ".zip", ".tar", ".gz", ".7z", ".rar",
-        ".exe", ".dll", ".so", ".dylib",
-        ".db", ".sqlite", ".parquet",
-        ".ttf", ".otf", ".woff",
+        ".png",
+        ".jpg",
+        ".jpeg",
+        ".gif",
+        ".bmp",
+        ".ico",
+        ".webp",
+        ".mp4",
+        ".avi",
+        ".mkv",
+        ".mov",
+        ".mp3",
+        ".wav",
+        ".flac",
+        ".zip",
+        ".tar",
+        ".gz",
+        ".7z",
+        ".rar",
+        ".exe",
+        ".dll",
+        ".so",
+        ".dylib",
+        ".db",
+        ".sqlite",
+        ".parquet",
+        ".ttf",
+        ".otf",
+        ".woff",
     }
     if suffix in binary_extensions:
         reasons.append("binary/media file")
ripperdoc/utils/permissions/path_validation_utils.py CHANGED
@@ -51,7 +51,8 @@ def _resolve_path(raw_path: str, cwd: str) -> Path:
     except (OSError, ValueError) as exc:
         logger.warning(
             "[path_validation] Failed to resolve path: %s: %s",
-            type(exc).__name__, exc,
+            type(exc).__name__,
+            exc,
             extra={"raw_path": raw_path, "cwd": cwd},
         )
     return candidate