ripperdoc 0.2.8__py3-none-any.whl → 0.2.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (84)
  1. ripperdoc/__init__.py +1 -1
  2. ripperdoc/cli/cli.py +28 -115
  3. ripperdoc/cli/commands/__init__.py +0 -1
  4. ripperdoc/cli/commands/agents_cmd.py +6 -3
  5. ripperdoc/cli/commands/clear_cmd.py +1 -4
  6. ripperdoc/cli/commands/config_cmd.py +1 -1
  7. ripperdoc/cli/commands/context_cmd.py +3 -2
  8. ripperdoc/cli/commands/doctor_cmd.py +18 -4
  9. ripperdoc/cli/commands/hooks_cmd.py +27 -53
  10. ripperdoc/cli/commands/models_cmd.py +26 -9
  11. ripperdoc/cli/commands/permissions_cmd.py +27 -9
  12. ripperdoc/cli/commands/resume_cmd.py +5 -3
  13. ripperdoc/cli/commands/status_cmd.py +4 -4
  14. ripperdoc/cli/commands/tasks_cmd.py +8 -4
  15. ripperdoc/cli/ui/file_mention_completer.py +2 -1
  16. ripperdoc/cli/ui/interrupt_handler.py +2 -3
  17. ripperdoc/cli/ui/message_display.py +4 -2
  18. ripperdoc/cli/ui/provider_options.py +247 -0
  19. ripperdoc/cli/ui/rich_ui.py +110 -59
  20. ripperdoc/cli/ui/spinner.py +25 -1
  21. ripperdoc/cli/ui/tool_renderers.py +8 -2
  22. ripperdoc/cli/ui/wizard.py +215 -0
  23. ripperdoc/core/agents.py +9 -3
  24. ripperdoc/core/config.py +49 -12
  25. ripperdoc/core/custom_commands.py +7 -6
  26. ripperdoc/core/default_tools.py +11 -2
  27. ripperdoc/core/hooks/config.py +1 -3
  28. ripperdoc/core/hooks/events.py +23 -28
  29. ripperdoc/core/hooks/executor.py +4 -6
  30. ripperdoc/core/hooks/integration.py +12 -21
  31. ripperdoc/core/hooks/manager.py +40 -15
  32. ripperdoc/core/permissions.py +40 -8
  33. ripperdoc/core/providers/anthropic.py +109 -36
  34. ripperdoc/core/providers/gemini.py +70 -5
  35. ripperdoc/core/providers/openai.py +60 -5
  36. ripperdoc/core/query.py +82 -38
  37. ripperdoc/core/query_utils.py +2 -0
  38. ripperdoc/core/skills.py +9 -3
  39. ripperdoc/core/system_prompt.py +4 -2
  40. ripperdoc/core/tool.py +9 -5
  41. ripperdoc/sdk/client.py +2 -2
  42. ripperdoc/tools/ask_user_question_tool.py +5 -3
  43. ripperdoc/tools/background_shell.py +2 -1
  44. ripperdoc/tools/bash_output_tool.py +1 -1
  45. ripperdoc/tools/bash_tool.py +26 -16
  46. ripperdoc/tools/dynamic_mcp_tool.py +29 -8
  47. ripperdoc/tools/enter_plan_mode_tool.py +1 -1
  48. ripperdoc/tools/exit_plan_mode_tool.py +1 -1
  49. ripperdoc/tools/file_edit_tool.py +8 -4
  50. ripperdoc/tools/file_read_tool.py +8 -4
  51. ripperdoc/tools/file_write_tool.py +9 -5
  52. ripperdoc/tools/glob_tool.py +3 -2
  53. ripperdoc/tools/grep_tool.py +3 -2
  54. ripperdoc/tools/kill_bash_tool.py +1 -1
  55. ripperdoc/tools/ls_tool.py +1 -1
  56. ripperdoc/tools/mcp_tools.py +13 -10
  57. ripperdoc/tools/multi_edit_tool.py +8 -7
  58. ripperdoc/tools/notebook_edit_tool.py +7 -4
  59. ripperdoc/tools/skill_tool.py +1 -1
  60. ripperdoc/tools/task_tool.py +5 -4
  61. ripperdoc/tools/todo_tool.py +2 -2
  62. ripperdoc/tools/tool_search_tool.py +3 -2
  63. ripperdoc/utils/conversation_compaction.py +8 -4
  64. ripperdoc/utils/file_watch.py +8 -2
  65. ripperdoc/utils/json_utils.py +2 -1
  66. ripperdoc/utils/mcp.py +11 -3
  67. ripperdoc/utils/memory.py +4 -2
  68. ripperdoc/utils/message_compaction.py +21 -7
  69. ripperdoc/utils/message_formatting.py +11 -7
  70. ripperdoc/utils/messages.py +105 -66
  71. ripperdoc/utils/path_ignore.py +35 -8
  72. ripperdoc/utils/permissions/path_validation_utils.py +2 -1
  73. ripperdoc/utils/permissions/shell_command_validation.py +427 -91
  74. ripperdoc/utils/safe_get_cwd.py +2 -1
  75. ripperdoc/utils/session_history.py +13 -6
  76. ripperdoc/utils/todo.py +2 -1
  77. ripperdoc/utils/token_estimation.py +6 -1
  78. {ripperdoc-0.2.8.dist-info → ripperdoc-0.2.9.dist-info}/METADATA +1 -1
  79. ripperdoc-0.2.9.dist-info/RECORD +123 -0
  80. ripperdoc-0.2.8.dist-info/RECORD +0 -121
  81. {ripperdoc-0.2.8.dist-info → ripperdoc-0.2.9.dist-info}/WHEEL +0 -0
  82. {ripperdoc-0.2.8.dist-info → ripperdoc-0.2.9.dist-info}/entry_points.txt +0 -0
  83. {ripperdoc-0.2.8.dist-info → ripperdoc-0.2.9.dist-info}/licenses/LICENSE +0 -0
  84. {ripperdoc-0.2.8.dist-info → ripperdoc-0.2.9.dist-info}/top_level.txt +0 -0
ripperdoc/utils/conversation_compaction.py CHANGED
@@ -34,6 +34,7 @@ RECENT_MESSAGES_AFTER_COMPACT = 8
 # Summary Prompt Generation
 # ─────────────────────────────────────────────────────────────────────────────
 
+
 def generate_summary_prompt(additional_instructions: Optional[str] = None) -> str:
     """Generate the system prompt for conversation summarization.
 
@@ -203,9 +204,11 @@ Please continue the conversation from where we left it off without asking the us
 # Data Classes
 # ─────────────────────────────────────────────────────────────────────────────
 
+
 @dataclass
 class CompactionResult:
     """Result of a conversation compaction operation."""
+
     messages: List[ConversationMessage]
     summary_text: str
     continuation_prompt: str
@@ -219,6 +222,7 @@ class CompactionResult:
 @dataclass
 class CompactionError:
     """Error during compaction."""
+
     error_type: str  # "not_enough_messages", "empty_summary", "exception"
     message: str
     exception: Optional[Exception] = None
@@ -373,16 +377,15 @@ async def compact_conversation(
         messages_for_summary = micro.messages
 
     # Summarize the conversation
-
+
     non_progress_messages = [
        m for m in messages_for_summary if getattr(m, "type", "") != "progress"
    ]
    try:
-        summary_text = await summarize_conversation(
-            non_progress_messages, custom_instructions
-        )
+        summary_text = await summarize_conversation(non_progress_messages, custom_instructions)
    except Exception as exc:
        import traceback
+
        logger.warning(
            "[compaction] Error during compaction: %s: %s\n%s",
            type(exc).__name__,
@@ -443,6 +446,7 @@ class ConversationCompactor:
     Deprecated: Use compact_conversation() function directly instead.
     This class is kept for backward compatibility.
     """
+
     # Keep CompactionResult as a nested class for backward compatibility
     CompactionResult = CompactionResult
 
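The hunks above show the shape of the compaction error path: progress messages are stripped before summarizing, and any failure is wrapped rather than raised. A minimal, runnable sketch of that path, with the `CompactionError` fields taken from this diff and `summarize_conversation` stubbed out (the real one is async and calls the model):

```python
import traceback
from dataclasses import dataclass
from typing import Optional


@dataclass
class CompactionError:
    error_type: str  # "not_enough_messages", "empty_summary", "exception"
    message: str
    exception: Optional[Exception] = None


def summarize_conversation(messages, custom_instructions=None) -> str:
    raise RuntimeError("model unavailable")  # stand-in for the real async call


messages = [{"type": "progress"}, {"type": "assistant"}]
# Progress messages are UI-only and excluded from the summary, as in the hunk.
non_progress = [m for m in messages if m.get("type", "") != "progress"]
try:
    summary_text = summarize_conversation(non_progress)
except Exception as exc:
    traceback.print_exc()
    error = CompactionError("exception", str(exc), exc)
    print(error.error_type, "-", error.message)
```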
ripperdoc/utils/file_watch.py CHANGED
@@ -102,10 +102,16 @@ def detect_changed_files(
 
         try:
             new_content = _read_portion(file_path, snapshot.offset, snapshot.limit)
-        except (OSError, IOError, UnicodeDecodeError, ValueError) as exc:  # pragma: no cover - best-effort telemetry
+        except (
+            OSError,
+            IOError,
+            UnicodeDecodeError,
+            ValueError,
+        ) as exc:  # pragma: no cover - best-effort telemetry
             logger.warning(
                 "[file_watch] Failed reading changed file: %s: %s",
-                type(exc).__name__, exc,
+                type(exc).__name__,
+                exc,
                 extra={"file_path": file_path},
             )
             notices.append(
ripperdoc/utils/json_utils.py CHANGED
@@ -21,7 +21,8 @@ def safe_parse_json(json_text: Optional[str], log_error: bool = True) -> Optiona
         if log_error:
             logger.debug(
                 "[json_utils] Failed to parse JSON: %s: %s",
-                type(exc).__name__, exc,
+                type(exc).__name__,
+                exc,
                 extra={"length": len(json_text)},
             )
         return None
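The hunk only reflows the logger arguments, but the surrounding pattern recurs across this release: parse defensively, log the exception class at debug level, and return None instead of raising. A self-contained re-sketch of `safe_parse_json` as it reads after the change (logger setup is illustrative):

```python
import json
import logging
from typing import Any, Optional

logger = logging.getLogger("json_utils_demo")


def safe_parse_json(json_text: Optional[str], log_error: bool = True) -> Optional[Any]:
    if json_text is None:
        return None
    try:
        return json.loads(json_text)
    except (json.JSONDecodeError, TypeError, ValueError) as exc:
        if log_error:
            # Log the failure class and payload length, never the payload itself.
            logger.debug(
                "[json_utils] Failed to parse JSON: %s: %s",
                type(exc).__name__,
                exc,
                extra={"length": len(json_text)},
            )
        return None


print(safe_parse_json('{"ok": true}'))  # {'ok': True}
print(safe_parse_json("not json"))      # None
```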
ripperdoc/utils/mcp.py CHANGED
@@ -92,7 +92,8 @@ def _ensure_str_dict(raw: object) -> Dict[str, str]:
         except (TypeError, ValueError) as exc:
             logger.warning(
                 "[mcp] Failed to coerce env/header value to string: %s: %s",
-                type(exc).__name__, exc,
+                type(exc).__name__,
+                exc,
                 extra={"key": key},
             )
             continue
@@ -365,10 +366,17 @@ class McpRuntime:
                     "capabilities": list(info.capabilities.keys()),
                 },
             )
-        except (OSError, RuntimeError, ConnectionError, ValueError, TimeoutError) as exc:  # pragma: no cover - network/process errors
+        except (
+            OSError,
+            RuntimeError,
+            ConnectionError,
+            ValueError,
+            TimeoutError,
+        ) as exc:  # pragma: no cover - network/process errors
             logger.warning(
                 "Failed to connect to MCP server: %s: %s",
-                type(exc).__name__, exc,
+                type(exc).__name__,
+                exc,
                 extra={"server": config.name},
             )
             info.status = "failed"
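Both hunks reflow the broad except tuples around MCP server setup: any network or process failure marks the server `"failed"` rather than crashing the CLI. A compact sketch of the same pattern, with `connect_to_server` as a hypothetical stand-in stubbed to fail:

```python
import logging
from types import SimpleNamespace

logging.basicConfig(level=logging.WARNING)
logger = logging.getLogger("mcp_demo")


def connect_to_server(config) -> None:
    # Hypothetical transport setup; fails here to exercise the except path.
    raise TimeoutError("handshake timed out")


def start_server(config) -> str:
    try:
        connect_to_server(config)
        return "connected"
    except (OSError, RuntimeError, ConnectionError, ValueError, TimeoutError) as exc:
        # Same exception tuple as the diff: degrade to a status, don't raise.
        logger.warning(
            "Failed to connect to MCP server: %s: %s",
            type(exc).__name__,
            exc,
            extra={"server": config.name},
        )
        return "failed"


print(start_server(SimpleNamespace(name="docs-server")))  # -> failed
```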
ripperdoc/utils/memory.py CHANGED
@@ -48,7 +48,8 @@ def _is_path_under_directory(path: Path, directory: Path) -> bool:
     except (ValueError, OSError) as exc:
         logger.warning(
             "[memory] Failed to compare path containment: %s: %s",
-            type(exc).__name__, exc,
+            type(exc).__name__,
+            exc,
             extra={"path": str(path), "directory": str(directory)},
         )
         return False
@@ -126,7 +127,8 @@ def _collect_files(
         except (OSError, ValueError) as exc:
             logger.warning(
                 "[memory] Failed to resolve memory file path: %s: %s",
-                type(exc).__name__, exc,
+                type(exc).__name__,
+                exc,
                 extra={"path": str(resolved_path)},
             )
 
ripperdoc/utils/message_compaction.py CHANGED
@@ -27,7 +27,7 @@ MAX_TOKENS_SOFT = 20_000
 MAX_TOKENS_HARD = 40_000
 MAX_TOOL_USES_TO_PRESERVE = 3
 IMAGE_TOKEN_COST = 2_000
-AUTO_COMPACT_BUFFER = 13_000
+AUTO_COMPACT_BUFFER = 24_000
 WARNING_THRESHOLD = 20_000
 ERROR_THRESHOLD = 20_000
 MICRO_PLACEHOLDER = "[Old tool result content cleared]"
@@ -270,7 +270,9 @@ def get_remaining_context_tokens(
     """Context window minus configured output tokens."""
     context_limit = max(get_model_context_limit(model_profile, explicit_limit), MIN_CONTEXT_TOKENS)
     try:
-        max_output_tokens = int(getattr(model_profile, "max_tokens", 0) or 0) if model_profile else 0
+        max_output_tokens = (
+            int(getattr(model_profile, "max_tokens", 0) or 0) if model_profile else 0
+        )
     except (TypeError, ValueError):
         max_output_tokens = 0
     return max(MIN_CONTEXT_TOKENS, context_limit - max(0, max_output_tokens))
@@ -298,7 +300,9 @@ def get_context_usage_status(
     )
 
     tokens_left = max(effective_limit - used_tokens, 0)
-    percent_left = 0.0 if effective_limit <= 0 else min(100.0, (tokens_left / effective_limit) * 100)
+    percent_left = (
+        0.0 if effective_limit <= 0 else min(100.0, (tokens_left / effective_limit) * 100)
+    )
     percent_used = 100.0 - percent_left
 
     warning_limit = max(0, effective_limit - WARNING_THRESHOLD)
@@ -419,7 +423,9 @@ def _estimate_message_tokens(content_block: Any) -> int:
     if isinstance(content, list):
         total = 0
         for part in content:
-            part_type = getattr(part, "type", None) or (part.get("type") if isinstance(part, dict) else None)
+            part_type = getattr(part, "type", None) or (
+                part.get("type") if isinstance(part, dict) else None
+            )
             if part_type == "text":
                 text_val = getattr(part, "text", None) if hasattr(part, "text") else None
                 if text_val is None and isinstance(part, dict):
@@ -501,7 +507,9 @@ def micro_compact_messages(
         token_counts_by_tool_use_id[tool_use_id] = token_count
 
     latest_tool_use_ids = (
-        tool_use_ids_to_compact[-MAX_TOOL_USES_TO_PRESERVE:] if MAX_TOOL_USES_TO_PRESERVE > 0 else []
+        tool_use_ids_to_compact[-MAX_TOOL_USES_TO_PRESERVE:]
+        if MAX_TOOL_USES_TO_PRESERVE > 0
+        else []
    )
    total_token_count = sum(token_counts_by_tool_use_id.values())
 
@@ -525,7 +533,9 @@
        messages, protocol=protocol, precomputed_total_tokens=tokens_before
    )
    status = get_context_usage_status(
-        usage_tokens, max_context_tokens=context_limit, auto_compact_enabled=resolved_auto_compact
+        usage_tokens,
+        max_context_tokens=context_limit,
+        auto_compact_enabled=resolved_auto_compact,
    )
    if not status.is_above_warning_threshold or total_tokens_removed < MAX_TOKENS_SOFT:
        ids_to_remove.clear()
@@ -571,7 +581,11 @@
                new_block = content_item.model_copy()
                new_block.text = MICRO_PLACEHOLDER
            else:
-                block_dict = dict(content_item) if isinstance(content_item, dict) else {"type": "tool_result"}
+                block_dict = (
+                    dict(content_item)
+                    if isinstance(content_item, dict)
+                    else {"type": "tool_result"}
+                )
                block_dict["text"] = MICRO_PLACEHOLDER
                block_dict["tool_use_id"] = tool_use_id
                new_block = MessageContent(**block_dict)
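Most of these hunks are pure line-length reflows, but the first one changes behavior: `AUTO_COMPACT_BUFFER` nearly doubles, from 13_000 to 24_000 tokens. A back-of-envelope view, assuming the buffer is reserved off the model's context limit when deciding the auto-compact trigger point (the formula is an assumption for illustration; only the constant change comes from the diff):

```python
# Hypothetical 200k-token model; only the buffer constants are from the diff.
CONTEXT_LIMIT = 200_000
OLD_BUFFER, NEW_BUFFER = 13_000, 24_000

old_trigger = CONTEXT_LIMIT - OLD_BUFFER  # compacted at ~187_000 tokens used
new_trigger = CONTEXT_LIMIT - NEW_BUFFER  # compacted at ~176_000 tokens used

print(f"auto-compact now triggers {old_trigger - new_trigger:_} tokens earlier")
# -> auto-compact now triggers 11_000 tokens earlier
```

In other words, 0.2.9 leaves more headroom between the compaction trigger and the hard context limit, which reduces the chance that a long tool result overflows the window before compaction can run.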
ripperdoc/utils/message_formatting.py CHANGED
@@ -12,9 +12,7 @@ from ripperdoc.utils.messages import UserMessage, AssistantMessage, ProgressMess
 ConversationMessage = Union[UserMessage, AssistantMessage, ProgressMessage]
 
 
-def stringify_message_content(
-    content: Any, *, include_tool_details: bool = False
-) -> str:
+def stringify_message_content(content: Any, *, include_tool_details: bool = False) -> str:
     """Convert message content to plain string.
 
     Args:
@@ -128,14 +126,16 @@ def format_tool_result_detail(result_text: str, is_error: bool = False) -> str:
     return f"{prefix}: {result_preview}"
 
 
-def format_reasoning_preview(reasoning: Any) -> str:
+def format_reasoning_preview(reasoning: Any, show_full_thinking: bool = False) -> str:
     """Return a short preview of reasoning/thinking content.
 
     Args:
         reasoning: The reasoning content (string, list, or other).
+        show_full_thinking: If True, return full reasoning content without truncation.
+            If False, return a truncated preview (max 250 chars).
 
     Returns:
-        A short preview string (max ~80 chars with ellipsis).
+        A short preview string or full reasoning content.
     """
     if reasoning is None:
         return ""
@@ -151,11 +151,15 @@ def format_reasoning_preview(reasoning: Any) -> str:
         text = "\n".join(p for p in parts if p)
     else:
         text = str(reasoning)
+
+    if show_full_thinking:
+        return text
+
     lines = text.strip().splitlines()
     if not lines:
         return ""
-    preview = lines[0][:80]
-    if len(lines) > 1 or len(lines[0]) > 80:
+    preview = lines[0][:250]
+    if len(lines) > 1 or len(lines[0]) > 250:
         preview += "..."
     return preview
 
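Two user-visible changes here: a new `show_full_thinking` flag that bypasses truncation, and a preview limit raised from 80 to 250 characters. A string-only sketch of the logic as it reads after the hunk (the real function also flattens list-valued reasoning first):

```python
def format_reasoning_preview(reasoning, show_full_thinking: bool = False) -> str:
    if reasoning is None:
        return ""
    text = str(reasoning)
    if show_full_thinking:
        return text  # full content, no truncation
    lines = text.strip().splitlines()
    if not lines:
        return ""
    preview = lines[0][:250]  # was 80 in 0.2.8
    if len(lines) > 1 or len(lines[0]) > 250:
        preview += "..."
    return preview


thinking = "Step 1: inspect the failing test.\nStep 2: patch the fixture."
print(format_reasoning_preview(thinking))                           # first line + "..."
print(format_reasoning_preview(thinking, show_full_thinking=True))  # both lines
```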
ripperdoc/utils/messages.py CHANGED
@@ -4,7 +4,6 @@ This module provides utilities for creating and normalizing messages
 for communication with AI models.
 """
 
-import json
 from typing import Any, Dict, List, Optional, Union
 from pydantic import BaseModel, ConfigDict, Field
 from uuid import uuid4
@@ -93,7 +92,8 @@ def _content_block_to_openai(block: MessageContent) -> Dict[str, Any]:
         except (TypeError, ValueError) as exc:
             logger.warning(
                 "[_content_block_to_openai] Failed to serialize tool arguments: %s: %s",
-                type(exc).__name__, exc,
+                type(exc).__name__,
+                exc,
             )
             args_str = "{}"
     tool_call_id = (
@@ -211,7 +211,8 @@ def create_user_message(
             # Fallback: keep as-is if conversion fails
             logger.warning(
                 "[create_user_message] Failed to normalize tool_use_result: %s: %s",
-                type(exc).__name__, exc,
+                type(exc).__name__,
+                exc,
             )
 
     message = Message(role=MessageRole.USER, content=message_content)
@@ -268,14 +269,80 @@ def create_progress_message(
     )
 
 
+def _apply_deepseek_reasoning_content(
+    normalized: List[Dict[str, Any]],
+    is_new_turn: bool = False,
+) -> List[Dict[str, Any]]:
+    """Apply DeepSeek reasoning_content handling to normalized messages.
+
+    DeepSeek thinking mode requires special handling for tool calls:
+    1. During a tool call loop (same turn), reasoning_content MUST be preserved
+       in assistant messages that contain tool_calls
+    2. When a new user turn starts, we can optionally clear previous reasoning_content
+       to save bandwidth (the API will ignore them anyway)
+
+    According to DeepSeek docs, an assistant message with tool_calls should look like:
+        {
+            'role': 'assistant',
+            'content': response.choices[0].message.content,
+            'reasoning_content': response.choices[0].message.reasoning_content,
+            'tool_calls': response.choices[0].message.tool_calls,
+        }
+
+    Args:
+        normalized: The normalized messages list
+        is_new_turn: If True, clear reasoning_content from historical messages
+            to save network bandwidth
+
+    Returns:
+        The processed messages list
+    """
+    if not normalized:
+        return normalized
+
+    # Find the last user message index to determine the current turn boundary
+    last_user_idx = -1
+    for idx in range(len(normalized) - 1, -1, -1):
+        if normalized[idx].get("role") == "user":
+            last_user_idx = idx
+            break
+
+    if is_new_turn and last_user_idx > 0:
+        # Clear reasoning_content from messages before the last user message
+        # This is optional but recommended by DeepSeek to save bandwidth
+        for idx in range(last_user_idx):
+            msg = normalized[idx]
+            if msg.get("role") == "assistant" and "reasoning_content" in msg:
+                # Set to None instead of deleting to match DeepSeek's example
+                msg["reasoning_content"] = None
+
+    # Validate: ensure all assistant messages with tool_calls have reasoning_content
+    # within the current turn (after last_user_idx)
+    for idx in range(max(0, last_user_idx), len(normalized)):
+        msg = normalized[idx]
+        if msg.get("role") == "assistant" and msg.get("tool_calls"):
+            if "reasoning_content" not in msg:
+                # This is a problem - DeepSeek requires reasoning_content for tool_calls
+                logger.warning(
+                    f"[deepseek] Assistant message at index {idx} has tool_calls "
+                    f"but missing reasoning_content - this may cause API errors"
+                )
+
+    return normalized
+
+
 def normalize_messages_for_api(
     messages: List[Union[UserMessage, AssistantMessage, ProgressMessage]],
     protocol: str = "anthropic",
     tool_mode: str = "native",
+    thinking_mode: Optional[str] = None,
 ) -> List[Dict[str, Any]]:
     """Normalize messages for API submission.
 
     Progress messages are filtered out as they are not sent to the API.
+
+    For DeepSeek thinking mode, this function ensures reasoning_content is properly
+    included in assistant messages that contain tool_calls, as required by the API.
     """
 
     def _msg_type(msg: Any) -> Optional[str]:
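The docstring above pins down the message shape DeepSeek expects when an assistant turn carries tool calls: `reasoning_content` must be echoed back alongside `tool_calls` within the same turn. A runnable sketch of that echo step, with `SimpleNamespace` standing in for the SDK's `response.choices[0].message` object:

```python
from types import SimpleNamespace

# Stand-in for one choice of a DeepSeek chat-completions response.
message = SimpleNamespace(
    content=None,
    reasoning_content="Need the forecast; call get_weather first.",
    tool_calls=[
        {
            "id": "call_1",
            "type": "function",
            "function": {"name": "get_weather", "arguments": "{}"},
        }
    ],
)

history = [{"role": "user", "content": "What's the weather in Hanoi?"}]
history.append(
    {
        "role": "assistant",
        "content": message.content,
        # reasoning_content must ride along whenever tool_calls is present
        # within the same turn, per the docstring above.
        "reasoning_content": message.reasoning_content,
        "tool_calls": message.tool_calls,
    }
)
history.append({"role": "tool", "tool_call_id": "call_1", "content": "31°C, clear"})
# ...then send history back to the API so the model can finish the turn.
```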
@@ -318,58 +385,6 @@
         return meta_dict
     return {}
 
-    def _block_type(block: Any) -> Optional[str]:
-        if hasattr(block, "type"):
-            return getattr(block, "type", None)
-        if isinstance(block, dict):
-            return block.get("type")
-        return None
-
-    def _block_attr(block: Any, attr: str, default: Any = None) -> Any:
-        if hasattr(block, attr):
-            return getattr(block, attr, default)
-        if isinstance(block, dict):
-            return block.get(attr, default)
-        return default
-
-    def _flatten_blocks_to_text(blocks: List[Any]) -> str:
-        parts: List[str] = []
-        for blk in blocks:
-            btype = _block_type(blk)
-            if btype == "text":
-                text = _block_attr(blk, "text") or _block_attr(blk, "content") or ""
-                if text:
-                    parts.append(str(text))
-            elif btype == "tool_result":
-                text = _block_attr(blk, "text") or _block_attr(blk, "content") or ""
-                tool_id = _block_attr(blk, "tool_use_id") or _block_attr(blk, "id")
-                prefix = "Tool error" if _block_attr(blk, "is_error") else "Tool result"
-                label = f"{prefix}{f' ({tool_id})' if tool_id else ''}"
-                parts.append(f"{label}: {text}" if text else label)
-            elif btype == "tool_use":
-                name = _block_attr(blk, "name") or ""
-                input_data = _block_attr(blk, "input")
-                input_preview = ""
-                if input_data not in (None, {}):
-                    try:
-                        input_preview = json.dumps(input_data)
-                    except (TypeError, ValueError):
-                        input_preview = str(input_data)
-                tool_id = _block_attr(blk, "tool_use_id") or _block_attr(blk, "id")
-                desc = "Tool call"
-                if name:
-                    desc += f" {name}"
-                if tool_id:
-                    desc += f" ({tool_id})"
-                if input_preview:
-                    desc += f": {input_preview}"
-                parts.append(desc)
-            else:
-                text = _block_attr(blk, "text") or _block_attr(blk, "content") or ""
-                if text:
-                    parts.append(str(text))
-        return "\n".join(p for p in parts if p)
-
     effective_tool_mode = (tool_mode or "native").lower()
     if effective_tool_mode not in {"native", "text"}:
         effective_tool_mode = "native"
@@ -426,7 +441,9 @@
                 if block_type == "tool_result":
                     tool_results_seen += 1
                     # Skip tool_result blocks that lack a preceding tool_use
-                    tool_id = getattr(block, "tool_use_id", None) or getattr(block, "id", None)
+                    tool_id = getattr(block, "tool_use_id", None) or getattr(
+                        block, "id", None
+                    )
                     if not tool_id:
                         skipped_tool_results_no_call += 1
                         continue
@@ -486,19 +503,35 @@
                 mapped = _content_block_to_openai(block)
                 if mapped:
                     assistant_openai_msgs.append(mapped)
-            if text_parts:
-                assistant_openai_msgs.append(
-                    {"role": "assistant", "content": "\n".join(text_parts)}
-                )
            if tool_calls:
+                # For DeepSeek thinking mode, we must include reasoning_content
+                # in the assistant message that contains tool_calls
+                tool_call_msg: Dict[str, Any] = {
+                    "role": "assistant",
+                    "content": "\n".join(text_parts) if text_parts else None,
+                    "tool_calls": tool_calls,
+                }
+                # Add reasoning_content if present (required for DeepSeek thinking mode)
+                reasoning_content = meta.get("reasoning_content") if meta else None
+                if reasoning_content is not None:
+                    tool_call_msg["reasoning_content"] = reasoning_content
+                    logger.debug(
+                        f"[normalize_messages_for_api] Added reasoning_content to "
+                        f"tool_call message (len={len(str(reasoning_content))})"
+                    )
+                elif thinking_mode == "deepseek":
+                    logger.warning(
+                        f"[normalize_messages_for_api] DeepSeek mode: assistant "
+                        f"message with tool_calls but no reasoning_content in metadata. "
+                        f"meta_keys={list(meta.keys()) if meta else []}"
+                    )
+                assistant_openai_msgs.append(tool_call_msg)
+            elif text_parts:
                assistant_openai_msgs.append(
-                    {
-                        "role": "assistant",
-                        "content": None,
-                        "tool_calls": tool_calls,
-                    }
+                    {"role": "assistant", "content": "\n".join(text_parts)}
                )
-            if meta and assistant_openai_msgs:
+            # For non-tool-call messages, add reasoning metadata to the last message
+            if meta and assistant_openai_msgs and not tool_calls:
                for key in ("reasoning_content", "reasoning_details", "reasoning"):
                    if key in meta and meta[key] is not None:
                        assistant_openai_msgs[-1][key] = meta[key]
@@ -515,6 +548,7 @@
 
     logger.debug(
         f"[normalize_messages_for_api] protocol={protocol} tool_mode={effective_tool_mode} "
+        f"thinking_mode={thinking_mode} "
        f"input_msgs={len(messages)} normalized={len(normalized)} "
        f"tool_results_seen={tool_results_seen} tool_uses_seen={tool_uses_seen} "
        f"tool_result_positions={len(tool_result_positions)} "
@@ -523,6 +557,11 @@
        f"skipped_tool_uses_no_id={skipped_tool_uses_no_id} "
        f"skipped_tool_results_no_call={skipped_tool_results_no_call}"
    )
+
+    # Apply DeepSeek-specific reasoning_content handling
+    if thinking_mode == "deepseek":
+        normalized = _apply_deepseek_reasoning_content(normalized, is_new_turn=False)
+
    return normalized
 
 
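Callers opt in to all of the above through the new keyword argument. A call-site sketch, not runnable on its own; `conversation_messages` stands for whatever list of UserMessage/AssistantMessage objects the caller already holds:

```python
# Hypothetical call site; the signature comes from this diff,
# the variable names are illustrative.
payload = normalize_messages_for_api(
    conversation_messages,
    protocol="openai",
    tool_mode="native",
    thinking_mode="deepseek",  # opts in to the reasoning_content handling above
)
```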
ripperdoc/utils/path_ignore.py CHANGED
@@ -286,7 +286,7 @@ def _compile_pattern(pattern: str) -> re.Pattern[str]:
             while j < len(pattern) and pattern[j] != "]":
                 j += 1
             if j < len(pattern):
-                regex += pattern[i:j + 1]
+                regex += pattern[i : j + 1]
                 i = j
             else:
                 regex += re.escape(c)
@@ -383,7 +383,9 @@ class IgnoreFilter:
 # =============================================================================
 
 
-def parse_ignore_pattern(pattern: str, settings_path: Optional[Path] = None) -> Tuple[str, Optional[Path]]:
+def parse_ignore_pattern(
+    pattern: str, settings_path: Optional[Path] = None
+) -> Tuple[str, Optional[Path]]:
     """Parse an ignore pattern and return (relative_pattern, root_path).
 
     Supports prefixes:
@@ -503,6 +505,7 @@ def is_path_ignored(
     file_path = Path(file_path)
     if not file_path.is_absolute():
         from ripperdoc.utils.safe_get_cwd import safe_get_cwd
+
         file_path = Path(safe_get_cwd()) / file_path
 
     file_path = file_path.resolve()
@@ -512,6 +515,7 @@ def is_path_ignored(
     root_path = get_git_root(file_path.parent)
     if root_path is None:
         from ripperdoc.utils.safe_get_cwd import safe_get_cwd
+
         root_path = Path(safe_get_cwd())
 
     root_path = root_path.resolve()
@@ -628,12 +632,35 @@ def check_path_for_tool(
     # Check if it's a binary/media file
     suffix = file_path.suffix.lower()
     binary_extensions = {
-        ".png", ".jpg", ".jpeg", ".gif", ".bmp", ".ico", ".webp",
-        ".mp4", ".avi", ".mkv", ".mov", ".mp3", ".wav", ".flac",
-        ".zip", ".tar", ".gz", ".7z", ".rar",
-        ".exe", ".dll", ".so", ".dylib",
-        ".db", ".sqlite", ".parquet",
-        ".ttf", ".otf", ".woff",
+        ".png",
+        ".jpg",
+        ".jpeg",
+        ".gif",
+        ".bmp",
+        ".ico",
+        ".webp",
+        ".mp4",
+        ".avi",
+        ".mkv",
+        ".mov",
+        ".mp3",
+        ".wav",
+        ".flac",
+        ".zip",
+        ".tar",
+        ".gz",
+        ".7z",
+        ".rar",
+        ".exe",
+        ".dll",
+        ".so",
+        ".dylib",
+        ".db",
+        ".sqlite",
+        ".parquet",
+        ".ttf",
+        ".otf",
+        ".woff",
     }
     if suffix in binary_extensions:
         reasons.append("binary/media file")
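These hunks are formatting-only, but the first one touches the most subtle piece of `_compile_pattern`: gitignore-style character classes. A minimal sketch of just that branch, mirroring the hunk's logic (a complete `[...]` class is copied into the regex verbatim; an unterminated `[` falls back to a literal match); this is not the full `_compile_pattern` implementation:

```python
import re


def compile_char_class_fragment(pattern: str, i: int) -> tuple[str, int]:
    # Assumes pattern[i] == "["; returns (regex_fragment, last_consumed_index).
    j = i + 1
    while j < len(pattern) and pattern[j] != "]":
        j += 1
    if j < len(pattern):
        return pattern[i : j + 1], j  # keep the whole class, e.g. "[abc]"
    return re.escape(pattern[i]), i   # unterminated: treat "[" as a literal


print(compile_char_class_fragment("[abc].py", 0))  # ('[abc]', 4)
print(compile_char_class_fragment("[abc", 0))      # ('\\[', 0)
```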
ripperdoc/utils/permissions/path_validation_utils.py CHANGED
@@ -51,7 +51,8 @@ def _resolve_path(raw_path: str, cwd: str) -> Path:
     except (OSError, ValueError) as exc:
         logger.warning(
             "[path_validation] Failed to resolve path: %s: %s",
-            type(exc).__name__, exc,
+            type(exc).__name__,
+            exc,
             extra={"raw_path": raw_path, "cwd": cwd},
         )
     return candidate
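The hunk shows `_resolve_path` degrading gracefully: resolution failures are logged and the unresolved candidate is returned rather than raising. A runnable re-sketch of that pattern under the same assumptions (the candidate-building step before the try is inferred; logging setup is illustrative):

```python
import logging
from pathlib import Path

logger = logging.getLogger("path_validation_demo")


def resolve_path(raw_path: str, cwd: str) -> Path:
    candidate = Path(raw_path)
    if not candidate.is_absolute():
        candidate = Path(cwd) / candidate
    try:
        return candidate.resolve()
    except (OSError, ValueError) as exc:
        # Same fallback as the diff: log and hand back the unresolved path.
        logger.warning(
            "[path_validation] Failed to resolve path: %s: %s",
            type(exc).__name__,
            exc,
            extra={"raw_path": raw_path, "cwd": cwd},
        )
        return candidate


print(resolve_path("src/../README.md", "/tmp"))  # /tmp/README.md (resolved)
```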