ripperdoc 0.2.9-py3-none-any.whl → 0.3.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (76)
  1. ripperdoc/__init__.py +1 -1
  2. ripperdoc/cli/cli.py +379 -51
  3. ripperdoc/cli/commands/__init__.py +6 -0
  4. ripperdoc/cli/commands/agents_cmd.py +128 -5
  5. ripperdoc/cli/commands/clear_cmd.py +8 -0
  6. ripperdoc/cli/commands/doctor_cmd.py +29 -0
  7. ripperdoc/cli/commands/exit_cmd.py +1 -0
  8. ripperdoc/cli/commands/memory_cmd.py +2 -1
  9. ripperdoc/cli/commands/models_cmd.py +63 -7
  10. ripperdoc/cli/commands/resume_cmd.py +5 -0
  11. ripperdoc/cli/commands/skills_cmd.py +103 -0
  12. ripperdoc/cli/commands/stats_cmd.py +244 -0
  13. ripperdoc/cli/commands/status_cmd.py +10 -0
  14. ripperdoc/cli/commands/tasks_cmd.py +6 -3
  15. ripperdoc/cli/commands/themes_cmd.py +139 -0
  16. ripperdoc/cli/ui/file_mention_completer.py +63 -13
  17. ripperdoc/cli/ui/helpers.py +6 -3
  18. ripperdoc/cli/ui/interrupt_handler.py +34 -0
  19. ripperdoc/cli/ui/panels.py +14 -8
  20. ripperdoc/cli/ui/rich_ui.py +737 -47
  21. ripperdoc/cli/ui/spinner.py +93 -18
  22. ripperdoc/cli/ui/thinking_spinner.py +1 -2
  23. ripperdoc/cli/ui/tool_renderers.py +10 -9
  24. ripperdoc/cli/ui/wizard.py +24 -19
  25. ripperdoc/core/agents.py +14 -3
  26. ripperdoc/core/config.py +238 -6
  27. ripperdoc/core/default_tools.py +91 -10
  28. ripperdoc/core/hooks/events.py +4 -0
  29. ripperdoc/core/hooks/llm_callback.py +58 -0
  30. ripperdoc/core/hooks/manager.py +6 -0
  31. ripperdoc/core/permissions.py +160 -9
  32. ripperdoc/core/providers/openai.py +84 -28
  33. ripperdoc/core/query.py +489 -87
  34. ripperdoc/core/query_utils.py +17 -14
  35. ripperdoc/core/skills.py +1 -0
  36. ripperdoc/core/theme.py +298 -0
  37. ripperdoc/core/tool.py +15 -5
  38. ripperdoc/protocol/__init__.py +14 -0
  39. ripperdoc/protocol/models.py +300 -0
  40. ripperdoc/protocol/stdio.py +1453 -0
  41. ripperdoc/tools/background_shell.py +354 -139
  42. ripperdoc/tools/bash_tool.py +117 -22
  43. ripperdoc/tools/file_edit_tool.py +228 -50
  44. ripperdoc/tools/file_read_tool.py +154 -3
  45. ripperdoc/tools/file_write_tool.py +53 -11
  46. ripperdoc/tools/grep_tool.py +98 -8
  47. ripperdoc/tools/lsp_tool.py +609 -0
  48. ripperdoc/tools/multi_edit_tool.py +26 -3
  49. ripperdoc/tools/skill_tool.py +52 -1
  50. ripperdoc/tools/task_tool.py +539 -65
  51. ripperdoc/utils/conversation_compaction.py +1 -1
  52. ripperdoc/utils/file_watch.py +216 -7
  53. ripperdoc/utils/image_utils.py +125 -0
  54. ripperdoc/utils/log.py +30 -3
  55. ripperdoc/utils/lsp.py +812 -0
  56. ripperdoc/utils/mcp.py +80 -18
  57. ripperdoc/utils/message_formatting.py +7 -4
  58. ripperdoc/utils/messages.py +198 -33
  59. ripperdoc/utils/pending_messages.py +50 -0
  60. ripperdoc/utils/permissions/shell_command_validation.py +3 -3
  61. ripperdoc/utils/permissions/tool_permission_utils.py +180 -15
  62. ripperdoc/utils/platform.py +198 -0
  63. ripperdoc/utils/session_heatmap.py +242 -0
  64. ripperdoc/utils/session_history.py +2 -2
  65. ripperdoc/utils/session_stats.py +294 -0
  66. ripperdoc/utils/shell_utils.py +8 -5
  67. ripperdoc/utils/todo.py +0 -6
  68. {ripperdoc-0.2.9.dist-info → ripperdoc-0.3.0.dist-info}/METADATA +55 -17
  69. ripperdoc-0.3.0.dist-info/RECORD +136 -0
  70. {ripperdoc-0.2.9.dist-info → ripperdoc-0.3.0.dist-info}/WHEEL +1 -1
  71. ripperdoc/sdk/__init__.py +0 -9
  72. ripperdoc/sdk/client.py +0 -333
  73. ripperdoc-0.2.9.dist-info/RECORD +0 -123
  74. {ripperdoc-0.2.9.dist-info → ripperdoc-0.3.0.dist-info}/entry_points.txt +0 -0
  75. {ripperdoc-0.2.9.dist-info → ripperdoc-0.3.0.dist-info}/licenses/LICENSE +0 -0
  76. {ripperdoc-0.2.9.dist-info → ripperdoc-0.3.0.dist-info}/top_level.txt +0 -0
ripperdoc/utils/mcp.py CHANGED
@@ -17,12 +17,13 @@ from ripperdoc.utils.token_estimation import estimate_tokens
 
 logger = get_logger()
 
+
 try:
     import mcp.types as mcp_types  # type: ignore[import-not-found]
     from mcp.client.session import ClientSession  # type: ignore[import-not-found]
     from mcp.client.sse import sse_client  # type: ignore[import-not-found]
     from mcp.client.stdio import StdioServerParameters, stdio_client  # type: ignore[import-not-found]
-    from mcp.client.streamable_http import streamablehttp_client  # type: ignore[import-not-found]
+    from mcp.client.streamable_http import streamable_http_client  # type: ignore[import-not-found]
 
     MCP_AVAILABLE = True
 except (ImportError, ModuleNotFoundError):  # pragma: no cover - handled gracefully at runtime
@@ -217,6 +218,14 @@ class McpRuntime:
         self.sessions: Dict[str, ClientSession] = {}
         self.servers: List[McpServerInfo] = []
         self._closed = False
+        # Track MCP streams for proper cleanup ordering
+        # We need to close write streams BEFORE exiting the stdio_client context
+        # to allow the internal tasks to exit cleanly
+        self._mcp_write_streams: List[Any] = []
+        # Track the underlying async generators from @asynccontextmanager wrappers
+        # These need to be explicitly closed after exit stack cleanup to prevent
+        # shutdown_asyncgens() from trying to close them in a different task
+        self._raw_async_generators: List[Any] = []
 
     async def connect(self, configs: Dict[str, McpServerInfo]) -> List[McpServerInfo]:
         logger.info(
@@ -281,19 +290,24 @@
         if config.type in ("sse", "sse-ide"):
             if not config.url:
                 raise ValueError("SSE MCP server requires a 'url'.")
-            read_stream, write_stream = await self._exit_stack.enter_async_context(
-                sse_client(config.url, headers=config.headers or None)
-            )
+            cm = sse_client(config.url, headers=config.headers or None)
+            # Track the underlying async generator for explicit cleanup
+            if hasattr(cm, "gen"):
+                self._raw_async_generators.append(cm.gen)
+            read_stream, write_stream = await self._exit_stack.enter_async_context(cm)
+            self._mcp_write_streams.append(write_stream)
         elif config.type in ("http", "streamable-http"):
             if not config.url:
                 raise ValueError("HTTP MCP server requires a 'url'.")
-            read_stream, write_stream, _ = await self._exit_stack.enter_async_context(
-                streamablehttp_client(
-                    url=config.url,
-                    headers=config.headers or None,
-                    terminate_on_close=True,
-                )
+            cm = streamable_http_client(  # type: ignore[call-arg]
+                url=config.url,
+                terminate_on_close=True,
             )
+            # Track the underlying async generator for explicit cleanup
+            if hasattr(cm, "gen"):
+                self._raw_async_generators.append(cm.gen)
+            read_stream, write_stream, _ = await self._exit_stack.enter_async_context(cm)
+            self._mcp_write_streams.append(write_stream)
         else:
             if not config.command:
                 raise ValueError("Stdio MCP server requires a 'command'.")
@@ -303,9 +317,12 @@
                 env=config.env or None,
                 cwd=self.project_path,
             )
-            read_stream, write_stream = await self._exit_stack.enter_async_context(
-                stdio_client(stdio_params)
-            )
+            cm = stdio_client(stdio_params)
+            # Track the underlying async generator for explicit cleanup
+            if hasattr(cm, "gen"):
+                self._raw_async_generators.append(cm.gen)
+            read_stream, write_stream = await self._exit_stack.enter_async_context(cm)
+            self._mcp_write_streams.append(write_stream)
 
         if read_stream is None or write_stream is None:
             raise ValueError("Failed to create read/write streams for MCP server")
@@ -392,17 +409,39 @@
             "[mcp] Shutting down MCP runtime",
             extra={"project_path": str(self.project_path), "session_count": len(self.sessions)},
         )
+
+        # CRITICAL: Close all MCP write streams FIRST to signal internal tasks to stop.
+        for write_stream in self._mcp_write_streams:
+            try:
+                await write_stream.aclose()
+            except BaseException:  # pragma: no cover
+                pass
+        self._mcp_write_streams.clear()
+
+        # Small delay to allow internal tasks to notice stream closure and exit
+        await asyncio.sleep(0.1)
+
+        # CRITICAL: Close the raw async generators BEFORE the exit stack cleanup.
+        # This prevents asyncio's shutdown_asyncgens() from trying to close them
+        # later, which would cause the "cancel scope in different task" error.
+        for gen in self._raw_async_generators:
+            try:
+                await gen.aclose()
+            except BaseException:  # pragma: no cover
+                pass
+        self._raw_async_generators.clear()
+
+        # Now close the exit stack
         try:
             await self._exit_stack.aclose()
         except BaseException as exc:  # pragma: no cover - defensive shutdown
-            # Swallow noisy ExceptionGroups from stdio_client cancel scopes during exit.
             logger.debug(
-                "[mcp] Suppressed MCP shutdown error",
+                "[mcp] Suppressed MCP shutdown error during exit_stack.aclose()",
                 extra={"error": str(exc), "project_path": str(self.project_path)},
             )
-        finally:
-            self.sessions.clear()
-            self.servers.clear()
+
+        self.sessions.clear()
+        self.servers.clear()
 
 
 _runtime_var: contextvars.ContextVar[Optional[McpRuntime]] = contextvars.ContextVar(
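Note: the ordering above is the point of the change: write streams first (so the clients' internal tasks see EOF), a grace period, then the raw generators (in this task, before `loop.shutdown_asyncgens()` can finalize them from another one), and only then the exit stack. Reduced to a skeleton under those assumptions:

import asyncio
from contextlib import AsyncExitStack
from typing import Any, List

async def ordered_shutdown(
    exit_stack: AsyncExitStack,
    write_streams: List[Any],
    raw_generators: List[Any],
) -> None:
    # 1. Close write streams so internal reader/writer tasks exit on their own.
    for ws in write_streams:
        try:
            await ws.aclose()
        except BaseException:
            pass
    # 2. Give those tasks a moment to notice the closure.
    await asyncio.sleep(0.1)
    # 3. Finalize the raw async generators in *this* task, ahead of
    #    asyncio's shutdown_asyncgens() hook.
    for gen in raw_generators:
        try:
            await gen.aclose()
        except BaseException:
            pass
    # 4. Only now unwind the remaining contexts.
    await exit_stack.aclose()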
@@ -453,6 +492,29 @@ async def ensure_mcp_runtime(project_path: Optional[Path] = None) -> McpRuntime:
     # Keep a module-level reference so sync callers that hop event loops can reuse it.
     global _global_runtime
     _global_runtime = runtime
+
+    # Install custom exception handler to suppress MCP asyncgen cleanup errors.
+    # These errors occur due to anyio cancel scope issues when stdio_client async
+    # generators are finalized by Python's asyncgen hooks. The errors are harmless
+    # but noisy, so we suppress them here.
+    loop = asyncio.get_running_loop()
+    original_handler = loop.get_exception_handler()
+
+    def mcp_exception_handler(loop: asyncio.AbstractEventLoop, context: Dict[str, Any]) -> None:
+        asyncgen = context.get("asyncgen")
+        # Suppress MCP stdio_client asyncgen cleanup errors
+        if asyncgen and "stdio_client" in str(asyncgen):
+            logger.debug("[mcp] Suppressed asyncgen cleanup error for stdio_client")
+            return
+        # Call original handler for other errors
+        if original_handler:
+            original_handler(loop, context)
+        else:
+            loop.default_exception_handler(context)
+
+    loop.set_exception_handler(mcp_exception_handler)
+    logger.debug("[mcp] Installed custom exception handler for asyncgen cleanup")
+
     return runtime
 
 
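Note: the handler contract here is asyncio's own: the callback receives `(loop, context)`, and `context` carries an `"asyncgen"` key when the report comes from async-generator finalization. A self-contained sketch of the same chaining pattern, filtering on that key alone (broader than the `stdio_client` substring check above):

import asyncio

def install_filtering_handler(loop: asyncio.AbstractEventLoop) -> None:
    """Chain a filtering handler in front of whatever was installed before."""
    previous = loop.get_exception_handler()  # None means "default handler"

    def handler(loop: asyncio.AbstractEventLoop, context: dict) -> None:
        if "asyncgen" in context:
            return  # drop cleanup-only noise from async generator finalization
        if previous is not None:
            previous(loop, context)
        else:
            loop.default_exception_handler(context)

    loop.set_exception_handler(handler)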
ripperdoc/utils/message_formatting.py CHANGED
@@ -151,15 +151,18 @@ def format_reasoning_preview(reasoning: Any, show_full_thinking: bool = False) -
         text = "\n".join(p for p in parts if p)
     else:
         text = str(reasoning)
-
+
     if show_full_thinking:
         return text
-
+
     lines = text.strip().splitlines()
     if not lines:
        return ""
-    preview = lines[0][:250]
-    if len(lines) > 1 or len(lines[0]) > 250:
+    first_line = lines[0]
+    if not first_line:
+        return "..." if len(lines) > 1 else ""
+    preview = first_line[:250]
+    if len(lines) > 1 or len(first_line) > 250:
         preview += "..."
     return preview
 
ripperdoc/utils/messages.py CHANGED
@@ -5,7 +5,7 @@ for communication with AI models.
 """
 
 from typing import Any, Dict, List, Optional, Union
-from pydantic import BaseModel, ConfigDict, Field
+from pydantic import BaseModel, ConfigDict, Field, field_validator
 from uuid import uuid4
 from enum import Enum
 from ripperdoc.utils.log import get_logger
@@ -35,6 +35,23 @@ class MessageContent(BaseModel):
     name: Optional[str] = None
     input: Optional[Dict[str, object]] = None
     is_error: Optional[bool] = None
+    # Image/vision content fields
+    source_type: Optional[str] = None  # "base64", "url", "file"
+    media_type: Optional[str] = None  # "image/jpeg", "image/png", etc.
+    image_data: Optional[str] = None  # base64-encoded image data or URL
+
+    @field_validator("input", mode="before")
+    @classmethod
+    def validate_input(cls, v):
+        """Ensure input is always a dict, never a Pydantic model."""
+        if v is not None and not isinstance(v, dict):
+            if hasattr(v, "model_dump"):
+                v = v.model_dump()
+            elif hasattr(v, "dict"):
+                v = v.dict()
+            else:
+                v = {"value": str(v)}
+        return v
 
 
 def _content_block_to_api(block: MessageContent) -> Dict[str, Any]:
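Note: `mode="before"` runs the validator on the raw value before Pydantic enforces the `Optional[Dict[str, object]]` annotation, which is what lets a stray Pydantic model be flattened instead of rejected. A self-contained sketch of the pattern (model names hypothetical):

from typing import Any, Dict, Optional
from pydantic import BaseModel, field_validator

class ToolArgs(BaseModel):
    path: str

class Block(BaseModel):
    input: Optional[Dict[str, object]] = None

    @field_validator("input", mode="before")
    @classmethod
    def coerce_input(cls, v: Any) -> Any:
        # Runs before the Dict annotation is checked, so a model
        # instance can be converted to a plain dict here.
        if v is not None and not isinstance(v, dict) and hasattr(v, "model_dump"):
            return v.model_dump()
        return v

assert Block(input=ToolArgs(path="a.txt")).input == {"path": "a.txt"}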
@@ -53,11 +70,19 @@ def _content_block_to_api(block: MessageContent) -> Dict[str, Any]:
             "signature": getattr(block, "signature", None),
         }
     if block_type == "tool_use":
+        input_value = getattr(block, "input", None) or {}
+        # Ensure input is a dict, not a Pydantic model
+        if hasattr(input_value, "model_dump"):
+            input_value = input_value.model_dump()
+        elif hasattr(input_value, "dict"):
+            input_value = input_value.dict()
+        elif not isinstance(input_value, dict):
+            input_value = {"value": str(input_value)}
         return {
             "type": "tool_use",
             "id": getattr(block, "id", None) or getattr(block, "tool_use_id", "") or "",
             "name": getattr(block, "name", None) or "",
-            "input": getattr(block, "input", None) or {},
+            "input": input_value,
         }
     if block_type == "tool_result":
         result: Dict[str, Any] = {
@@ -73,6 +98,15 @@ def _content_block_to_api(block: MessageContent) -> Dict[str, Any]:
         if getattr(block, "is_error", None) is not None:
             result["is_error"] = block.is_error
         return result
+    if block_type == "image":
+        return {
+            "type": "image",
+            "source": {
+                "type": getattr(block, "source_type", None) or "base64",
+                "media_type": getattr(block, "media_type", None) or "image/jpeg",
+                "data": getattr(block, "image_data", None) or "",
+            },
+        }
     # Default to text block
     return {
         "type": "text",
@@ -124,6 +158,15 @@ def _content_block_to_openai(block: MessageContent) -> Dict[str, Any]:
             "tool_call_id": tool_call_id,
             "content": getattr(block, "text", None) or getattr(block, "content", None) or "",
         }
+    if block_type == "image":
+        # OpenAI uses data URL format for images
+        media_type = getattr(block, "media_type", None) or "image/jpeg"
+        image_data = getattr(block, "image_data", None) or ""
+        data_url = f"data:{media_type};base64,{image_data}"
+        return {
+            "type": "image_url",
+            "image_url": {"url": data_url},
+        }
     # Fallback text message
     return {
         "role": "assistant",
@@ -152,6 +195,7 @@ class UserMessage(BaseModel):
     type: str = "user"
     message: Message
     uuid: str = ""
+    parent_tool_use_id: Optional[str] = None
     tool_use_result: Optional[object] = None
 
     def __init__(self, **data: object) -> None:
@@ -166,9 +210,17 @@ class AssistantMessage(BaseModel):
     type: str = "assistant"
     message: Message
     uuid: str = ""
+    parent_tool_use_id: Optional[str] = None
     cost_usd: float = 0.0
     duration_ms: float = 0.0
     is_api_error_message: bool = False
+    # Model and token usage information
+    model: Optional[str] = None
+    input_tokens: int = 0
+    output_tokens: int = 0
+    cache_read_tokens: int = 0
+    cache_creation_tokens: int = 0
+    error: Optional[str] = None
 
     def __init__(self, **data: object) -> None:
         if "uuid" not in data or not data["uuid"]:
@@ -185,6 +237,7 @@ class ProgressMessage(BaseModel):
     content: Any
     normalized_messages: List[Message] = []
     sibling_tool_use_ids: set[str] = set()
+    is_subagent_message: bool = False  # Flag to indicate if content is a subagent message
     model_config = ConfigDict(arbitrary_types_allowed=True)
 
     def __init__(self, **data: object) -> None:
@@ -194,7 +247,9 @@
 
 
 def create_user_message(
-    content: Union[str, List[Dict[str, Any]]], tool_use_result: Optional[object] = None
+    content: Union[str, List[Dict[str, Any]]],
+    tool_use_result: Optional[object] = None,
+    parent_tool_use_id: Optional[str] = None,
 ) -> UserMessage:
     """Create a user message."""
     if isinstance(content, str):
@@ -228,7 +283,39 @@ def create_user_message(
                 f"ids={[getattr(b, 'tool_use_id', None) for b in tool_result_blocks]}"
             )
 
-    return UserMessage(message=message, tool_use_result=tool_use_result)
+    return UserMessage(
+        message=message,
+        tool_use_result=tool_use_result,
+        parent_tool_use_id=parent_tool_use_id,
+    )
+
+
+def _normalize_content_item(item: Dict[str, Any]) -> Dict[str, Any]:
+    """Normalize a content item to ensure all fields are JSON-serializable.
+
+    This is needed because some API providers may return Pydantic models
+    for tool input fields, which need to be converted to dicts for proper
+    serialization and later processing.
+
+    Args:
+        item: The content item dict from API response
+
+    Returns:
+        Normalized content item with all fields JSON-serializable
+    """
+    normalized = dict(item)
+
+    # If input is a Pydantic model, convert to dict
+    if 'input' in normalized and normalized['input'] is not None:
+        input_value = normalized['input']
+        if hasattr(input_value, 'model_dump'):
+            normalized['input'] = input_value.model_dump()
+        elif hasattr(input_value, 'dict'):
+            normalized['input'] = input_value.dict()
+        elif not isinstance(input_value, dict):
+            normalized['input'] = {'value': str(input_value)}
+
+    return normalized
 
 
 def create_assistant_message(
@@ -237,12 +324,20 @@
     duration_ms: float = 0.0,
     reasoning: Optional[Any] = None,
     metadata: Optional[Dict[str, Any]] = None,
+    model: Optional[str] = None,
+    input_tokens: int = 0,
+    output_tokens: int = 0,
+    cache_read_tokens: int = 0,
+    cache_creation_tokens: int = 0,
+    parent_tool_use_id: Optional[str] = None,
+    error: Optional[str] = None,
 ) -> AssistantMessage:
     """Create an assistant message."""
     if isinstance(content, str):
         message_content: Union[str, List[MessageContent]] = content
     else:
-        message_content = [MessageContent(**item) for item in content]
+        # Normalize content items to ensure tool input is always a dict
+        message_content = [MessageContent(**_normalize_content_item(item)) for item in content]
 
     message = Message(
         role=MessageRole.ASSISTANT,
@@ -251,7 +346,18 @@
         metadata=metadata or {},
     )
 
-    return AssistantMessage(message=message, cost_usd=cost_usd, duration_ms=duration_ms)
+    return AssistantMessage(
+        message=message,
+        cost_usd=cost_usd,
+        duration_ms=duration_ms,
+        model=model,
+        input_tokens=input_tokens,
+        output_tokens=output_tokens,
+        cache_read_tokens=cache_read_tokens,
+        cache_creation_tokens=cache_creation_tokens,
+        parent_tool_use_id=parent_tool_use_id,
+        error=error,
+    )
 
 
 def create_progress_message(
@@ -259,6 +365,7 @@
     sibling_tool_use_ids: set[str],
     content: Any,
     normalized_messages: Optional[List[Message]] = None,
+    is_subagent_message: bool = False,
 ) -> ProgressMessage:
     """Create a progress message."""
     return ProgressMessage(
@@ -266,6 +373,7 @@
         sibling_tool_use_ids=sibling_tool_use_ids,
         content=content,
         normalized_messages=normalized_messages or [],
+        is_subagent_message=is_subagent_message,
     )
 
 
@@ -434,33 +542,90 @@ def normalize_messages_for_api(
         meta = _msg_metadata(msg)
         if isinstance(user_content, list):
             if protocol == "openai":
-                # Map each block to an OpenAI-style message
-                openai_msgs: List[Dict[str, Any]] = []
-                for block in user_content:
-                    block_type = getattr(block, "type", None)
-                    if block_type == "tool_result":
-                        tool_results_seen += 1
-                        # Skip tool_result blocks that lack a preceding tool_use
-                        tool_id = getattr(block, "tool_use_id", None) or getattr(
-                            block, "id", None
-                        )
-                        if not tool_id:
-                            skipped_tool_results_no_call += 1
-                            continue
-                        call_pos = tool_use_positions.get(tool_id)
-                        if call_pos is None or call_pos >= msg_index:
-                            skipped_tool_results_no_call += 1
-                            continue
-                    mapped = _content_block_to_openai(block)
-                    if mapped:
-                        openai_msgs.append(mapped)
-                if meta and openai_msgs:
-                    for candidate in openai_msgs:
-                        for key in ("reasoning_content", "reasoning_details", "reasoning"):
-                            if key in meta and meta[key] is not None:
-                                candidate[key] = meta[key]
-                normalized.extend(openai_msgs)
-                continue
+                # Check if this message contains images
+                has_images = any(
+                    getattr(block, "type", None) == "image" for block in user_content
+                )
+                has_text_only = all(
+                    getattr(block, "type", None) in ("text", "image", "tool_result")
+                    for block in user_content
+                )
+
+                # If message has images or only text/images (no tool_result), use content array format
+                if has_images or (
+                    has_text_only
+                    and not any(
+                        getattr(block, "type", None) == "tool_result" for block in user_content
+                    )
+                ):
+                    content_array: List[Dict[str, Any]] = []
+                    for block in user_content:
+                        block_type = getattr(block, "type", None)
+                        if block_type == "image":
+                            content_array.append(_content_block_to_openai(block))
+                        elif block_type == "text":
+                            content_array.append(
+                                {
+                                    "type": "text",
+                                    "text": getattr(block, "text", "") or "",
+                                }
+                            )
+                        elif block_type == "tool_result":
+                            # Handle tool_result separately
+                            tool_results_seen += 1
+                            tool_id = getattr(block, "tool_use_id", None) or getattr(
+                                block, "id", None
+                            )
+                            if not tool_id:
+                                skipped_tool_results_no_call += 1
+                                continue
+                            call_pos = tool_use_positions.get(tool_id)
+                            if call_pos is None or call_pos >= msg_index:
+                                skipped_tool_results_no_call += 1
+                                continue
+                            mapped = _content_block_to_openai(block)
+                            if mapped:
+                                normalized.append(mapped)
+
+                    if content_array:
+                        user_msg: Dict[str, Any] = {
+                            "role": "user",
+                            "content": content_array,
+                        }
+                        if meta:
+                            for key in ("reasoning_content", "reasoning_details", "reasoning"):
+                                if key in meta and meta[key] is not None:
+                                    user_msg[key] = meta[key]
+                        normalized.append(user_msg)
+                    continue
+                else:
+                    # Original behavior for tool_result messages
+                    openai_msgs: List[Dict[str, Any]] = []
+                    for block in user_content:
+                        block_type = getattr(block, "type", None)
+                        if block_type == "tool_result":
+                            tool_results_seen += 1
+                            # Skip tool_result blocks that lack a preceding tool_use
+                            tool_id = getattr(block, "tool_use_id", None) or getattr(
+                                block, "id", None
+                            )
+                            if not tool_id:
+                                skipped_tool_results_no_call += 1
+                                continue
+                            call_pos = tool_use_positions.get(tool_id)
+                            if call_pos is None or call_pos >= msg_index:
+                                skipped_tool_results_no_call += 1
+                                continue
+                        mapped = _content_block_to_openai(block)
+                        if mapped:
+                            openai_msgs.append(mapped)
+                    if meta and openai_msgs:
+                        for candidate in openai_msgs:
+                            for key in ("reasoning_content", "reasoning_details", "reasoning"):
+                                if key in meta and meta[key] is not None:
+                                    candidate[key] = meta[key]
+                    normalized.extend(openai_msgs)
+                    continue
             api_blocks = []
             for block in user_content:
                 if getattr(block, "type", None) == "tool_result":
ripperdoc/utils/pending_messages.py ADDED
@@ -0,0 +1,50 @@
+"""Thread-safe queue for pending conversation messages.
+
+Allows background tasks or external events to enqueue user messages that
+should be injected into the conversation once the current iteration
+finishes. Messages are drained in FIFO order.
+"""
+
+from collections import deque
+import threading
+from typing import Any, Deque, Dict, List, Optional
+
+from ripperdoc.utils.messages import UserMessage, create_user_message
+
+
+class PendingMessageQueue:
+    """Thread-safe queue for pending user messages."""
+
+    def __init__(self) -> None:
+        self._queue: Deque[UserMessage] = deque()
+        self._lock = threading.Lock()
+
+    def enqueue(self, message: UserMessage) -> None:
+        """Add a pre-built UserMessage to the queue."""
+        with self._lock:
+            self._queue.append(message)
+
+    def enqueue_text(self, text: str, metadata: Optional[Dict[str, Any]] = None) -> None:
+        """Create and enqueue a UserMessage with optional metadata."""
+        message = create_user_message(text)
+        if metadata:
+            try:
+                message.message.metadata.update(metadata)
+            except Exception:
+                # Best-effort metadata attachment; ignore failures.
+                pass
+        self.enqueue(message)
+
+    def drain(self) -> List[UserMessage]:
+        """Drain all pending messages in FIFO order."""
+        with self._lock:
+            if not self._queue:
+                return []
+            messages = list(self._queue)
+            self._queue.clear()
+            return messages
+
+    def has_messages(self) -> bool:
+        """Check if there are pending messages."""
+        with self._lock:
+            return bool(self._queue)
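Note: a usage sketch for the new queue; the draining loop and the metadata key are hypothetical (only `PendingMessageQueue` itself comes from this file):

from ripperdoc.utils.pending_messages import PendingMessageQueue

queue = PendingMessageQueue()

# A background watcher (possibly on another thread) enqueues input:
queue.enqueue_text(
    "The build finished; please summarize the log.",
    metadata={"source": "file_watcher"},  # hypothetical metadata key
)

# The conversation loop injects pending messages between iterations:
if queue.has_messages():
    for user_msg in queue.drain():  # FIFO order
        print(user_msg.message.content)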
ripperdoc/utils/permissions/shell_command_validation.py CHANGED
@@ -662,7 +662,7 @@ def validate_shell_command(shell_command: str) -> ValidationResult:
         lex = shlex.shlex(cmd, posix=True)
         lex.whitespace_split = True  # Split on whitespace, better for argument parsing
         lex.commenters = ""  # Don't treat # as comment for security analysis
-
+
         tokens = []
         try:
             # Get all tokens
@@ -691,7 +691,7 @@ def validate_shell_command(shell_command: str) -> ValidationResult:
                     # Single ; & | are dangerous
                    return True
                i += 1
-
+
            # Also check for find -exec escaped semicolon pattern
            # shlex will have already parsed \; as separate token ';' (since escaped)
            # We need to check if this ; is part of find -exec pattern
@@ -716,7 +716,7 @@ def validate_shell_command(shell_command: str) -> ValidationResult:
                    continue
                # Not part of find -exec, so it's dangerous
                return True
-
+
        return False
 
    if has_metachars_outside_quotes(sanitized_for_metachar_check):
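Note: all three hunks in this file are whitespace-only cleanups; the interesting machinery lives in the context lines. Posix-mode `shlex` with `whitespace_split` keeps quoted metacharacters inside their argument while whitespace-separated ones surface as standalone tokens. A stripped-down sketch of that tokenization (the `find -exec \;` whitelisting shown above is elided):

import shlex
from typing import List

def tokens(cmd: str) -> List[str]:
    lex = shlex.shlex(cmd, posix=True)
    lex.whitespace_split = True  # keep whole arguments together
    lex.commenters = ""          # '#' must stay visible to the analysis
    return list(lex)

# An unquoted, whitespace-separated ';' surfaces as its own token...
print(tokens("grep -r foo . ; rm -rf /"))  # [..., ';', 'rm', '-rf', '/']
# ...while a quoted one is just data inside a single argument:
print(tokens("grep 'a ; b' file.txt"))     # ['grep', 'a ; b', 'file.txt']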