ripperdoc 0.2.8__py3-none-any.whl → 0.2.9__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ripperdoc/__init__.py +1 -1
- ripperdoc/cli/cli.py +28 -115
- ripperdoc/cli/commands/__init__.py +0 -1
- ripperdoc/cli/commands/agents_cmd.py +6 -3
- ripperdoc/cli/commands/clear_cmd.py +1 -4
- ripperdoc/cli/commands/config_cmd.py +1 -1
- ripperdoc/cli/commands/context_cmd.py +3 -2
- ripperdoc/cli/commands/doctor_cmd.py +18 -4
- ripperdoc/cli/commands/hooks_cmd.py +27 -53
- ripperdoc/cli/commands/models_cmd.py +26 -9
- ripperdoc/cli/commands/permissions_cmd.py +27 -9
- ripperdoc/cli/commands/resume_cmd.py +5 -3
- ripperdoc/cli/commands/status_cmd.py +4 -4
- ripperdoc/cli/commands/tasks_cmd.py +8 -4
- ripperdoc/cli/ui/file_mention_completer.py +2 -1
- ripperdoc/cli/ui/interrupt_handler.py +2 -3
- ripperdoc/cli/ui/message_display.py +4 -2
- ripperdoc/cli/ui/provider_options.py +247 -0
- ripperdoc/cli/ui/rich_ui.py +110 -59
- ripperdoc/cli/ui/spinner.py +25 -1
- ripperdoc/cli/ui/tool_renderers.py +8 -2
- ripperdoc/cli/ui/wizard.py +215 -0
- ripperdoc/core/agents.py +9 -3
- ripperdoc/core/config.py +49 -12
- ripperdoc/core/custom_commands.py +7 -6
- ripperdoc/core/default_tools.py +11 -2
- ripperdoc/core/hooks/config.py +1 -3
- ripperdoc/core/hooks/events.py +23 -28
- ripperdoc/core/hooks/executor.py +4 -6
- ripperdoc/core/hooks/integration.py +12 -21
- ripperdoc/core/hooks/manager.py +40 -15
- ripperdoc/core/permissions.py +40 -8
- ripperdoc/core/providers/anthropic.py +109 -36
- ripperdoc/core/providers/gemini.py +70 -5
- ripperdoc/core/providers/openai.py +60 -5
- ripperdoc/core/query.py +82 -38
- ripperdoc/core/query_utils.py +2 -0
- ripperdoc/core/skills.py +9 -3
- ripperdoc/core/system_prompt.py +4 -2
- ripperdoc/core/tool.py +9 -5
- ripperdoc/sdk/client.py +2 -2
- ripperdoc/tools/ask_user_question_tool.py +5 -3
- ripperdoc/tools/background_shell.py +2 -1
- ripperdoc/tools/bash_output_tool.py +1 -1
- ripperdoc/tools/bash_tool.py +26 -16
- ripperdoc/tools/dynamic_mcp_tool.py +29 -8
- ripperdoc/tools/enter_plan_mode_tool.py +1 -1
- ripperdoc/tools/exit_plan_mode_tool.py +1 -1
- ripperdoc/tools/file_edit_tool.py +8 -4
- ripperdoc/tools/file_read_tool.py +8 -4
- ripperdoc/tools/file_write_tool.py +9 -5
- ripperdoc/tools/glob_tool.py +3 -2
- ripperdoc/tools/grep_tool.py +3 -2
- ripperdoc/tools/kill_bash_tool.py +1 -1
- ripperdoc/tools/ls_tool.py +1 -1
- ripperdoc/tools/mcp_tools.py +13 -10
- ripperdoc/tools/multi_edit_tool.py +8 -7
- ripperdoc/tools/notebook_edit_tool.py +7 -4
- ripperdoc/tools/skill_tool.py +1 -1
- ripperdoc/tools/task_tool.py +5 -4
- ripperdoc/tools/todo_tool.py +2 -2
- ripperdoc/tools/tool_search_tool.py +3 -2
- ripperdoc/utils/conversation_compaction.py +8 -4
- ripperdoc/utils/file_watch.py +8 -2
- ripperdoc/utils/json_utils.py +2 -1
- ripperdoc/utils/mcp.py +11 -3
- ripperdoc/utils/memory.py +4 -2
- ripperdoc/utils/message_compaction.py +21 -7
- ripperdoc/utils/message_formatting.py +11 -7
- ripperdoc/utils/messages.py +105 -66
- ripperdoc/utils/path_ignore.py +35 -8
- ripperdoc/utils/permissions/path_validation_utils.py +2 -1
- ripperdoc/utils/permissions/shell_command_validation.py +427 -91
- ripperdoc/utils/safe_get_cwd.py +2 -1
- ripperdoc/utils/session_history.py +13 -6
- ripperdoc/utils/todo.py +2 -1
- ripperdoc/utils/token_estimation.py +6 -1
- {ripperdoc-0.2.8.dist-info → ripperdoc-0.2.9.dist-info}/METADATA +1 -1
- ripperdoc-0.2.9.dist-info/RECORD +123 -0
- ripperdoc-0.2.8.dist-info/RECORD +0 -121
- {ripperdoc-0.2.8.dist-info → ripperdoc-0.2.9.dist-info}/WHEEL +0 -0
- {ripperdoc-0.2.8.dist-info → ripperdoc-0.2.9.dist-info}/entry_points.txt +0 -0
- {ripperdoc-0.2.8.dist-info → ripperdoc-0.2.9.dist-info}/licenses/LICENSE +0 -0
- {ripperdoc-0.2.8.dist-info → ripperdoc-0.2.9.dist-info}/top_level.txt +0 -0
ripperdoc/core/providers/openai.py
CHANGED
@@ -3,6 +3,7 @@
 from __future__ import annotations
 
 import asyncio
+import json
 import time
 from typing import Any, Dict, List, Optional, cast
 from uuid import uuid4
@@ -94,7 +95,7 @@ def _detect_openai_vendor(model_profile: ModelProfile) -> str:
     if "generativelanguage.googleapis.com" in base or name.startswith("gemini"):
         return "gemini_openai"
     if "gpt-5" in name:
-        return "
+        return "openai"
     return "openai"
 
 
@@ -130,7 +131,7 @@ def _build_thinking_kwargs(
         if effort:
             top_level["reasoning_effort"] = effort
             extra_body.setdefault("reasoning", {"effort": effort})
-    elif vendor == "
+    elif vendor == "openai":
         if effort:
             extra_body["reasoning"] = {"effort": effort}
         else:
@@ -178,6 +179,15 @@ class OpenAIClient(ProviderClient):
         except Exception as exc:
             duration_ms = (time.time() - start_time) * 1000
             error_code, error_message = _classify_openai_error(exc)
+            logger.debug(
+                "[openai_client] Exception details",
+                extra={
+                    "model": model_profile.model,
+                    "exception_type": type(exc).__name__,
+                    "exception_str": str(exc),
+                    "error_code": error_code,
+                },
+            )
             logger.error(
                 "[openai_client] API call failed",
                 extra={
@@ -213,6 +223,18 @@ class OpenAIClient(ProviderClient):
         openai_messages: List[Dict[str, object]] = [
             {"role": "system", "content": system_prompt}
         ] + sanitize_tool_history(list(normalized_messages))
+
+        logger.debug(
+            "[openai_client] Preparing request",
+            extra={
+                "model": model_profile.model,
+                "tool_mode": tool_mode,
+                "stream": stream,
+                "max_thinking_tokens": max_thinking_tokens,
+                "num_tools": len(openai_tools),
+                "num_messages": len(openai_messages),
+            },
+        )
         collected_text: List[str] = []
         streamed_tool_calls: Dict[int, Dict[str, Optional[str]]] = {}
         streamed_tool_text: List[str] = []
@@ -228,6 +250,16 @@ class OpenAIClient(ProviderClient):
             model_profile, max_thinking_tokens
         )
 
+        logger.debug(
+            "[openai_client] Request parameters",
+            extra={
+                "model": model_profile.model,
+                "thinking_extra_body": json.dumps(thinking_extra_body, ensure_ascii=False),
+                "thinking_top_level": json.dumps(thinking_top_level, ensure_ascii=False),
+                "messages_preview": json.dumps(openai_messages[:2], ensure_ascii=False)[:500],
+            },
+        )
+
         async with AsyncOpenAI(
             api_key=model_profile.api_key, base_url=model_profile.api_base
         ) as client:
@@ -246,6 +278,16 @@ class OpenAIClient(ProviderClient):
                 }
                 if thinking_extra_body:
                     stream_kwargs["extra_body"] = thinking_extra_body
+                logger.debug(
+                    "[openai_client] Initiating stream request",
+                    extra={
+                        "model": model_profile.model,
+                        "stream_kwargs": json.dumps(
+                            {k: v for k, v in stream_kwargs.items() if k != "messages"},
+                            ensure_ascii=False,
+                        ),
+                    },
+                )
                 stream_coro = client.chat.completions.create(  # type: ignore[call-overload]
                     **stream_kwargs
                 )
@@ -303,7 +345,8 @@ class OpenAIClient(ProviderClient):
                             except (RuntimeError, ValueError, TypeError, OSError) as cb_exc:
                                 logger.warning(
                                     "[openai_client] Stream callback failed: %s: %s",
-                                    type(cb_exc).__name__,
+                                    type(cb_exc).__name__,
+                                    cb_exc,
                                 )
 
                     # Tool call deltas for native tool mode
@@ -333,7 +376,8 @@ class OpenAIClient(ProviderClient):
                             except (RuntimeError, ValueError, TypeError, OSError) as cb_exc:
                                 logger.warning(
                                     "[openai_client] Stream callback failed: %s: %s",
-                                    type(cb_exc).__name__,
+                                    type(cb_exc).__name__,
+                                    cb_exc,
                                 )
 
                         if idx not in announced_tool_indexes and state.get("name"):
@@ -344,7 +388,8 @@ class OpenAIClient(ProviderClient):
                             except (RuntimeError, ValueError, TypeError, OSError) as cb_exc:
                                 logger.warning(
                                     "[openai_client] Stream callback failed: %s: %s",
-                                    type(cb_exc).__name__,
+                                    type(cb_exc).__name__,
+                                    cb_exc,
                                 )
 
                             streamed_tool_calls[idx] = state
@@ -467,6 +512,16 @@ class OpenAIClient(ProviderClient):
         if stream_reasoning_details:
             response_metadata["reasoning_details"] = stream_reasoning_details
 
+        logger.debug(
+            "[openai_client] Response content blocks",
+            extra={
+                "model": model_profile.model,
+                "content_blocks": json.dumps(content_blocks, ensure_ascii=False)[:1000],
+                "usage_tokens": json.dumps(usage_tokens, ensure_ascii=False),
+                "metadata": json.dumps(response_metadata, ensure_ascii=False)[:500],
+            },
+        )
+
         logger.info(
             "[openai_client] Response received",
             extra={
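Most of the additions above are debug-level logging that follows one pattern: structured fields go into `extra`, and anything unbounded (message history, content blocks) is JSON-serialized with `ensure_ascii=False` and sliced to a fixed length before it reaches the log. A minimal standard-library sketch of that pattern; the logger name and field key here are illustrative, not the module's exact code:

import json
import logging

logger = logging.getLogger("ripperdoc.core.providers.openai")

def debug_payload(label: str, payload: object, limit: int = 500) -> None:
    # Serialize eagerly and cap the size so a large message history cannot
    # flood the debug log; ensure_ascii=False keeps non-ASCII text readable.
    logger.debug(label, extra={"payload": json.dumps(payload, ensure_ascii=False)[:limit]})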
ripperdoc/core/query.py
CHANGED
@@ -26,7 +26,7 @@ from typing import (
 
 from pydantic import ValidationError
 
-from ripperdoc.core.config import provider_protocol
+from ripperdoc.core.config import ModelProfile, provider_protocol
 from ripperdoc.core.providers import ProviderClient, get_provider_client
 from ripperdoc.core.permissions import PermissionResult
 from ripperdoc.core.hooks.manager import hook_manager
@@ -65,6 +65,42 @@ DEFAULT_REQUEST_TIMEOUT_SEC = float(os.getenv("RIPPERDOC_API_TIMEOUT", "120"))
 MAX_LLM_RETRIES = int(os.getenv("RIPPERDOC_MAX_RETRIES", "10"))
 
 
+def infer_thinking_mode(model_profile: ModelProfile) -> Optional[str]:
+    """Infer thinking mode from ModelProfile if not explicitly configured.
+
+    This function checks the model_profile.thinking_mode first. If it's set,
+    returns that value. Otherwise, auto-detects based on api_base and model name.
+
+    Args:
+        model_profile: The model profile to analyze
+
+    Returns:
+        Thinking mode string ("deepseek", "qwen", "openrouter", "gemini_openai")
+        or None if no thinking mode should be applied.
+    """
+    # Use explicit config if set
+    explicit_mode = model_profile.thinking_mode
+    if explicit_mode:
+        return explicit_mode
+
+    # Auto-detect based on API base and model name
+    base = (model_profile.api_base or "").lower()
+    name = (model_profile.model or "").lower()
+
+    if "deepseek" in base or name.startswith("deepseek"):
+        return "deepseek"
+    if "dashscope" in base or "qwen" in name:
+        return "qwen"
+    if "openrouter.ai" in base:
+        return "openrouter"
+    if "generativelanguage.googleapis.com" in base or name.startswith("gemini"):
+        return "gemini_openai"
+    if "openai" in base:
+        return "openai"
+
+    return None
+
+
 def _resolve_tool(
     tool_registry: "ToolRegistry", tool_name: str, tool_use_id: str
 ) -> tuple[Optional[Tool[Any, Any]], Optional[UserMessage]]:
@@ -110,7 +146,7 @@ async def _check_tool_permissions(
         return bool(decision[0]), decision[1]
     return bool(decision), None
 
-    if query_context.
+    if not query_context.yolo_mode and tool.needs_permissions(parsed_input):
         loop = asyncio.get_running_loop()
         input_preview = (
             parsed_input.model_dump()
@@ -159,7 +195,9 @@ async def _run_tool_use_generator(
     tool_input_dict = (
         parsed_input.model_dump()
        if hasattr(parsed_input, "model_dump")
-        else dict(parsed_input)
+        else dict(parsed_input)
+        if isinstance(parsed_input, dict)
+        else {}
     )
 
     # Run PreToolUse hooks
@@ -199,7 +237,6 @@ async def _run_tool_use_generator(
     )
 
     tool_output = None
-    tool_error = None
 
     try:
         async for output in tool.call(parsed_input, tool_context):
@@ -224,10 +261,11 @@ async def _run_tool_use_generator(
     except CancelledError:
         raise  # Don't suppress task cancellation
     except (RuntimeError, ValueError, TypeError, OSError, IOError, AttributeError, KeyError) as exc:
-        tool_error = str(exc)
         logger.warning(
             "Error executing tool '%s': %s: %s",
-            tool_name,
+            tool_name,
+            type(exc).__name__,
+            exc,
             extra={"tool": tool_name, "tool_use_id": tool_use_id},
         )
         yield tool_result_message(tool_use_id, f"Error executing tool: {str(exc)}", is_error=True)
@@ -321,7 +359,8 @@ async def _run_concurrent_tool_uses(
         except (RuntimeError, ValueError, TypeError) as exc:
             logger.warning(
                 "[query] Error while consuming tool generator: %s: %s",
-                type(exc).__name__,
+                type(exc).__name__,
+                exc,
             )
         finally:
             await queue.put(None)
@@ -374,7 +413,8 @@ class ToolRegistry:
         except (TypeError, AttributeError) as exc:
             logger.warning(
                 "[tool_registry] Tool.defer_loading failed: %s: %s",
-                type(exc).__name__,
+                type(exc).__name__,
+                exc,
                 extra={"tool": getattr(tool, "name", None)},
             )
             deferred = False
@@ -461,7 +501,8 @@ def _apply_skill_context_updates(
     except (KeyError, ValueError, TypeError) as exc:
         logger.warning(
             "[query] Failed to activate tools listed in skill output: %s: %s",
-            type(exc).__name__,
+            type(exc).__name__,
+            exc,
         )
 
     model_hint = data.get("model")
@@ -491,7 +532,7 @@ class QueryContext:
         self,
         tools: List[Tool[Any, Any]],
         max_thinking_tokens: int = 0,
-
+        yolo_mode: bool = False,
         model: str = "main",
         verbose: bool = False,
         pause_ui: Optional[Callable[[], None]] = None,
@@ -499,7 +540,7 @@ class QueryContext:
     ) -> None:
         self.tool_registry = ToolRegistry(tools)
         self.max_thinking_tokens = max_thinking_tokens
-        self.
+        self.yolo_mode = yolo_mode
         self.model = model
         self.verbose = verbose
         self.abort_controller = asyncio.Event()
@@ -572,8 +613,22 @@ async def query_llm(
     else:
         messages_for_model = messages
 
+    # Get thinking_mode for provider-specific handling
+    # Apply when thinking is enabled (max_thinking_tokens > 0) OR when using a
+    # reasoning model like deepseek-reasoner which has thinking enabled by default
+    thinking_mode: Optional[str] = None
+    if protocol == "openai":
+        model_name = (model_profile.model or "").lower()
+        # DeepSeek Reasoner models have thinking enabled by default
+        is_reasoning_model = "reasoner" in model_name or "r1" in model_name
+        if max_thinking_tokens > 0 or is_reasoning_model:
+            thinking_mode = infer_thinking_mode(model_profile)
+
     normalized_messages: List[Dict[str, Any]] = normalize_messages_for_api(
-        messages_for_model,
+        messages_for_model,
+        protocol=protocol,
+        tool_mode=tool_mode,
+        thinking_mode=thinking_mode,
     )
     logger.info(
         "[query_llm] Preparing model request",
@@ -584,6 +639,7 @@ async def query_llm(
             "normalized_messages": len(normalized_messages),
             "tool_count": len(tools),
             "max_thinking_tokens": max_thinking_tokens,
+            "thinking_mode": thinking_mode,
             "tool_mode": tool_mode,
         },
     )
@@ -667,7 +723,8 @@ async def query_llm(
         # Return error message
         logger.warning(
             "Error querying AI model: %s: %s",
-            type(e).__name__,
+            type(e).__name__,
+            e,
             extra={
                 "model": getattr(model_profile, "model", None),
                 "model_pointer": model,
@@ -758,9 +815,7 @@ async def _run_query_iteration(
 
     model_profile = resolve_model_profile(query_context.model)
     tool_mode = determine_tool_mode(model_profile)
-    tools_for_model: List[Tool[Any, Any]] = (
-        [] if tool_mode == "text" else query_context.all_tools()
-    )
+    tools_for_model: List[Tool[Any, Any]] = [] if tool_mode == "text" else query_context.all_tools()
 
     full_system_prompt = build_full_system_prompt(
         system_prompt, context, tool_mode, query_context.all_tools()
@@ -832,7 +887,7 @@ async def _run_query_iteration(
             done, pending = await asyncio.wait(
                 {assistant_task, waiter},
                 return_when=asyncio.FIRST_COMPLETED,
-                timeout=0.1  # Check abort_controller every 100ms
+                timeout=0.1,  # Check abort_controller every 100ms
             )
             if not done:
                 # Timeout - cancel waiter and continue loop to check abort_controller
@@ -890,8 +945,7 @@ async def _run_query_iteration(
     tool_results: List[UserMessage] = []
     permission_denied = False
     sibling_ids = set(
-        getattr(t, "tool_use_id", None) or getattr(t, "id", None) or ""
-        for t in tool_use_blocks
+        getattr(t, "tool_use_id", None) or getattr(t, "id", None) or "" for t in tool_use_blocks
     )
     prepared_calls: List[Dict[str, Any]] = []
 
@@ -899,18 +953,12 @@ async def _run_query_iteration(
         tool_name = tool_use.name
         if not tool_name:
             continue
-        tool_use_id = (
-            getattr(tool_use, "tool_use_id", None) or getattr(tool_use, "id", None) or ""
-        )
+        tool_use_id = getattr(tool_use, "tool_use_id", None) or getattr(tool_use, "id", None) or ""
         tool_input = getattr(tool_use, "input", {}) or {}
 
-        tool, missing_msg = _resolve_tool(
-            query_context.tool_registry, tool_name, tool_use_id
-        )
+        tool, missing_msg = _resolve_tool(query_context.tool_registry, tool_name, tool_use_id)
        if missing_msg:
-            logger.warning(
-                f"[query] Tool '{tool_name}' not found for tool_use_id={tool_use_id}"
-            )
+            logger.warning(f"[query] Tool '{tool_name}' not found for tool_use_id={tool_use_id}")
             tool_results.append(missing_msg)
             yield missing_msg
             continue
@@ -924,7 +972,7 @@ async def _run_query_iteration(
         )
 
         tool_context = ToolUseContext(
-
+            yolo_mode=query_context.yolo_mode,
             verbose=query_context.verbose,
             permission_checker=can_use_tool_fn,
             tool_registry=query_context.tool_registry,
@@ -937,8 +985,7 @@ async def _run_query_iteration(
         validation = await tool.validate_input(parsed_input, tool_context)
         if not validation.result:
             logger.debug(
-                f"[query] Validation failed for tool_use_id={tool_use_id}: "
-                f"{validation.message}"
+                f"[query] Validation failed for tool_use_id={tool_use_id}: {validation.message}"
             )
             result_msg = tool_result_message(
                 tool_use_id,
@@ -949,18 +996,15 @@ async def _run_query_iteration(
             yield result_msg
             continue
 
-        if query_context.
+        if not query_context.yolo_mode or can_use_tool_fn is not None:
             allowed, denial_message = await _check_tool_permissions(
                 tool, parsed_input, query_context, can_use_tool_fn
             )
             if not allowed:
                 logger.debug(
-                    f"[query] Permission denied for tool_use_id={tool_use_id}: "
-                    f"{denial_message}"
-                )
-                denial_text = (
-                    denial_message or f"User aborted the tool invocation: {tool_name}"
+                    f"[query] Permission denied for tool_use_id={tool_use_id}: {denial_message}"
                 )
+                denial_text = denial_message or f"User aborted the tool invocation: {tool_name}"
                 denial_msg = tool_result_message(tool_use_id, denial_text, is_error=True)
                 tool_results.append(denial_msg)
                 yield denial_msg
@@ -1070,7 +1114,7 @@ async def query(
         extra={
             "message_count": len(messages),
             "tool_count": len(query_context.tools),
-            "
+            "yolo_mode": query_context.yolo_mode,
             "model_pointer": query_context.model,
         },
    )
ripperdoc/core/query_utils.py
CHANGED
@@ -462,11 +462,13 @@ def log_openai_messages(normalized_messages: List[Dict[str, Any]]) -> None:
         role = message.get("role")
         tool_calls = message.get("tool_calls")
         tool_call_id = message.get("tool_call_id")
+        has_reasoning = "reasoning_content" in message and message.get("reasoning_content")
         ids = [tc.get("id") for tc in tool_calls] if tool_calls else []
         summary_parts.append(
             f"{idx}:{role}"
             + (f" tool_calls={ids}" if ids else "")
             + (f" tool_call_id={tool_call_id}" if tool_call_id else "")
+            + (" +reasoning" if has_reasoning else "")
         )
     logger.debug(f"[query_llm] OpenAI normalized messages: {' | '.join(summary_parts)}")
 
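For illustration, a message list shaped like the ones this summarizer reads (key names taken from the code above, values hypothetical) would now flag reasoning content in the one-line summary:

# Hypothetical normalized messages; only the keys the summarizer reads are shown.
messages = [
    {"role": "user", "content": "hi"},
    {"role": "assistant", "reasoning_content": "...", "tool_calls": [{"id": "call_1"}]},
    {"role": "tool", "tool_call_id": "call_1", "content": "ok"},
]
# Expected summary:
# 0:user | 1:assistant tool_calls=['call_1'] +reasoning | 2:tool tool_call_id=call_1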
ripperdoc/core/skills.py
CHANGED
@@ -84,10 +84,15 @@ def _split_frontmatter(raw_text: str) -> Tuple[Dict[str, Any], str]:
     body = "\n".join(lines[idx + 1 :])
     try:
         frontmatter = yaml.safe_load(frontmatter_text) or {}
-    except (
+    except (
+        yaml.YAMLError,
+        ValueError,
+        TypeError,
+    ) as exc:  # pragma: no cover - defensive
         logger.warning(
             "[skills] Invalid frontmatter in SKILL.md: %s: %s",
-            type(exc).__name__,
+            type(exc).__name__,
+            exc,
         )
         return {"__error__": f"Invalid frontmatter: {exc}"}, body
     return frontmatter, body
@@ -118,7 +123,8 @@ def _load_skill_file(
     except (OSError, IOError, UnicodeDecodeError) as exc:
         logger.warning(
             "[skills] Failed to read skill file: %s: %s",
-            type(exc).__name__,
+            type(exc).__name__,
+            exc,
             extra={"path": str(path)},
         )
         return None, SkillLoadError(path=path, reason=f"Failed to read file: {exc}")
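The narrowed handler matters because `yaml.safe_load` raises `yaml.YAMLError` subclasses on malformed input rather than a generic exception. A quick standalone reproduction of the failure mode being guarded (hypothetical broken frontmatter, standard PyYAML API):

import yaml

broken_frontmatter = "tools: [Read, Edit"  # unterminated flow sequence
try:
    yaml.safe_load(broken_frontmatter)
except yaml.YAMLError as exc:
    # _split_frontmatter logs this and returns an "__error__" marker instead of raising.
    print(type(exc).__name__, exc)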
ripperdoc/core/system_prompt.py
CHANGED
@@ -49,7 +49,8 @@ def _detect_git_repo(cwd: Path) -> bool:
     except (OSError, subprocess.SubprocessError) as exc:
         logger.warning(
             "[system_prompt] Failed to detect git repository: %s: %s",
-            type(exc).__name__,
+            type(exc).__name__,
+            exc,
             extra={"cwd": str(cwd)},
         )
         return False
@@ -393,7 +394,8 @@ def build_system_prompt(
     except (OSError, ValueError, RuntimeError) as exc:
         logger.warning(
             "Failed to load agent definitions: %s: %s",
-            type(exc).__name__,
+            type(exc).__name__,
+            exc,
         )
         agent_section = (
             "# Subagents\nTask tool available, but agent definitions could not be loaded."
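This hunk repeats a fix applied throughout the release: each of these warnings formats two placeholders (`%s: %s`) but previously passed only `type(exc).__name__`, so the logging module hit a formatting error at emit time. A minimal standard-library reproduction (illustrative message text):

import logging

logging.basicConfig(level=logging.WARNING)
log = logging.getLogger("demo")
exc = ValueError("bad input")

# Before: two placeholders, one argument. logging catches the TypeError
# internally and prints a "--- Logging error ---" traceback to stderr
# instead of the intended record.
log.warning("failed: %s: %s", type(exc).__name__)

# After: both placeholders filled -> "failed: ValueError: bad input"
log.warning("failed: %s: %s", type(exc).__name__, exc)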
ripperdoc/core/tool.py
CHANGED
@@ -37,13 +37,15 @@ class ToolUseContext(BaseModel):
 
     message_id: Optional[str] = None
     agent_id: Optional[str] = None
-
+    yolo_mode: bool = False
     verbose: bool = False
     permission_checker: Optional[Any] = None
     read_file_timestamps: Dict[str, float] = Field(default_factory=dict)
     # SkipValidation prevents Pydantic from copying the dict during validation,
     # ensuring Read and Edit tools share the same cache instance
-    file_state_cache: Annotated[Dict[str, FileSnapshot], SkipValidation] = Field(
+    file_state_cache: Annotated[Dict[str, FileSnapshot], SkipValidation] = Field(
+        default_factory=dict
+    )
     tool_registry: Optional[Any] = None
     abort_signal: Optional[Any] = None
     # UI control callbacks for tools that need user interaction
@@ -110,7 +112,7 @@ class Tool(ABC, Generic[TInput, TOutput]):
         pass
 
     @abstractmethod
-    async def prompt(self,
+    async def prompt(self, yolo_mode: bool = False) -> str:
         """Get the system prompt for this tool."""
         pass
 
@@ -213,7 +215,8 @@ async def build_tool_description(
     except (TypeError, ValueError, AttributeError, KeyError) as exc:
         logger.warning(
             "[tool] Failed to build input example section: %s: %s",
-            type(exc).__name__,
+            type(exc).__name__,
+            exc,
             extra={"tool": getattr(tool, "name", None)},
         )
         return description_text
@@ -233,7 +236,8 @@ def tool_input_examples(tool: Tool[Any, Any], limit: int = 5) -> List[Dict[str,
     except (TypeError, ValueError, AttributeError) as exc:
         logger.warning(
             "[tool] Failed to format tool input example: %s: %s",
-            type(exc).__name__,
+            type(exc).__name__,
+            exc,
             extra={"tool": getattr(tool, "name", None)},
         )
         continue
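With `yolo_mode` now part of the abstract `prompt` signature, every concrete tool must accept the keyword even if it ignores it (the `# noqa: ARG002` in ask_user_question_tool.py below is exactly that case). A hedged sketch of a conforming override; the class is hypothetical and its other abstract methods are omitted:

from ripperdoc.core.tool import Tool

class EchoTool(Tool):  # hypothetical; generic parameters and other overrides omitted
    async def prompt(self, yolo_mode: bool = False) -> str:
        # A tool may tailor its system prompt when permission checks are skipped.
        if yolo_mode:
            return "Echo the input back without asking for confirmation."
        return "Echo the input back; confirm with the user before side effects."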
ripperdoc/sdk/client.py
CHANGED
@@ -83,7 +83,7 @@ class RipperdocOptions:
     tools: Optional[Sequence[Tool[Any, Any]]] = None
     allowed_tools: Optional[Sequence[str]] = None
     disallowed_tools: Optional[Sequence[str]] = None
-
+    yolo_mode: bool = False
     verbose: bool = False
     model: str = "main"
     max_thinking_tokens: int = 0
@@ -227,7 +227,7 @@ class RipperdocClient:
         query_context = QueryContext(
             tools=self._tools,
             max_thinking_tokens=self.options.max_thinking_tokens,
-
+            yolo_mode=self.options.yolo_mode,
             model=self.options.model,
             verbose=self.options.verbose,
         )
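On the SDK surface, the flag is plumbed from `RipperdocOptions` into `QueryContext`. A hedged usage sketch; only `yolo_mode`, `verbose`, `model`, and `max_thinking_tokens` are confirmed by this diff, and the client constructor call is an assumption:

from ripperdoc.sdk.client import RipperdocClient, RipperdocOptions

# yolo_mode=True bypasses interactive permission prompts, so reserve it
# for sandboxed or fully trusted environments.
options = RipperdocOptions(yolo_mode=True, model="main", verbose=True)
client = RipperdocClient(options)  # constructor signature assumed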
ripperdoc/tools/ask_user_question_tool.py
CHANGED
@@ -228,7 +228,8 @@ async def prompt_user_for_answer(
     except (OSError, RuntimeError, ValueError) as e:
         logger.warning(
             "[ask_user_question_tool] Error during prompt: %s: %s",
-            type(e).__name__,
+            type(e).__name__,
+            e,
         )
         return None
 
@@ -275,7 +276,7 @@ class AskUserQuestionTool(Tool[AskUserQuestionToolInput, AskUserQuestionToolOutput]):
     def input_schema(self) -> type[AskUserQuestionToolInput]:
         return AskUserQuestionToolInput
 
-    async def prompt(self,
+    async def prompt(self, yolo_mode: bool = False) -> str:  # noqa: ARG002
         return ASK_USER_QUESTION_PROMPT
 
     def user_facing_name(self) -> str:
@@ -410,7 +411,8 @@ class AskUserQuestionTool(Tool[AskUserQuestionToolInput, AskUserQuestionToolOutput]):
         except (OSError, RuntimeError, ValueError, KeyError) as exc:
             logger.warning(
                 "[ask_user_question_tool] Error collecting answers: %s: %s",
-                type(exc).__name__,
+                type(exc).__name__,
+                exc,
             )
             output = AskUserQuestionToolOutput(
                 questions=questions,
ripperdoc/tools/background_shell.py
CHANGED
@@ -162,7 +162,8 @@ async def _monitor_task(task: BackgroundTask) -> None:
     except (OSError, RuntimeError, ProcessLookupError) as exc:
         logger.warning(
             "Error monitoring background task: %s: %s",
-            type(exc).__name__,
+            type(exc).__name__,
+            exc,
             extra={"task_id": task.id, "command": task.command},
         )
     with _tasks_lock:
ripperdoc/tools/bash_output_tool.py
CHANGED
@@ -40,7 +40,7 @@ class BashOutputTool(Tool[BashOutputInput, BashOutputData]):
     async def description(self) -> str:
         return "Read output and status from a background bash command started with BashTool(run_in_background=True)."
 
-    async def prompt(self,
+    async def prompt(self, yolo_mode: bool = False) -> str:
         return "Fetch buffered output and status for a background bash task by id."
 
     @property