ripperdoc 0.2.2__py3-none-any.whl → 0.2.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ripperdoc/__init__.py +1 -1
- ripperdoc/cli/cli.py +9 -2
- ripperdoc/cli/commands/agents_cmd.py +8 -4
- ripperdoc/cli/commands/context_cmd.py +3 -3
- ripperdoc/cli/commands/cost_cmd.py +5 -0
- ripperdoc/cli/commands/doctor_cmd.py +12 -4
- ripperdoc/cli/commands/memory_cmd.py +6 -13
- ripperdoc/cli/commands/models_cmd.py +36 -6
- ripperdoc/cli/commands/resume_cmd.py +4 -2
- ripperdoc/cli/commands/status_cmd.py +1 -1
- ripperdoc/cli/ui/rich_ui.py +135 -2
- ripperdoc/cli/ui/thinking_spinner.py +128 -0
- ripperdoc/core/agents.py +174 -6
- ripperdoc/core/config.py +9 -1
- ripperdoc/core/default_tools.py +6 -0
- ripperdoc/core/providers/__init__.py +47 -0
- ripperdoc/core/providers/anthropic.py +147 -0
- ripperdoc/core/providers/base.py +236 -0
- ripperdoc/core/providers/gemini.py +496 -0
- ripperdoc/core/providers/openai.py +253 -0
- ripperdoc/core/query.py +337 -141
- ripperdoc/core/query_utils.py +65 -24
- ripperdoc/core/system_prompt.py +67 -61
- ripperdoc/core/tool.py +12 -3
- ripperdoc/sdk/client.py +12 -1
- ripperdoc/tools/ask_user_question_tool.py +433 -0
- ripperdoc/tools/background_shell.py +104 -18
- ripperdoc/tools/bash_tool.py +33 -13
- ripperdoc/tools/enter_plan_mode_tool.py +223 -0
- ripperdoc/tools/exit_plan_mode_tool.py +150 -0
- ripperdoc/tools/file_edit_tool.py +13 -0
- ripperdoc/tools/file_read_tool.py +16 -0
- ripperdoc/tools/file_write_tool.py +13 -0
- ripperdoc/tools/glob_tool.py +5 -1
- ripperdoc/tools/ls_tool.py +14 -10
- ripperdoc/tools/mcp_tools.py +113 -4
- ripperdoc/tools/multi_edit_tool.py +12 -0
- ripperdoc/tools/notebook_edit_tool.py +12 -0
- ripperdoc/tools/task_tool.py +88 -5
- ripperdoc/tools/todo_tool.py +1 -3
- ripperdoc/tools/tool_search_tool.py +8 -4
- ripperdoc/utils/file_watch.py +134 -0
- ripperdoc/utils/git_utils.py +36 -38
- ripperdoc/utils/json_utils.py +1 -2
- ripperdoc/utils/log.py +3 -4
- ripperdoc/utils/mcp.py +49 -10
- ripperdoc/utils/memory.py +1 -3
- ripperdoc/utils/message_compaction.py +5 -11
- ripperdoc/utils/messages.py +9 -13
- ripperdoc/utils/output_utils.py +1 -3
- ripperdoc/utils/prompt.py +17 -0
- ripperdoc/utils/session_usage.py +7 -0
- ripperdoc/utils/shell_utils.py +159 -0
- ripperdoc/utils/token_estimation.py +33 -0
- {ripperdoc-0.2.2.dist-info → ripperdoc-0.2.4.dist-info}/METADATA +3 -1
- ripperdoc-0.2.4.dist-info/RECORD +99 -0
- ripperdoc-0.2.2.dist-info/RECORD +0 -86
- {ripperdoc-0.2.2.dist-info → ripperdoc-0.2.4.dist-info}/WHEEL +0 -0
- {ripperdoc-0.2.2.dist-info → ripperdoc-0.2.4.dist-info}/entry_points.txt +0 -0
- {ripperdoc-0.2.2.dist-info → ripperdoc-0.2.4.dist-info}/licenses/LICENSE +0 -0
- {ripperdoc-0.2.2.dist-info → ripperdoc-0.2.4.dist-info}/top_level.txt +0 -0
ripperdoc/tools/mcp_tools.py
CHANGED
@@ -30,6 +30,7 @@ from ripperdoc.utils.mcp import (
     load_mcp_servers_async,
     shutdown_mcp_runtime,
 )
+from ripperdoc.utils.token_estimation import estimate_tokens
 
 
 logger = get_logger()

@@ -40,6 +41,55 @@ except Exception:  # pragma: no cover - SDK may be missing at runtime
     mcp_types = None  # type: ignore[assignment]
     logger.exception("[mcp_tools] MCP SDK unavailable during import")
 
+DEFAULT_MAX_MCP_OUTPUT_TOKENS = 25_000
+MIN_MCP_OUTPUT_TOKENS = 1_000
+DEFAULT_MCP_WARNING_FRACTION = 0.8
+
+
+def _get_mcp_token_limits() -> tuple[int, int]:
+    """Compute warning and hard limits for MCP output size."""
+    max_tokens = os.getenv("RIPPERDOC_MCP_MAX_OUTPUT_TOKENS")
+    try:
+        max_tokens_int = int(max_tokens) if max_tokens else DEFAULT_MAX_MCP_OUTPUT_TOKENS
+    except (TypeError, ValueError):
+        max_tokens_int = DEFAULT_MAX_MCP_OUTPUT_TOKENS
+    max_tokens_int = max(MIN_MCP_OUTPUT_TOKENS, max_tokens_int)
+
+    warn_env = os.getenv("RIPPERDOC_MCP_WARNING_TOKENS")
+    try:
+        warn_tokens_int = int(warn_env) if warn_env else int(max_tokens_int * DEFAULT_MCP_WARNING_FRACTION)
+    except (TypeError, ValueError):
+        warn_tokens_int = int(max_tokens_int * DEFAULT_MCP_WARNING_FRACTION)
+    warn_tokens_int = max(MIN_MCP_OUTPUT_TOKENS, min(warn_tokens_int, max_tokens_int))
+    return warn_tokens_int, max_tokens_int
+
+
+def _evaluate_mcp_output_size(
+    result_text: Optional[str],
+    server_name: str,
+    tool_name: str,
+) -> tuple[Optional[str], Optional[str], int]:
+    """Return (warning, error, token_estimate) for an MCP result text."""
+    warn_tokens, max_tokens = _get_mcp_token_limits()
+    token_estimate = estimate_tokens(result_text or "")
+
+    if token_estimate > max_tokens:
+        error_text = (
+            f"MCP response from {server_name}:{tool_name} is ~{token_estimate:,} tokens, "
+            f"which exceeds the configured limit of {max_tokens}. "
+            "Refine the request (pagination/filtering) or raise RIPPERDOC_MCP_MAX_OUTPUT_TOKENS."
+        )
+        return None, error_text, token_estimate
+
+    warning_text = None
+    if result_text and token_estimate >= warn_tokens:
+        line_count = result_text.count("\n") + 1
+        warning_text = (
+            f"WARNING: Large MCP response (~{token_estimate:,} tokens, {line_count:,} lines). "
+            "This can fill the context quickly; consider pagination or filters."
+        )
+    return warning_text, None, token_estimate
+
 
 def _content_block_to_text(block: Any) -> str:
     block_type = getattr(block, "type", None) or (

@@ -370,6 +420,9 @@ class ReadMcpResourceOutput(BaseModel):
     uri: str
     content: Optional[str] = None
     contents: List[ResourceContentPart] = Field(default_factory=list)
+    token_estimate: Optional[int] = None
+    warning: Optional[str] = None
+    is_error: bool = False
 
 
 class McpToolCallOutput(BaseModel):

@@ -382,6 +435,8 @@ class McpToolCallOutput(BaseModel):
     content_blocks: Optional[List[Any]] = None
     structured_content: Optional[dict] = None
     is_error: bool = False
+    token_estimate: Optional[int] = None
+    warning: Optional[str] = None
 
 
 class ReadMcpResourceTool(Tool[ReadMcpResourceInput, ReadMcpResourceOutput]):

@@ -552,9 +607,35 @@ class ReadMcpResourceTool(Tool[ReadMcpResourceInput, ReadMcpResourceOutput]):
             read_result: Any = ReadMcpResourceOutput(
                 server=input_data.server, uri=input_data.uri, content=content_text, contents=parts
             )
+            assistant_text = self.render_result_for_assistant(read_result)  # type: ignore[arg-type]
+            warning_text, error_text, token_estimate = _evaluate_mcp_output_size(
+                assistant_text, input_data.server, f"resource:{input_data.uri}"
+            )
+
+            if error_text:
+                limited_result = ReadMcpResourceOutput(
+                    server=input_data.server,
+                    uri=input_data.uri,
+                    content=None,
+                    contents=[],
+                    token_estimate=token_estimate,
+                    warning=None,
+                    is_error=True,
+                )
+                yield ToolResult(data=limited_result, result_for_assistant=error_text)
+                return
+
+            annotated_result = read_result.model_copy(
+                update={"token_estimate": token_estimate, "warning": warning_text}
+            )
+
+            final_text = assistant_text or ""
+            if not final_text and warning_text:
+                final_text = warning_text
+
             yield ToolResult(
-                data=
-                result_for_assistant=
+                data=annotated_result,
+                result_for_assistant=final_text,  # type: ignore[arg-type]
             )
 
 

@@ -715,9 +796,37 @@ class DynamicMcpTool(Tool[BaseModel, McpToolCallOutput]):
                 structured_content=structured,
                 is_error=getattr(call_result, "isError", False),
             )
+            base_result_text = self.render_result_for_assistant(output)
+            warning_text, error_text, token_estimate = _evaluate_mcp_output_size(
+                base_result_text, self.server_name, self.tool_info.name
+            )
+
+            if error_text:
+                limited_output = McpToolCallOutput(
+                    server=self.server_name,
+                    tool=self.tool_info.name,
+                    content=None,
+                    text=None,
+                    content_blocks=None,
+                    structured_content=None,
+                    is_error=True,
+                    token_estimate=token_estimate,
+                    warning=None,
+                )
+                yield ToolResult(data=limited_output, result_for_assistant=error_text)
+                return
+
+            annotated_output = output.model_copy(
+                update={"token_estimate": token_estimate, "warning": warning_text}
+            )
+
+            final_text = base_result_text or ""
+            if not final_text and warning_text:
+                final_text = warning_text
+
             yield ToolResult(
-                data=
-                result_for_assistant=
+                data=annotated_output,
+                result_for_assistant=final_text,
             )
         except Exception as exc:  # pragma: no cover - runtime errors
             output = McpToolCallOutput(
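The helpers added above give MCP responses a size guard: the hard cap defaults to 25,000 estimated tokens (never below 1,000) and can be raised via RIPPERDOC_MCP_MAX_OUTPUT_TOKENS, while the warning threshold comes from RIPPERDOC_MCP_WARNING_TOKENS or defaults to 80% of the cap. A minimal sketch of how the thresholds respond to those environment variables, assuming the private helpers remain importable from ripperdoc.tools.mcp_tools as this diff shows:

# Illustrative sketch only; not part of the package.
import os

os.environ["RIPPERDOC_MCP_MAX_OUTPUT_TOKENS"] = "50000"  # raise the hard cap
os.environ.pop("RIPPERDOC_MCP_WARNING_TOKENS", None)     # warning falls back to 80% of the cap

from ripperdoc.tools.mcp_tools import _evaluate_mcp_output_size, _get_mcp_token_limits

warn_tokens, max_tokens = _get_mcp_token_limits()
print(warn_tokens, max_tokens)  # 40000 50000

# A small payload stays under both thresholds: no warning, no error.
warning, error, estimate = _evaluate_mcp_output_size("ok", "demo-server", "demo-tool")
print(warning, error, estimate)  # None None <small estimate>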
ripperdoc/tools/multi_edit_tool.py
CHANGED

@@ -18,6 +18,7 @@ from ripperdoc.core.tool import (
     ValidationResult,
 )
 from ripperdoc.utils.log import get_logger
+from ripperdoc.utils.file_watch import record_snapshot
 
 logger = get_logger()
 

@@ -360,6 +361,17 @@ class MultiEditTool(Tool[MultiEditToolInput, MultiEditToolOutput]):
         file_path.parent.mkdir(parents=True, exist_ok=True)
         try:
             file_path.write_text(updated_content, encoding="utf-8")
+            try:
+                record_snapshot(
+                    str(file_path),
+                    updated_content,
+                    getattr(context, "file_state_cache", {}),
+                )
+            except Exception:
+                logger.exception(
+                    "[multi_edit_tool] Failed to record file snapshot",
+                    extra={"file_path": str(file_path)},
+                )
         except Exception as exc:
             logger.exception(
                 "[multi_edit_tool] Error writing edited file",

ripperdoc/tools/notebook_edit_tool.py
CHANGED

@@ -20,6 +20,7 @@ from ripperdoc.core.tool import (
     ValidationResult,
 )
 from ripperdoc.utils.log import get_logger
+from ripperdoc.utils.file_watch import record_snapshot
 
 
 logger = get_logger()

@@ -272,6 +273,17 @@ class NotebookEditTool(Tool[NotebookEditInput, NotebookEditOutput]):
         )
 
         path.write_text(json.dumps(nb_json, indent=1), encoding="utf-8")
+        try:
+            record_snapshot(
+                input_data.notebook_path,
+                json.dumps(nb_json, indent=1),
+                getattr(context, "file_state_cache", {}),
+            )
+        except Exception:
+            logger.exception(
+                "[notebook_edit_tool] Failed to record file snapshot",
+                extra={"file_path": input_data.notebook_path},
+            )
 
         output = NotebookEditOutput(
             new_source=new_source,
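Both edit tools now feed the file-watch cache introduced in this release: after a successful write they record a snapshot of the new content into the context's file_state_cache, and snapshot failures are logged but never fail the edit itself. A minimal sketch of that pattern outside the tool classes, using a plain dict in place of the real tool context (the cache variable and file name here are illustrative):

# Sketch only; mirrors the best-effort snapshot calls in the diffs above.
from pathlib import Path
from typing import Dict

from ripperdoc.utils.file_watch import FileSnapshot, record_snapshot

file_state_cache: Dict[str, FileSnapshot] = {}
target = Path("example.txt")
updated_content = "hello\n"

target.write_text(updated_content, encoding="utf-8")
try:
    # Remember what was just written so later user edits can be diffed against it.
    record_snapshot(str(target), updated_content, file_state_cache)
except Exception:
    pass  # best-effort, matching the tools above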
ripperdoc/tools/task_tool.py
CHANGED
@@ -10,6 +10,9 @@ from pydantic import BaseModel, Field
 from ripperdoc.core.agents import (
     AgentDefinition,
     AgentLoadResult,
+    FILE_EDIT_TOOL_NAME,
+    GREP_TOOL_NAME,
+    VIEW_TOOL_NAME,
     clear_agent_cache,
     load_agent_definitions,
     resolve_agent_tools,

@@ -70,12 +73,92 @@ class TaskTool(Tool[TaskToolInput, TaskToolOutput]):
         del safe_mode
         clear_agent_cache()
         agents: AgentLoadResult = load_agent_definitions()
-
+
+        agent_lines: List[str] = []
+        for agent in agents.active_agents:
+            properties = (
+                "Properties: access to current context; "
+                if getattr(agent, "fork_context", False)
+                else ""
+            )
+            tools_label = "All tools"
+            if getattr(agent, "tools", None):
+                tools_label = (
+                    "All tools" if "*" in agent.tools else ", ".join(agent.tools)
+                )
+            agent_lines.append(
+                f"- {agent.agent_type}: {agent.when_to_use} ({properties}Tools: {tools_label})"
+            )
+
+        agent_block = "\n".join(agent_lines) or "- general-purpose (built-in)"
+
+        task_tool_name = self.name
+        file_read_tool_name = VIEW_TOOL_NAME
+        search_tool_name = GREP_TOOL_NAME
+        code_tool_name = FILE_EDIT_TOOL_NAME
+        background_fetch_tool_name = task_tool_name
+
         return (
-            "
-            "
-            "
-            f"
+            f"Launch a new agent to handle complex, multi-step tasks autonomously. \n\n"
+            f"The {task_tool_name} tool launches specialized agents (subprocesses) that autonomously handle complex tasks. Each agent type has specific capabilities and tools available to it.\n\n"
+            f"Available agent types and the tools they have access to:\n"
+            f"{agent_block}\n\n"
+            f"When using the {task_tool_name} tool, you must specify a subagent_type parameter to select which agent type to use.\n\n"
+            f"When NOT to use the {task_tool_name} tool:\n"
+            f"- If you want to read a specific file path, use the {file_read_tool_name} or {search_tool_name} tool instead of the {task_tool_name} tool, to find the match more quickly\n"
+            f'- If you are searching for a specific class definition like "class Foo", use the {search_tool_name} tool instead, to find the match more quickly\n'
+            f"- If you are searching for code within a specific file or set of 2-3 files, use the {file_read_tool_name} tool instead of the {task_tool_name} tool, to find the match more quickly\n"
+            "- Other tasks that are not related to the agent descriptions above\n"
+            "\n"
+            "\n"
+            "Usage notes:\n"
+            "- Launch multiple agents concurrently whenever possible, to maximize performance; to do that, use a single message with multiple tool uses\n"
+            "- When the agent is done, it will return a single message back to you. The result returned by the agent is not visible to the user. To show the user the result, you should send a text message back to the user with a concise summary of the result.\n"
+            f"- You can optionally run agents in the background using the run_in_background parameter. When an agent runs in the background, you will need to use {background_fetch_tool_name} to retrieve its results once it's done. You can continue to work while background agents run - When you need their results to continue you can use {background_fetch_tool_name} in blocking mode to pause and wait for their results.\n"
+            "- Agents can be resumed using the `resume` parameter by passing the agent ID from a previous invocation. When resumed, the agent continues with its full previous context preserved. When NOT resuming, each invocation starts fresh and you should provide a detailed task description with all necessary context.\n"
+            "- When the agent is done, it will return a single message back to you along with its agent ID. You can use this ID to resume the agent later if needed for follow-up work.\n"
+            "- Provide clear, detailed prompts so the agent can work autonomously and return exactly the information you need.\n"
+            '- Agents with "access to current context" can see the full conversation history before the tool call. When using these agents, you can write concise prompts that reference earlier context (e.g., "investigate the error discussed above") instead of repeating information. The agent will receive all prior messages and understand the context.\n'
+            "- The agent's outputs should generally be trusted\n"
+            "- Clearly tell the agent whether you expect it to write code or just to do research (search, file reads, web fetches, etc.), since it is not aware of the user's intent\n"
+            "- If the agent description mentions that it should be used proactively, then you should try your best to use it without the user having to ask for it first. Use your judgement.\n"
+            f'- If the user specifies that they want you to run agents "in parallel", you MUST send a single message with multiple {task_tool_name} tool use content blocks. For example, if you need to launch both a code-reviewer agent and a test-runner agent in parallel, send a single message with both tool calls.\n'
+            "\n"
+            "Example usage:\n"
+            "\n"
+            "<example_agent_descriptions>\n"
+            '"code-reviewer": use this agent after you are done writing a signficant piece of code\n'
+            '"greeting-responder": use this agent when to respond to user greetings with a friendly joke\n'
+            "</example_agent_description>\n"
+            "\n"
+            "<example>\n"
+            'user: "Please write a function that checks if a number is prime"\n'
+            "assistant: Sure let me write a function that checks if a number is prime\n"
+            f"assistant: First let me use the {code_tool_name} tool to write a function that checks if a number is prime\n"
+            f"assistant: I'm going to use the {code_tool_name} tool to write the following code:\n"
+            "<code>\n"
+            "function isPrime(n) {\n"
+            "  if (n <= 1) return false\n"
+            "  for (let i = 2; i * i <= n; i++) {\n"
+            "    if (n % i === 0) return false\n"
+            "  }\n"
+            "  return true\n"
+            "}\n"
+            "</code>\n"
+            "<commentary>\n"
+            "Since a signficant piece of code was written and the task was completed, now use the code-reviewer agent to review the code\n"
+            "</commentary>\n"
+            "assistant: Now let me use the code-reviewer agent to review the code\n"
+            f"assistant: Uses the {task_tool_name} tool to launch the code-reviewer agent \n"
+            "</example>\n"
+            "\n"
+            "<example>\n"
+            'user: "Hello"\n'
+            "<commentary>\n"
+            "Since the user is greeting, use the greeting-responder agent to respond with a friendly joke\n"
+            "</commentary>\n"
+            f'assistant: "I\'m going to use the {task_tool_name} tool to launch the greeting-responder agent\"\n'
+            "</example>"
         )
 
     def is_read_only(self) -> bool:
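The tool description is now built from the loaded agent definitions instead of a fixed string: each active agent contributes one line naming its type, when to use it, whether it sees the current context, and which tools it may call. A small sketch of that per-agent line, using hypothetical agent values rather than anything shipped with the package:

# Hypothetical agent fields, formatted the same way the loop above formats them.
agent_type = "code-reviewer"
when_to_use = "use this agent after writing a significant piece of code"
fork_context = True
tools = ["Read", "Grep"]

properties = "Properties: access to current context; " if fork_context else ""
tools_label = "All tools" if "*" in tools else ", ".join(tools)
print(f"- {agent_type}: {when_to_use} ({properties}Tools: {tools_label})")
# - code-reviewer: use this agent after writing a significant piece of code (Properties: access to current context; Tools: Read, Grep)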
ripperdoc/tools/todo_tool.py
CHANGED
@@ -361,9 +361,7 @@ class TodoWriteTool(Tool[TodoWriteToolInput, TodoToolOutput]):
             )
             yield ToolResult(data=output, result_for_assistant=result_text)
         except Exception as exc:
-            logger.exception(
-                "[todo_tool] Error updating todos", extra={"error": str(exc)}
-            )
+            logger.exception("[todo_tool] Error updating todos", extra={"error": str(exc)})
             error = f"Error updating todos: {exc}"
             yield ToolResult(
                 data=TodoToolOutput(

ripperdoc/tools/tool_search_tool.py
CHANGED

@@ -119,7 +119,9 @@ class ToolSearchTool(Tool[ToolSearchInput, ToolSearchOutput]):
     def is_concurrency_safe(self) -> bool:
         return True
 
-    def needs_permissions(
+    def needs_permissions(
+        self, input_data: Optional[ToolSearchInput] = None
+    ) -> bool:  # noqa: ARG002
         return False
 
     async def validate_input(

@@ -280,9 +282,11 @@
                 "name": name,
                 "user_facing_name": tool.user_facing_name(),
                 "description": description,
-                "active":
-
-
+                "active": (
+                    getattr(registry, "is_active", lambda *_: False)(name)
+                    if hasattr(registry, "is_active")
+                    else False
+                ),
                 "deferred": name in getattr(registry, "deferred_names", set()),
                 "score": 0.0,
             }
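The "active" flag in the search results is now computed defensively: the registry's is_active method is used when it exists, otherwise the flag falls back to False. A minimal illustration of that getattr-with-callable-default pattern against duck-typed registry objects (both objects here are hypothetical stand-ins, not the real tool registry):

class Registry:  # hypothetical stand-in exposing is_active
    def is_active(self, name: str) -> bool:
        return name == "grep"

with_method = Registry()
without_method = object()  # registry-like object lacking is_active

for registry, name in ((with_method, "grep"), (with_method, "glob"), (without_method, "grep")):
    active = (
        getattr(registry, "is_active", lambda *_: False)(name)
        if hasattr(registry, "is_active")
        else False
    )
    print(name, active)  # grep True, glob False, grep False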
ripperdoc/utils/file_watch.py
ADDED

@@ -0,0 +1,134 @@
+"""Lightweight file-change tracking for notifying the model about user edits."""
+
+from __future__ import annotations
+
+import difflib
+import os
+from dataclasses import dataclass
+from typing import Dict, List, Optional
+
+from ripperdoc.utils.log import get_logger
+
+logger = get_logger()
+
+
+@dataclass
+class FileSnapshot:
+    """Snapshot of a file read by the agent."""
+
+    content: str
+    timestamp: float
+    offset: int = 0
+    limit: Optional[int] = None
+
+
+@dataclass
+class ChangedFileNotice:
+    """Information about a file that changed after it was read."""
+
+    file_path: str
+    summary: str
+
+
+def record_snapshot(
+    file_path: str,
+    content: str,
+    cache: Dict[str, FileSnapshot],
+    *,
+    offset: int = 0,
+    limit: Optional[int] = None,
+) -> None:
+    """Store the current contents and mtime for a file."""
+    try:
+        timestamp = os.path.getmtime(file_path)
+    except OSError:
+        timestamp = 0.0
+    cache[file_path] = FileSnapshot(
+        content=content, timestamp=timestamp, offset=offset, limit=limit
+    )
+
+
+def _read_portion(file_path: str, offset: int, limit: Optional[int]) -> str:
+    with open(file_path, "r", encoding="utf-8", errors="replace") as handle:
+        lines = handle.readlines()
+    start = max(offset, 0)
+    if limit is None:
+        selected = lines[start:]
+    else:
+        selected = lines[start : start + limit]
+    return "".join(selected)
+
+
+def _build_diff_summary(old_content: str, new_content: str, file_path: str, max_lines: int) -> str:
+    diff = list(
+        difflib.unified_diff(
+            old_content.splitlines(),
+            new_content.splitlines(),
+            fromfile=file_path,
+            tofile=file_path,
+            lineterm="",
+        )
+    )
+    if not diff:
+        return "File was modified but contents appear unchanged."
+
+    # Keep the diff short to avoid flooding the model.
+    if len(diff) > max_lines:
+        diff = diff[:max_lines] + ["... (diff truncated)"]
+    return "\n".join(diff)
+
+
+def detect_changed_files(
+    cache: Dict[str, FileSnapshot], *, max_diff_lines: int = 80
+) -> List[ChangedFileNotice]:
+    """Return notices for files whose mtime increased since they were read."""
+    notices: List[ChangedFileNotice] = []
+
+    # Iterate over a static list so we can mutate cache safely.
+    for file_path, snapshot in list(cache.items()):
+        try:
+            current_mtime = os.path.getmtime(file_path)
+        except OSError:
+            notices.append(
+                ChangedFileNotice(
+                    file_path=file_path, summary="File was deleted or is no longer accessible."
+                )
+            )
+            cache.pop(file_path, None)
+            continue
+
+        if current_mtime <= snapshot.timestamp:
+            continue
+
+        try:
+            new_content = _read_portion(file_path, snapshot.offset, snapshot.limit)
+        except Exception as exc:  # pragma: no cover - best-effort telemetry
+            logger.exception(
+                "[file_watch] Failed reading changed file",
+                extra={"file_path": file_path, "error": str(exc)},
+            )
+            notices.append(
+                ChangedFileNotice(
+                    file_path=file_path,
+                    summary=f"File changed but could not be read: {exc}",
+                )
+            )
+            # Avoid spamming repeated errors by updating timestamp.
+            snapshot.timestamp = current_mtime
+            cache[file_path] = snapshot
+            continue
+
+        diff_summary = _build_diff_summary(
+            snapshot.content, new_content, file_path, max_lines=max_diff_lines
+        )
+        notices.append(ChangedFileNotice(file_path=file_path, summary=diff_summary))
+        # Update snapshot so we only notify on subsequent changes.
+        record_snapshot(
+            file_path,
+            new_content,
+            cache,
+            offset=snapshot.offset,
+            limit=snapshot.limit,
+        )
+
+    return notices
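Taken together, file_watch.py lets the agent notice edits made outside its own tool calls: tools record a snapshot whenever they read or write a file, and detect_changed_files later compares modification times and returns short unified-diff summaries for anything that changed. A small end-to-end sketch using only the public functions defined above (the file name and sleep are illustrative):

# Sketch only; demonstrates the snapshot/detect cycle on a throwaway file.
import time
from pathlib import Path
from typing import Dict

from ripperdoc.utils.file_watch import FileSnapshot, detect_changed_files, record_snapshot

cache: Dict[str, FileSnapshot] = {}
path = Path("notes.txt")

path.write_text("alpha\n", encoding="utf-8")
record_snapshot(str(path), "alpha\n", cache)  # remember what was last seen

time.sleep(1)  # make sure the mtime actually increases
path.write_text("alpha\nbeta\n", encoding="utf-8")  # simulate an outside edit

for notice in detect_changed_files(cache):
    print(notice.file_path)
    print(notice.summary)  # unified diff, truncated to max_diff_lines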