ripperdoc 0.2.2__py3-none-any.whl → 0.2.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (61)
  1. ripperdoc/__init__.py +1 -1
  2. ripperdoc/cli/cli.py +9 -2
  3. ripperdoc/cli/commands/agents_cmd.py +8 -4
  4. ripperdoc/cli/commands/context_cmd.py +3 -3
  5. ripperdoc/cli/commands/cost_cmd.py +5 -0
  6. ripperdoc/cli/commands/doctor_cmd.py +12 -4
  7. ripperdoc/cli/commands/memory_cmd.py +6 -13
  8. ripperdoc/cli/commands/models_cmd.py +36 -6
  9. ripperdoc/cli/commands/resume_cmd.py +4 -2
  10. ripperdoc/cli/commands/status_cmd.py +1 -1
  11. ripperdoc/cli/ui/rich_ui.py +135 -2
  12. ripperdoc/cli/ui/thinking_spinner.py +128 -0
  13. ripperdoc/core/agents.py +174 -6
  14. ripperdoc/core/config.py +9 -1
  15. ripperdoc/core/default_tools.py +6 -0
  16. ripperdoc/core/providers/__init__.py +47 -0
  17. ripperdoc/core/providers/anthropic.py +147 -0
  18. ripperdoc/core/providers/base.py +236 -0
  19. ripperdoc/core/providers/gemini.py +496 -0
  20. ripperdoc/core/providers/openai.py +253 -0
  21. ripperdoc/core/query.py +337 -141
  22. ripperdoc/core/query_utils.py +65 -24
  23. ripperdoc/core/system_prompt.py +67 -61
  24. ripperdoc/core/tool.py +12 -3
  25. ripperdoc/sdk/client.py +12 -1
  26. ripperdoc/tools/ask_user_question_tool.py +433 -0
  27. ripperdoc/tools/background_shell.py +104 -18
  28. ripperdoc/tools/bash_tool.py +33 -13
  29. ripperdoc/tools/enter_plan_mode_tool.py +223 -0
  30. ripperdoc/tools/exit_plan_mode_tool.py +150 -0
  31. ripperdoc/tools/file_edit_tool.py +13 -0
  32. ripperdoc/tools/file_read_tool.py +16 -0
  33. ripperdoc/tools/file_write_tool.py +13 -0
  34. ripperdoc/tools/glob_tool.py +5 -1
  35. ripperdoc/tools/ls_tool.py +14 -10
  36. ripperdoc/tools/mcp_tools.py +113 -4
  37. ripperdoc/tools/multi_edit_tool.py +12 -0
  38. ripperdoc/tools/notebook_edit_tool.py +12 -0
  39. ripperdoc/tools/task_tool.py +88 -5
  40. ripperdoc/tools/todo_tool.py +1 -3
  41. ripperdoc/tools/tool_search_tool.py +8 -4
  42. ripperdoc/utils/file_watch.py +134 -0
  43. ripperdoc/utils/git_utils.py +36 -38
  44. ripperdoc/utils/json_utils.py +1 -2
  45. ripperdoc/utils/log.py +3 -4
  46. ripperdoc/utils/mcp.py +49 -10
  47. ripperdoc/utils/memory.py +1 -3
  48. ripperdoc/utils/message_compaction.py +5 -11
  49. ripperdoc/utils/messages.py +9 -13
  50. ripperdoc/utils/output_utils.py +1 -3
  51. ripperdoc/utils/prompt.py +17 -0
  52. ripperdoc/utils/session_usage.py +7 -0
  53. ripperdoc/utils/shell_utils.py +159 -0
  54. ripperdoc/utils/token_estimation.py +33 -0
  55. {ripperdoc-0.2.2.dist-info → ripperdoc-0.2.4.dist-info}/METADATA +3 -1
  56. ripperdoc-0.2.4.dist-info/RECORD +99 -0
  57. ripperdoc-0.2.2.dist-info/RECORD +0 -86
  58. {ripperdoc-0.2.2.dist-info → ripperdoc-0.2.4.dist-info}/WHEEL +0 -0
  59. {ripperdoc-0.2.2.dist-info → ripperdoc-0.2.4.dist-info}/entry_points.txt +0 -0
  60. {ripperdoc-0.2.2.dist-info → ripperdoc-0.2.4.dist-info}/licenses/LICENSE +0 -0
  61. {ripperdoc-0.2.2.dist-info → ripperdoc-0.2.4.dist-info}/top_level.txt +0 -0
ripperdoc/core/query_utils.py CHANGED
@@ -4,10 +4,10 @@ from __future__ import annotations

  import json
  import re
- from typing import Any, Dict, List, Optional, Union
+ from typing import Any, Dict, List, Mapping, Optional, Union
  from uuid import uuid4

- from json_repair import repair_json
+ from json_repair import repair_json # type: ignore[import-not-found]
  from pydantic import ValidationError

  from ripperdoc.core.config import ModelProfile, ProviderType, get_global_config
@@ -26,17 +26,25 @@ from ripperdoc.utils.messages import (
  logger = get_logger()


- def _safe_int(value: Any) -> int:
+ def _safe_int(value: object) -> int:
      """Best-effort int conversion for usage counters."""
      try:
          if value is None:
              return 0
-         return int(value)
+         if isinstance(value, bool):
+             return int(value)
+         if isinstance(value, (int, float)):
+             return int(value)
+         if isinstance(value, str):
+             return int(value)
+         if hasattr(value, "__int__"):
+             return int(value) # type: ignore[arg-type]
+         return 0
      except (TypeError, ValueError):
          return 0


- def _get_usage_field(usage: Any, field: str) -> int:
+ def _get_usage_field(usage: Optional[Mapping[str, Any] | object], field: str) -> int:
      """Fetch a usage field from either a dict or object."""
      if usage is None:
          return 0
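The widened `_safe_int` and the dict-or-object handling in `_get_usage_field` reflect that provider SDKs hand back usage either as a plain dict or as a typed response object. A rough, self-contained sketch of that access pattern (the helper below is local to this example, not an import from the package):

```python
from types import SimpleNamespace
from typing import Any


def usage_field(usage: Any, field: str) -> int:
    """Read a token counter from either a dict payload or an SDK object attribute."""
    if usage is None:
        return 0
    if isinstance(usage, dict):
        value = usage.get(field)
    else:
        value = getattr(usage, field, 0)
    try:
        return int(value or 0)
    except (TypeError, ValueError):
        return 0


# Both payload shapes yield the same counter; a missing payload degrades to 0.
print(usage_field({"input_tokens": 120}, "input_tokens"))                # 120
print(usage_field(SimpleNamespace(input_tokens="120"), "input_tokens"))  # 120
print(usage_field(None, "output_tokens"))                                # 0
```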
@@ -45,7 +53,7 @@ def _get_usage_field(usage: Any, field: str) -> int:
      return _safe_int(getattr(usage, field, 0))


- def anthropic_usage_tokens(usage: Any) -> Dict[str, int]:
+ def anthropic_usage_tokens(usage: Optional[Mapping[str, Any] | object]) -> Dict[str, int]:
      """Extract token counts from an Anthropic response usage payload."""
      return {
          "input_tokens": _get_usage_field(usage, "input_tokens"),
@@ -55,7 +63,7 @@ def anthropic_usage_tokens(usage: Any) -> Dict[str, int]:
      }


- def openai_usage_tokens(usage: Any) -> Dict[str, int]:
+ def openai_usage_tokens(usage: Optional[Mapping[str, Any] | object]) -> Dict[str, int]:
      """Extract token counts from an OpenAI-compatible response usage payload."""
      prompt_details = None
      if isinstance(usage, dict):
@@ -73,8 +81,24 @@ def openai_usage_tokens(usage: Any) -> Dict[str, int]:
      }


+ def estimate_cost_usd(model_profile: ModelProfile, usage_tokens: Dict[str, int]) -> float:
+     """Compute USD cost using per-1M token pricing from the model profile."""
+     input_price = getattr(model_profile, "input_cost_per_million_tokens", 0.0) or 0.0
+     output_price = getattr(model_profile, "output_cost_per_million_tokens", 0.0) or 0.0
+
+     total_input_tokens = (
+         _safe_int(usage_tokens.get("input_tokens"))
+         + _safe_int(usage_tokens.get("cache_read_input_tokens"))
+         + _safe_int(usage_tokens.get("cache_creation_input_tokens"))
+     )
+     output_tokens = _safe_int(usage_tokens.get("output_tokens"))
+
+     cost = (total_input_tokens * input_price + output_tokens * output_price) / 1_000_000
+     return float(cost)
+
+
  def resolve_model_profile(model: str) -> ModelProfile:
-     """Resolve a model pointer to a concrete profile or raise if missing."""
+     """Resolve a model pointer to a concrete profile, falling back to a safe default."""
      config = get_global_config()
      profile_name = getattr(config.model_pointers, model, None) or model
      model_profile = config.model_profiles.get(profile_name)
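The new `estimate_cost_usd` helper folds cache-read and cache-creation tokens into the input side before applying per-million pricing. A standalone sketch of the same arithmetic, with made-up prices purely for illustration:

```python
def estimate_cost_usd_sketch(usage_tokens: dict, input_price: float, output_price: float) -> float:
    """Mirror of the diffed pricing logic; prices are USD per one million tokens."""
    total_input = (
        usage_tokens.get("input_tokens", 0)
        + usage_tokens.get("cache_read_input_tokens", 0)
        + usage_tokens.get("cache_creation_input_tokens", 0)
    )
    output = usage_tokens.get("output_tokens", 0)
    return (total_input * input_price + output * output_price) / 1_000_000


# 10,000 fresh input tokens plus 2,000 cache-read tokens and 800 output tokens,
# at hypothetical prices of $3/M input and $15/M output:
usage = {"input_tokens": 10_000, "cache_read_input_tokens": 2_000, "output_tokens": 800}
print(estimate_cost_usd_sketch(usage, input_price=3.0, output_price=15.0))  # 0.048
```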
@@ -82,7 +106,11 @@ def resolve_model_profile(model: str) -> ModelProfile:
          fallback_profile = getattr(config.model_pointers, "main", "default")
          model_profile = config.model_profiles.get(fallback_profile)
          if not model_profile:
-             raise ValueError(f"No model profile found for pointer: {model}")
+             logger.warning(
+                 "[config] No model profile found; using built-in default profile",
+                 extra={"model_pointer": model},
+             )
+             return ModelProfile(provider=ProviderType.OPENAI_COMPATIBLE, model="gpt-4o-mini")
      return model_profile


@@ -103,12 +131,10 @@ def _parse_text_mode_json_blocks(text: str) -> Optional[List[Dict[str, Any]]]:
      if not text or not isinstance(text, str):
          return None

-     code_blocks = re.findall(
-         r"```(?:\s*json)?\s*([\s\S]*?)\s*```", text, flags=re.IGNORECASE
-     )
+     code_blocks = re.findall(r"```(?:\s*json)?\s*([\s\S]*?)\s*```", text, flags=re.IGNORECASE)
      candidates = [blk.strip() for blk in code_blocks if blk.strip()]

-     def _normalize_blocks(parsed: Any) -> Optional[List[Dict[str, Any]]]:
+     def _normalize_blocks(parsed: object) -> Optional[List[Dict[str, Any]]]:
          raw_blocks = parsed if isinstance(parsed, list) else [parsed]
          normalized: List[Dict[str, Any]] = []
          for raw in raw_blocks:
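The reflowed `re.findall` call is the heart of text-mode tool parsing: it pulls the body out of any fenced block, optionally tagged `json`, in the model's reply. A small self-contained sketch using the same pattern on an invented reply (the reply text and tool name are made up for illustration):

````python
import json
import re

# Same pattern as the diffed findall call.
FENCE_RE = r"```(?:\s*json)?\s*([\s\S]*?)\s*```"

reply = (
    "Sure, reading the file now.\n"
    "```json\n"
    '[{"type": "tool_use", "tool": "View", "input": {"file_path": "README.md"}}]\n'
    "```"
)
blocks = [blk.strip() for blk in re.findall(FENCE_RE, reply, flags=re.IGNORECASE) if blk.strip()]
print(json.loads(blocks[0])[0]["tool"])  # View
````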
@@ -204,7 +230,9 @@ def _tool_prompt_for_text_mode(tools: List[Tool[Any, Any]]) -> str:

          schema_json = ""
          try:
-             schema_json = json.dumps(tool.input_schema.model_json_schema(), ensure_ascii=False, indent=2)
+             schema_json = json.dumps(
+                 tool.input_schema.model_json_schema(), ensure_ascii=False, indent=2
+             )
          except (AttributeError, TypeError, ValueError) as exc:
              logger.debug(
                  "[tool_prompt] Failed to render input_schema",
@@ -218,7 +246,12 @@ def _tool_prompt_for_text_mode(tools: List[Tool[Any, Any]]) -> str:

      example_blocks = [
          {"type": "text", "text": "好的,我来帮你查看一下README.md文件"},
-         {"type": "tool_use", "tool_use_id": "tool_id_000001", "tool": "View", "input": {"file_path": "README.md"}},
+         {
+             "type": "tool_use",
+             "tool_use_id": "tool_id_000001",
+             "tool": "View",
+             "input": {"file_path": "README.md"},
+         },
      ]
      lines.append("Example:")
      lines.append("```json")
@@ -228,7 +261,9 @@ def _tool_prompt_for_text_mode(tools: List[Tool[Any, Any]]) -> str:
      return "\n".join(lines)


- def text_mode_history(messages: List[Union[UserMessage, AssistantMessage, ProgressMessage]]) -> List[Union[UserMessage, AssistantMessage]]:
+ def text_mode_history(
+     messages: List[Union[UserMessage, AssistantMessage, ProgressMessage]],
+ ) -> List[Union[UserMessage, AssistantMessage]]:
      """Convert a message history into text-only form for text mode."""

      def _normalize_block(block: Any) -> Optional[Dict[str, Any]]:
@@ -269,9 +304,13 @@ def text_mode_history(messages: List[Union[UserMessage, AssistantMessage, Progre
          if isinstance(content, list):
              normalized_blocks = []
              for block in content:
-                 block_type = getattr(block, "type", None) or (block.get("type") if isinstance(block, dict) else None)
-                 block_text = getattr(block, "text", None) if hasattr(block, "text") else (
-                     block.get("text") if isinstance(block, dict) else None
+                 block_type = getattr(block, "type", None) or (
+                     block.get("type") if isinstance(block, dict) else None
+                 )
+                 block_text = (
+                     getattr(block, "text", None)
+                     if hasattr(block, "text")
+                     else (block.get("text") if isinstance(block, dict) else None)
                  )
                  if block_type == "text" and isinstance(block_text, str):
                      parsed_nested = _parse_text_mode_json_blocks(block_text)
@@ -287,7 +326,9 @@ def text_mode_history(messages: List[Union[UserMessage, AssistantMessage, Progre
          elif isinstance(content, str):
              parsed_blocks = _parse_text_mode_json_blocks(content)
              if parsed_blocks:
-                 text_content = f"```json\n{json.dumps(parsed_blocks, ensure_ascii=False, indent=2)}\n```"
+                 text_content = (
+                     f"```json\n{json.dumps(parsed_blocks, ensure_ascii=False, indent=2)}\n```"
+                 )
              else:
                  text_content = content
          else:
@@ -301,7 +342,9 @@ def text_mode_history(messages: List[Union[UserMessage, AssistantMessage, Progre
      return converted


- def _maybe_convert_json_block_to_tool_use(content_blocks: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
+ def _maybe_convert_json_block_to_tool_use(
+     content_blocks: List[Dict[str, Any]],
+ ) -> List[Dict[str, Any]]:
      """Convert any text blocks containing JSON content to structured content blocks."""
      if not content_blocks:
          return content_blocks
@@ -437,9 +480,7 @@ async def build_openai_tool_schemas(tools: List[Tool[Any, Any]]) -> List[Dict[st
      return openai_tools


- def content_blocks_from_anthropic_response(
-     response: Any, tool_mode: str
- ) -> List[Dict[str, Any]]:
+ def content_blocks_from_anthropic_response(response: Any, tool_mode: str) -> List[Dict[str, Any]]:
      """Normalize Anthropic response content to our internal block format."""
      blocks: List[Dict[str, Any]] = []
      for block in getattr(response, "content", []) or []:
ripperdoc/core/system_prompt.py CHANGED
@@ -8,7 +8,19 @@ from pathlib import Path
  from textwrap import dedent
  from typing import Any, Dict, Iterable, List, Optional

- from ripperdoc.core.agents import clear_agent_cache, load_agent_definitions, summarize_agent
+ from ripperdoc.core.agents import (
+     ASK_USER_QUESTION_TOOL_NAME,
+     BASH_TOOL_NAME,
+     FILE_EDIT_TOOL_NAME,
+     FILE_WRITE_TOOL_NAME,
+     TASK_TOOL_NAME,
+     TODO_WRITE_TOOL_NAME,
+     TOOL_SEARCH_TOOL_NAME,
+     VIEW_TOOL_NAME,
+     clear_agent_cache,
+     load_agent_definitions,
+     summarize_agent,
+ )
  from ripperdoc.core.tool import Tool
  from ripperdoc.utils.log import get_logger

@@ -174,10 +186,18 @@ def build_system_prompt(
  ) -> str:
      _ = user_prompt, context
      tool_names = {tool.name for tool in tools}
-     todo_tool_name = "TodoWrite"
+     todo_tool_name = TODO_WRITE_TOOL_NAME
      todo_available = todo_tool_name in tool_names
-     task_available = "Task" in tool_names
-     shell_tool_name = next((tool.name for tool in tools if tool.name.lower() == "bash"), "Bash")
+     task_available = TASK_TOOL_NAME in tool_names
+     ask_tool_name = ASK_USER_QUESTION_TOOL_NAME
+     ask_available = ask_tool_name in tool_names
+     view_tool_name = VIEW_TOOL_NAME
+     file_edit_tool_name = FILE_EDIT_TOOL_NAME
+     file_write_tool_name = FILE_WRITE_TOOL_NAME
+     shell_tool_name = next(
+         (tool.name for tool in tools if tool.name.lower() == BASH_TOOL_NAME.lower()),
+         BASH_TOOL_NAME,
+     )

      main_prompt = dedent(
          f"""\
@@ -190,61 +210,25 @@ def build_system_prompt(
  - /help: Get help with using {APP_NAME}
  - To give feedback, users should report the issue at {FEEDBACK_URL}

- # Tone and style
- You should be concise, direct, and to the point.
- You MUST answer concisely with fewer than 4 lines (not including tool use or code generation), unless user asks for detail.
- IMPORTANT: You should minimize output tokens as much as possible while maintaining helpfulness, quality, and accuracy. Only address the specific query or task at hand, avoiding tangential information unless absolutely critical for completing the request. If you can answer in 1-3 sentences or a short paragraph, please do.
- IMPORTANT: You should NOT answer with unnecessary preamble or postamble (such as explaining your code or summarizing your action), unless the user asks you to.
- Do not add additional code explanation summary unless requested by the user. After working on a file, just stop, rather than providing an explanation of what you did.
- Answer the user's question directly, without elaboration, explanation, or details. One word answers are best. Avoid introductions, conclusions, and explanations. You MUST avoid text before/after your response, such as "The answer is <answer>.", "Here is the content of the file..." or "Based on the information provided, the answer is..." or "Here is what I will do next...". Here are some examples to demonstrate appropriate verbosity:
- <example>
- user: 2 + 2
- assistant: 4
- </example>
-
- <example>
- user: what is 2+2?
- assistant: 4
- </example>
-
- <example>
- user: is 11 a prime number?
- assistant: Yes
- </example>
-
- <example>
- user: what command should I run to list files in the current directory?
- assistant: ls
- </example>
+ # Looking up your own documentation
+ When the user asks what {APP_NAME} can do, how to use it (hooks, slash commands, MCP, SDKs), or requests SDK code samples, use the {TASK_TOOL_NAME} tool with a documentation-focused subagent (for example, subagent_type="docs") if available to consult official docs before answering.

- <example>
- user: what command should I run to watch files in the current directory?
- assistant: [use the ls tool to list the files in the current directory, then read docs/commands in the relevant file to find out how to watch files]
- npm run dev
- </example>
-
- <example>
- user: How many golf balls fit inside a jetta?
- assistant: 150000
- </example>
-
- <example>
- user: what files are in the directory src/?
- assistant: [runs ls and sees foo.c, bar.c, baz.c]
- user: which file contains the implementation of foo?
- assistant: src/foo.c
- </example>
-
- <example>
- user: write tests for new feature
- assistant: [uses grep and glob search tools to find where similar tests are defined, uses concurrent read file tool use blocks in one tool call to read relevant files at the same time, uses edit file tool to write new tests]
- </example>
+ # Tone and style
+ - Only use emojis if the user explicitly requests it. Avoid using emojis in all communication unless asked.
+ - Your output will be displayed on a command line interface. Your responses should be short and concise. You can use Github-flavored markdown for formatting, and will be rendered in a monospace font using the CommonMark specification.
+ - Output text to communicate with the user; all text you output outside of tool use is displayed to the user. Only use tools to complete tasks. Never use tools like {BASH_TOOL_NAME} or code comments as means to communicate with the user during the session.
+ - NEVER create files unless they're absolutely necessary for achieving your goal. ALWAYS prefer editing an existing file to creating a new one. This includes markdown files.
+
+ # Professional objectivity
+ Prioritize technical accuracy and truthfulness over validating the user's beliefs. Focus on facts and problem-solving, providing direct, objective technical info without any unnecessary superlatives, praise, or emotional validation. It is best for the user if Claude honestly applies the same rigorous standards to all ideas and disagrees when necessary, even if it may not be what the user wants to hear. Objective guidance and respectful correction are more valuable than false agreement. Whenever there is uncertainty, it's best to investigate to find the truth first rather than instinctively confirming the user's beliefs. Avoid using over-the-top validation or excessive praise when responding to users such as "You're absolutely right" or similar phrases.
+
+ # Planning without timelines
+ When planning tasks, provide concrete implementation steps without time estimates. Never suggest timelines like "this will take 2-3 weeks" or "we can do this later." Focus on what needs to be done, not when. Break work into actionable steps and let users decide scheduling.
+
+ # Explain Your Code: Bash Command Transparency
  When you run a non-trivial bash command, you should explain what the command does and why you are running it, to make sure the user understands what you are doing (this is especially important when you are running a command that will make changes to the user's system).
  Remember that your output will be displayed on a command line interface. Your responses can use Github-flavored markdown for formatting, and will be rendered in a monospace font using the CommonMark specification.
- Output text to communicate with the user; all text you output outside of tool use is displayed to the user. Only use tools to complete tasks. Never use tools like {shell_tool_name} or code comments as means to communicate with the user during the session.
  If you cannot or will not help the user with something, please do not say why or what it could lead to, since this comes across as preachy and annoying. Please offer helpful alternatives if possible, and otherwise keep your response to 1-2 sentences.
- Only use emojis if the user explicitly requests it. Avoid using emojis in all communication unless asked.
- IMPORTANT: Keep your responses short, since they will be displayed on a command line interface.

  # Proactiveness
  You are allowed to be proactive, but only when the user asks you to do something. You should strive to strike a balance between:
@@ -260,7 +244,7 @@ def build_system_prompt(
  - Always follow security best practices. Never introduce code that exposes or logs secrets and keys. Never commit secrets or keys to the repository.

  # Code style
- - IMPORTANT: DO NOT ADD ***ANY*** COMMENTS unless asked"""
+ - Only add comments when the logic is not self-evident and within code you changed. Do not add docstrings, comments, or type annotations to code you did not modify."""
      ).strip()

      if mcp_instructions:
@@ -318,6 +302,15 @@ def build_system_prompt(
  </example>"""
      ).strip()

+     ask_questions_section = ""
+     if ask_available:
+         ask_questions_section = dedent(
+             f"""\
+ # Asking questions as you work
+
+ You have access to the {ask_tool_name} tool to ask the user questions when you need clarification, want to validate assumptions, or need to make a decision you're unsure about. When presenting options or plans, do not include time estimates—focus on what each option involves."""
+         ).strip()
+
      hooks_section = dedent(
          """\
  Users may configure 'hooks', shell commands that execute in response to events like tool calls, in settings. Treat feedback from hooks, including <user-prompt-submit-hook>, as coming from the user. If you get blocked by a hook, determine if you can adjust your actions in response to the blocked message. If not, ask the user to check their hooks configuration."""
@@ -329,15 +322,26 @@ def build_system_prompt(
      ]
      if todo_available:
          doing_tasks_lines.append(f"- Use the {todo_tool_name} tool to plan the task if required")
+     if ask_available:
+         doing_tasks_lines.append(
+             f"- Use the {ask_tool_name} tool to ask questions, clarify, and gather information as needed."
+         )
      doing_tasks_lines.extend(
          [
+             "- NEVER propose changes to code you haven't read. If a user asks about or wants you to modify a file, read it first.",
              "- Use the available search tools to understand the codebase and the user's query. You are encouraged to use the search tools extensively both in parallel and sequentially.",
-             "- Implement the solution using all tools available to you",
+             "- When exploring the codebase beyond a needle query, prefer using the Task tool with an exploration subagent if available instead of running raw search commands directly.",
+             "- Implement the solution using all tools available to you.",
+             "- Be careful not to introduce security vulnerabilities such as command injection, XSS, SQL injection, and other OWASP top 10 vulnerabilities. If you notice that you wrote insecure code, immediately fix it.",
+             "- Avoid over-engineering. Only make changes that are directly requested or clearly necessary. Keep solutions simple and focused.",
+             " - Don't add features, refactor code, or make improvements beyond what was asked. Don't add docstrings, comments, or type annotations to code you didn't change. Only add comments where the logic isn't self-evident.",
+             " - Don't add error handling, fallbacks, or validation for scenarios that can't happen. Validate only at system boundaries (user input, external APIs).",
+             " - Don't create helpers, utilities, or abstractions for one-time operations. Avoid feature flags or backwards-compatibility shims when a direct change is sufficient. If something is unused, delete it completely.",
              "- Verify the solution if possible with tests. NEVER assume specific test framework or test script. Check the README or search codebase to determine the testing approach.",
              f"- VERY IMPORTANT: When you have completed a task, you MUST run the lint and typecheck commands (eg. npm run lint, npm run typecheck, ruff, etc.) with {shell_tool_name} if they were provided to you to ensure your code is correct. If you are unable to find the correct command, ask the user for the command to run and if they supply it, proactively suggest writing it to AGENTS.md so that you will know to run it next time.",
              "NEVER commit changes unless the user explicitly asks you to. It is VERY IMPORTANT to only commit when explicitly asked, otherwise the user will feel that you are being too proactive.",
-             "",
              "- Tool results and user messages may include <system-reminder> tags. <system-reminder> tags contain useful information and reminders. They are NOT part of the user's provided input or the tool result.",
+             "- The conversation has unlimited context through automatic summarization. Complete tasks fully; do not stop mid-task or claim context limits.",
          ]
      )
      doing_tasks_section = "\n".join(doing_tasks_lines)
@@ -345,7 +349,8 @@ def build_system_prompt(
      tool_usage_lines = [
          "# Tool usage policy",
          '- You have the capability to call multiple tools in a single response. When multiple independent pieces of information are requested, batch your tool calls together for optimal performance. When making multiple bash tool calls, you MUST send a single message with multiple tools calls to run the calls in parallel. For example, if you need to run "git status" and "git diff", send a single message with two tool calls to run the calls in parallel.',
-         "",
+         "- If the user asks to run tools in parallel and there are no dependencies, include multiple tool calls in a single message; sequence dependent calls instead of guessing values.",
+         f"- Use specialized tools instead of bash when possible: use {view_tool_name} for reading files, {file_edit_tool_name} for editing, and {file_write_tool_name} for creating files. Do not use bash echo or other command-line tools to communicate with the user; reply in text.",
          "You MUST answer concisely with fewer than 4 lines of text (not including tool use or code generation), unless user asks for detail.",
      ]
      if task_available:
@@ -353,7 +358,7 @@ def build_system_prompt(
              1,
              "- Use the Task tool with configured subagents when the task matches an agent's description. Always set subagent_type.",
          )
-     if "ToolSearch" in tool_names:
+     if TOOL_SEARCH_TOOL_NAME in tool_names:
          tool_usage_lines.insert(
              1,
              "- Use the ToolSearch tool to discover and activate deferred or MCP tools. Keep searches focused and load only 3-5 relevant tools.",
@@ -402,6 +407,7 @@ def build_system_prompt(
      sections: List[str] = [
          main_prompt,
          task_management_section,
+         ask_questions_section,
          hooks_section,
          doing_tasks_section,
          tool_usage_section,
@@ -409,7 +415,7 @@ def build_system_prompt(
          build_environment_prompt(),
          DEFENSIVE_SECURITY_GUIDELINE,
          always_use_todo,
-         build_commit_workflow_prompt(shell_tool_name, todo_tool_name, "Task"),
+         build_commit_workflow_prompt(shell_tool_name, todo_tool_name, TASK_TOOL_NAME),
          code_references,
      ]

ripperdoc/core/tool.py CHANGED
@@ -8,6 +8,7 @@ import json
  from abc import ABC, abstractmethod
  from typing import Any, AsyncGenerator, Dict, List, Optional, TypeVar, Generic, Union
  from pydantic import BaseModel, ConfigDict, Field
+ from ripperdoc.utils.file_watch import FileSnapshot
  from ripperdoc.utils.log import get_logger


@@ -39,9 +40,17 @@ class ToolUseContext(BaseModel):
      safe_mode: bool = False
      verbose: bool = False
      permission_checker: Optional[Any] = None
-     read_file_timestamps: Dict[str, float] = {}
+     read_file_timestamps: Dict[str, float] = Field(default_factory=dict)
+     file_state_cache: Dict[str, "FileSnapshot"] = Field(default_factory=dict)
      tool_registry: Optional[Any] = None
      abort_signal: Optional[Any] = None
+     # UI control callbacks for tools that need user interaction
+     pause_ui: Optional[Any] = Field(default=None, description="Callback to pause UI spinner")
+     resume_ui: Optional[Any] = Field(default=None, description="Callback to resume UI spinner")
+     # Plan mode control callback
+     on_exit_plan_mode: Optional[Any] = Field(
+         default=None, description="Callback invoked when exiting plan mode"
+     )
      model_config = ConfigDict(arbitrary_types_allowed=True)


@@ -202,7 +211,7 @@ async def build_tool_description(
      except Exception:
          logger.exception(
              "[tool] Failed to build input example section",
-             extra={"tool": getattr(tool, 'name', None)},
+             extra={"tool": getattr(tool, "name", None)},
          )
      return description_text

@@ -221,7 +230,7 @@ def tool_input_examples(tool: Tool[Any, Any], limit: int = 5) -> List[Dict[str,
          except Exception:
              logger.exception(
                  "[tool] Failed to format tool input example",
-                 extra={"tool": getattr(tool, 'name', None)},
+                 extra={"tool": getattr(tool, "name", None)},
              )
              continue
      return results
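The `ToolUseContext` hunk above replaces the bare `{}` default with `Field(default_factory=dict)` and declares the new `file_state_cache` the same way, so every context instance builds its own fresh dict. A minimal sketch of that pattern, assuming nothing about the real model beyond what the hunk shows:

```python
from typing import Dict

from pydantic import BaseModel, Field


class ContextSketch(BaseModel):
    # Each instance gets its own dict: the factory runs at construction time.
    read_file_timestamps: Dict[str, float] = Field(default_factory=dict)


a = ContextSketch()
b = ContextSketch()
a.read_file_timestamps["README.md"] = 1700000000.0
print(b.read_file_timestamps)  # {} -- b's dict was built by its own factory call
```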
ripperdoc/sdk/client.py CHANGED
@@ -19,11 +19,13 @@ from typing import (
      List,
      Optional,
      Sequence,
+     Tuple,
      Union,
  )

  from ripperdoc.core.default_tools import get_default_tools
  from ripperdoc.core.query import QueryContext, query as _core_query
+ from ripperdoc.core.permissions import PermissionResult
  from ripperdoc.core.system_prompt import build_system_prompt
  from ripperdoc.core.tool import Tool
  from ripperdoc.tools.task_tool import TaskTool
@@ -42,7 +44,16 @@ from ripperdoc.utils.mcp import (
  )

  MessageType = Union[UserMessage, AssistantMessage, ProgressMessage]
- PermissionChecker = Callable[[Tool[Any, Any], Any], Union[Awaitable[Any], Any]]
+ PermissionChecker = Callable[
+     [Tool[Any, Any], Any],
+     Union[
+         PermissionResult,
+         Dict[str, Any],
+         Tuple[bool, Optional[str]],
+         bool,
+         Awaitable[Union[PermissionResult, Dict[str, Any], Tuple[bool, Optional[str]], bool]],
+     ],
+ ]
  QueryRunner = Callable[
      [
          List[MessageType],
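The widened `PermissionChecker` alias spells out the return shapes the SDK accepts from a caller-supplied checker: a `PermissionResult`, a dict, an `(allowed, reason)` tuple, a plain bool, or an awaitable of any of these. Two hedged examples covering the simpler shapes (the tool-name check is illustrative, not the package's own policy):

```python
from typing import Any, Optional, Tuple


def allow_everything(tool: Any, tool_input: Any) -> bool:
    """Simplest accepted shape: a bare bool."""
    return True


async def block_file_writes(tool: Any, tool_input: Any) -> Tuple[bool, Optional[str]]:
    """Async checkers are accepted too; the tuple carries an optional denial reason."""
    if getattr(tool, "name", "") in {"Write", "Edit"}:  # hypothetical tool names
        return False, "file writes are disabled in this session"
    return True, None
```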