ripperdoc-0.2.6-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (107)
  1. ripperdoc/__init__.py +3 -0
  2. ripperdoc/__main__.py +20 -0
  3. ripperdoc/cli/__init__.py +1 -0
  4. ripperdoc/cli/cli.py +405 -0
  5. ripperdoc/cli/commands/__init__.py +82 -0
  6. ripperdoc/cli/commands/agents_cmd.py +263 -0
  7. ripperdoc/cli/commands/base.py +19 -0
  8. ripperdoc/cli/commands/clear_cmd.py +18 -0
  9. ripperdoc/cli/commands/compact_cmd.py +23 -0
  10. ripperdoc/cli/commands/config_cmd.py +31 -0
  11. ripperdoc/cli/commands/context_cmd.py +144 -0
  12. ripperdoc/cli/commands/cost_cmd.py +82 -0
  13. ripperdoc/cli/commands/doctor_cmd.py +221 -0
  14. ripperdoc/cli/commands/exit_cmd.py +19 -0
  15. ripperdoc/cli/commands/help_cmd.py +20 -0
  16. ripperdoc/cli/commands/mcp_cmd.py +70 -0
  17. ripperdoc/cli/commands/memory_cmd.py +202 -0
  18. ripperdoc/cli/commands/models_cmd.py +413 -0
  19. ripperdoc/cli/commands/permissions_cmd.py +302 -0
  20. ripperdoc/cli/commands/resume_cmd.py +98 -0
  21. ripperdoc/cli/commands/status_cmd.py +167 -0
  22. ripperdoc/cli/commands/tasks_cmd.py +278 -0
  23. ripperdoc/cli/commands/todos_cmd.py +69 -0
  24. ripperdoc/cli/commands/tools_cmd.py +19 -0
  25. ripperdoc/cli/ui/__init__.py +1 -0
  26. ripperdoc/cli/ui/context_display.py +298 -0
  27. ripperdoc/cli/ui/helpers.py +22 -0
  28. ripperdoc/cli/ui/rich_ui.py +1557 -0
  29. ripperdoc/cli/ui/spinner.py +49 -0
  30. ripperdoc/cli/ui/thinking_spinner.py +128 -0
  31. ripperdoc/cli/ui/tool_renderers.py +298 -0
  32. ripperdoc/core/__init__.py +1 -0
  33. ripperdoc/core/agents.py +486 -0
  34. ripperdoc/core/commands.py +33 -0
  35. ripperdoc/core/config.py +559 -0
  36. ripperdoc/core/default_tools.py +88 -0
  37. ripperdoc/core/permissions.py +252 -0
  38. ripperdoc/core/providers/__init__.py +47 -0
  39. ripperdoc/core/providers/anthropic.py +250 -0
  40. ripperdoc/core/providers/base.py +265 -0
  41. ripperdoc/core/providers/gemini.py +615 -0
  42. ripperdoc/core/providers/openai.py +487 -0
  43. ripperdoc/core/query.py +1058 -0
  44. ripperdoc/core/query_utils.py +622 -0
  45. ripperdoc/core/skills.py +295 -0
  46. ripperdoc/core/system_prompt.py +431 -0
  47. ripperdoc/core/tool.py +240 -0
  48. ripperdoc/sdk/__init__.py +9 -0
  49. ripperdoc/sdk/client.py +333 -0
  50. ripperdoc/tools/__init__.py +1 -0
  51. ripperdoc/tools/ask_user_question_tool.py +431 -0
  52. ripperdoc/tools/background_shell.py +389 -0
  53. ripperdoc/tools/bash_output_tool.py +98 -0
  54. ripperdoc/tools/bash_tool.py +1016 -0
  55. ripperdoc/tools/dynamic_mcp_tool.py +428 -0
  56. ripperdoc/tools/enter_plan_mode_tool.py +226 -0
  57. ripperdoc/tools/exit_plan_mode_tool.py +153 -0
  58. ripperdoc/tools/file_edit_tool.py +346 -0
  59. ripperdoc/tools/file_read_tool.py +203 -0
  60. ripperdoc/tools/file_write_tool.py +205 -0
  61. ripperdoc/tools/glob_tool.py +179 -0
  62. ripperdoc/tools/grep_tool.py +370 -0
  63. ripperdoc/tools/kill_bash_tool.py +136 -0
  64. ripperdoc/tools/ls_tool.py +471 -0
  65. ripperdoc/tools/mcp_tools.py +591 -0
  66. ripperdoc/tools/multi_edit_tool.py +456 -0
  67. ripperdoc/tools/notebook_edit_tool.py +386 -0
  68. ripperdoc/tools/skill_tool.py +205 -0
  69. ripperdoc/tools/task_tool.py +379 -0
  70. ripperdoc/tools/todo_tool.py +494 -0
  71. ripperdoc/tools/tool_search_tool.py +380 -0
  72. ripperdoc/utils/__init__.py +1 -0
  73. ripperdoc/utils/bash_constants.py +51 -0
  74. ripperdoc/utils/bash_output_utils.py +43 -0
  75. ripperdoc/utils/coerce.py +34 -0
  76. ripperdoc/utils/context_length_errors.py +252 -0
  77. ripperdoc/utils/exit_code_handlers.py +241 -0
  78. ripperdoc/utils/file_watch.py +135 -0
  79. ripperdoc/utils/git_utils.py +274 -0
  80. ripperdoc/utils/json_utils.py +27 -0
  81. ripperdoc/utils/log.py +176 -0
  82. ripperdoc/utils/mcp.py +560 -0
  83. ripperdoc/utils/memory.py +253 -0
  84. ripperdoc/utils/message_compaction.py +676 -0
  85. ripperdoc/utils/messages.py +519 -0
  86. ripperdoc/utils/output_utils.py +258 -0
  87. ripperdoc/utils/path_ignore.py +677 -0
  88. ripperdoc/utils/path_utils.py +46 -0
  89. ripperdoc/utils/permissions/__init__.py +27 -0
  90. ripperdoc/utils/permissions/path_validation_utils.py +174 -0
  91. ripperdoc/utils/permissions/shell_command_validation.py +552 -0
  92. ripperdoc/utils/permissions/tool_permission_utils.py +279 -0
  93. ripperdoc/utils/prompt.py +17 -0
  94. ripperdoc/utils/safe_get_cwd.py +31 -0
  95. ripperdoc/utils/sandbox_utils.py +38 -0
  96. ripperdoc/utils/session_history.py +260 -0
  97. ripperdoc/utils/session_usage.py +117 -0
  98. ripperdoc/utils/shell_token_utils.py +95 -0
  99. ripperdoc/utils/shell_utils.py +159 -0
  100. ripperdoc/utils/todo.py +203 -0
  101. ripperdoc/utils/token_estimation.py +34 -0
  102. ripperdoc-0.2.6.dist-info/METADATA +193 -0
  103. ripperdoc-0.2.6.dist-info/RECORD +107 -0
  104. ripperdoc-0.2.6.dist-info/WHEEL +5 -0
  105. ripperdoc-0.2.6.dist-info/entry_points.txt +3 -0
  106. ripperdoc-0.2.6.dist-info/licenses/LICENSE +53 -0
  107. ripperdoc-0.2.6.dist-info/top_level.txt +1 -0
ripperdoc/utils/context_length_errors.py
@@ -0,0 +1,252 @@
+ """Detection helpers for context-window overflow errors across providers.
+
+ Observed provider responses when the request is too large:
+ - OpenAI/OpenRouter style (400 BadRequestError): error.code/context_length_exceeded with
+   a message like "This model's maximum context length is 128000 tokens. However, you
+   requested 130000 tokens (... in the messages, ... in the completion)."
+ - Anthropic (400 BadRequestError): invalid_request_error with a message such as
+   "prompt is too long for model claude-3-5-sonnet. max tokens: 200000 prompt tokens: 240000".
+ - Gemini / google-genai (FAILED_PRECONDITION or INVALID_ARGUMENT): APIError message like
+   "The input to the model was too long. The requested input has X tokens, which exceeds
+   the maximum of Y tokens for models/gemini-...".
+
+ These helpers allow callers to detect the condition and trigger auto-compaction.
+ """
+
+ from __future__ import annotations
+
+ from dataclasses import dataclass
+ from typing import Any, List, Optional, Set
+
+ ContextLengthErrorCode = Optional[str]
+
+
+ @dataclass
+ class ContextLengthErrorInfo:
+     """Normalized metadata about a context-length error."""
+
+     provider: Optional[str]
+     message: str
+     error_code: ContextLengthErrorCode = None
+     status_code: Optional[int] = None
+
+
+ _CONTEXT_PATTERNS = [
+     "context_length_exceeded",
+     "maximum context length",
+     "max context length",
+     "maximum context window",
+     "max context window",
+     "context length is",
+     "context length was exceeded",
+     "context window of",
+     "token limit exceeded",
+     "token length exceeded",
+     "prompt is too long",
+     "input is too long",
+     "request is too large",
+     "exceeds the maximum context",
+     "exceeds the model's context",
+     "requested input has",
+     "too many tokens",
+     "reduce the length of the messages",
+ ]
+
+
+ def detect_context_length_error(error: Any) -> Optional[ContextLengthErrorInfo]:
+     """Return normalized context-length error info if the exception matches."""
+     if error is None:
+         return None
+
+     provider = _guess_provider(error)
+     status_code = _extract_status_code(error)
+     codes = _extract_codes(error)
+     messages = _collect_strings(error)
+
+     # Check explicit error codes first.
+     for code in codes:
+         normalized = code.lower()
+         if any(
+             keyword in normalized
+             for keyword in (
+                 "context_length",
+                 "max_tokens",
+                 "token_length",
+                 "prompt_too_long",
+                 "input_too_large",
+                 "token_limit",
+             )
+         ):
+             message = messages[0] if messages else code
+             return ContextLengthErrorInfo(
+                 provider=provider,
+                 message=message,
+                 error_code=code,
+                 status_code=status_code,
+             )
+
+     # Fall back to message-based detection.
+     for text in messages:
+         if _looks_like_context_length_message(text):
+             return ContextLengthErrorInfo(
+                 provider=provider,
+                 message=text,
+                 error_code=codes[0] if codes else None,
+                 status_code=status_code,
+             )
+
+     return None
+
+
+ def _looks_like_context_length_message(text: str) -> bool:
+     lower = text.lower()
+     if any(pattern in lower for pattern in _CONTEXT_PATTERNS):
+         return True
+     if "too long" in lower and (
+         "prompt" in lower or "input" in lower or "context" in lower or "token" in lower
+     ):
+         return True
+     if "exceed" in lower and ("token" in lower or "context" in lower):
+         return True
+     if "max" in lower and "token" in lower and ("context" in lower or "limit" in lower):
+         return True
+     return False
+
+
+ def _guess_provider(error: Any) -> Optional[str]:
+     module = getattr(getattr(error, "__class__", None), "__module__", "") or ""
+     name = getattr(getattr(error, "__class__", None), "__name__", "").lower()
+     if "openai" in module or "openai" in name:
+         return "openai"
+     if "anthropic" in module or "claude" in module:
+         return "anthropic"
+     if "google.genai" in module or "vertexai" in module:
+         return "gemini"
+     return None
+
+
+ def _extract_status_code(error: Any) -> Optional[int]:
+     for attr in ("status_code", "http_status", "code"):
+         value = getattr(error, attr, None)
+         if isinstance(value, int):
+             return value
+         if isinstance(value, str) and value.isdigit():
+             return int(value)
+
+     for payload in (
+         _safe_getattr(error, "body"),
+         _safe_getattr(error, "details"),
+         _safe_getattr(error, "error"),
+     ):
+         if isinstance(payload, dict):
+             for key in ("status_code", "code"):
+                 value = payload.get(key)
+                 if isinstance(value, int):
+                     return value
+                 if isinstance(value, str) and value.isdigit():
+                     return int(value)
+
+     return None
+
+
+ def _extract_codes(error: Any) -> List[str]:
+     codes: List[str] = []
+     seen: Set[str] = set()
+
+     def _add(value: Any) -> None:
+         if value is None:
+             return
+         if isinstance(value, int):
+             value = str(value)
+         if not isinstance(value, str):
+             return
+         normalized = value.strip()
+         if not normalized or normalized in seen:
+             return
+         seen.add(normalized)
+         codes.append(normalized)
+
+     for attr in ("code", "error_code", "type", "status"):
+         _add(_safe_getattr(error, attr))
+
+     for payload in (
+         _safe_getattr(error, "body"),
+         _safe_getattr(error, "details"),
+         _safe_getattr(error, "error"),
+     ):
+         if isinstance(payload, dict):
+             for key in ("code", "type", "status"):
+                 _add(payload.get(key))
+             nested = payload.get("error")
+             if isinstance(nested, dict):
+                 for key in ("code", "type", "status"):
+                     _add(nested.get(key))
+
+     if isinstance(error, dict):
+         for key in ("code", "type", "status"):
+             _add(error.get(key))
+
+     return codes
+
+
+ def _collect_strings(error: Any) -> List[str]:
+     """Collect human-readable strings from an exception/payload."""
+     texts: List[str] = []
+     seen_texts: Set[str] = set()
+     seen_objs: Set[int] = set()
+
+     def _add_text(value: Any) -> None:
+         if not isinstance(value, str):
+             return
+         normalized = value.strip()
+         if not normalized or normalized in seen_texts:
+             return
+         seen_texts.add(normalized)
+         texts.append(normalized)
+
+     def _walk(obj: Any) -> None:
+         if obj is None:
+             return
+         obj_id = id(obj)
+         if obj_id in seen_objs:
+             return
+         seen_objs.add(obj_id)
+
+         if isinstance(obj, str):
+             _add_text(obj)
+             return
+
+         if isinstance(obj, BaseException):
+             _add_text(_safe_getattr(obj, "message"))
+             for arg in getattr(obj, "args", ()):
+                 _walk(arg)
+             for attr in ("body", "error", "details"):
+                 _walk(_safe_getattr(obj, attr))
+             return
+
+         if isinstance(obj, dict):
+             for val in obj.values():
+                 _walk(val)
+             return
+
+         if isinstance(obj, (list, tuple, set)):
+             for item in obj:
+                 _walk(item)
+             return
+
+         _add_text(_safe_getattr(obj, "message"))
+
+     _walk(error)
+     try:
+         _add_text(str(error))
+     except (TypeError, ValueError):
+         pass
+
+     return texts
+
+
+ def _safe_getattr(obj: Any, attr: str) -> Any:
+     try:
+         return getattr(obj, attr, None)
+     except (TypeError, AttributeError):
+         return None
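Usage sketch (illustrative, not part of the wheel): a caller that retries once after compaction when detect_context_length_error recognizes the failure. The call_model and compact_messages callables are assumed stand-ins, not ripperdoc APIs.

# Hypothetical glue code; only detect_context_length_error comes from the package.
from ripperdoc.utils.context_length_errors import detect_context_length_error

def run_with_auto_compaction(call_model, compact_messages, messages):
    try:
        return call_model(messages)
    except Exception as exc:  # provider SDKs raise their own error types
        info = detect_context_length_error(exc)
        if info is None:
            raise  # not a context-window overflow; propagate unchanged
        # Overflow detected: shrink the conversation and retry once.
        print(f"context overflow ({info.provider or 'unknown provider'}): {info.message}")
        return call_model(compact_messages(messages))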
ripperdoc/utils/exit_code_handlers.py
@@ -0,0 +1,241 @@
+ """Smart exit code handlers for common shell commands and related helpers.
+
+ Provides intelligent interpretation of exit codes for commands like grep, diff, test, etc.
+ where non-zero exit codes don't necessarily indicate errors. Also includes small utilities
+ shared by bash tooling such as command classification, preview sizing, and lightweight
+ command/result schemas.
+ """
+
+ import shlex
+ from typing import Callable, Optional
+ from pydantic import BaseModel, Field
+ from dataclasses import dataclass
+
+
+ @dataclass
+ class ExitCodeResult:
+     """Result of exit code interpretation."""
+
+     is_error: bool
+     message: Optional[str] = None
+     semantic_meaning: Optional[str] = None
+
+
+ ExitCodeHandler = Callable[[int, str, str], ExitCodeResult]
+
+
+ # Default/max timeouts exposed for bash tooling (keep aligned with BashTool).
+ DEFAULT_BASH_TIMEOUT_MS = 120000
+ MAX_BASH_TIMEOUT_MS = 600000
+
+ # Commands we intentionally ignore in certain contexts (e.g., background-safety checks).
+ IGNORED_COMMANDS: tuple[str, ...] = ("sleep",)
+
+ # Preview limits for rendering long commands compactly.
+ MAX_PREVIEW_LINES = 2
+ MAX_PREVIEW_CHARS = 160
+
+ # Heuristic command classification list (mirrors the reference set).
+ COMMON_COMMANDS: tuple[str, ...] = (
+     "npm",
+     "yarn",
+     "pnpm",
+     "node",
+     "python",
+     "python3",
+     "go",
+     "cargo",
+     "make",
+     "docker",
+     "terraform",
+     "webpack",
+     "vite",
+     "jest",
+     "pytest",
+     "curl",
+     "wget",
+     "build",
+     "test",
+     "serve",
+     "watch",
+     "dev",
+ )
+
+
+ class BashCommandSchema(BaseModel):
+     """Schema describing a bash command request."""
+
+     command: str = Field(description="The command to execute")
+     timeout: Optional[int] = Field(
+         default=None, description=f"Optional timeout in milliseconds (max {MAX_BASH_TIMEOUT_MS})"
+     )
+     description: Optional[str] = Field(
+         default=None,
+         description="Clear, concise description of what this command does in 5-10 words.",
+     )
+     run_in_background: bool = Field(
+         default=False, description="Set to true to run this command in the background."
+     )
+
+
+ class ExtendedBashCommandSchema(BashCommandSchema):
+     """Schema describing an extended bash command request."""
+
+     sandbox: Optional[bool] = Field(
+         default=None, description="Whether to request sandboxed execution (read-only)."
+     )
+     shell_executable: Optional[str] = Field(
+         default=None, description="Optional shell path to use instead of the default shell."
+     )
+
+
+ class CommandResultSchema(BaseModel):
+     """Schema describing the shape of a command result."""
+
+     stdout: str = Field(description="The standard output of the command")
+     stderr: str = Field(description="The standard error output of the command")
+     summary: Optional[str] = Field(default=None, description="Summarized output when available")
+     interrupted: bool = Field(default=False, description="Whether the command was interrupted")
+     is_image: Optional[bool] = Field(
+         default=None, description="Flag to indicate if stdout contains image data"
+     )
+     background_task_id: Optional[str] = Field(
+         default=None, description="ID of the background task if command is running in background"
+     )
+     sandbox: Optional[bool] = Field(
+         default=None, description="Flag to indicate if the command was run in sandbox mode"
+     )
+     return_code_interpretation: Optional[str] = Field(
+         default=None,
+         description="Semantic interpretation for non-error exit codes with special meaning",
+     )
+
+
+ def default_handler(exit_code: int, stdout: str, stderr: str) -> ExitCodeResult:
+     """Default exit code handler - non-zero is error."""
+     return ExitCodeResult(
+         is_error=exit_code != 0,
+         message=f"Command failed with exit code {exit_code}" if exit_code != 0 else None,
+     )
+
+
+ def grep_handler(exit_code: int, stdout: str, stderr: str) -> ExitCodeResult:
+     """Handle grep/rg exit codes: 0=found, 1=not found, 2+=error."""
+     if exit_code == 0:
+         return ExitCodeResult(is_error=False)
+     elif exit_code == 1:
+         return ExitCodeResult(is_error=False, semantic_meaning="No matches found")
+     else:
+         return ExitCodeResult(is_error=True, message=f"grep failed with exit code {exit_code}")
+
+
+ def diff_handler(exit_code: int, stdout: str, stderr: str) -> ExitCodeResult:
+     """Handle diff exit codes: 0=same, 1=different, 2+=error."""
+     if exit_code == 0:
+         return ExitCodeResult(is_error=False, semantic_meaning="Files are identical")
+     elif exit_code == 1:
+         return ExitCodeResult(is_error=False, semantic_meaning="Files differ")
+     else:
+         return ExitCodeResult(is_error=True, message=f"diff failed with exit code {exit_code}")
+
+
+ def test_handler(exit_code: int, stdout: str, stderr: str) -> ExitCodeResult:
+     """Handle test/[ exit codes: 0=true, 1=false, 2+=error."""
+     if exit_code == 0:
+         return ExitCodeResult(is_error=False, semantic_meaning="Condition is true")
+     elif exit_code == 1:
+         return ExitCodeResult(is_error=False, semantic_meaning="Condition is false")
+     else:
+         return ExitCodeResult(
+             is_error=True, message=f"test command failed with exit code {exit_code}"
+         )
+
+
+ def find_handler(exit_code: int, stdout: str, stderr: str) -> ExitCodeResult:
+     """Handle find exit codes: 0=ok, 1=partial, 2+=error."""
+     if exit_code == 0:
+         return ExitCodeResult(is_error=False)
+     elif exit_code == 1:
+         return ExitCodeResult(is_error=False, semantic_meaning="Some directories were inaccessible")
+     else:
+         return ExitCodeResult(is_error=True, message=f"find failed with exit code {exit_code}")
+
+
+ # Command-specific handlers
+ COMMAND_HANDLERS: dict[str, ExitCodeHandler] = {
+     "grep": grep_handler,
+     "rg": grep_handler,
+     "ripgrep": grep_handler,
+     "diff": diff_handler,
+     "test": test_handler,
+     "[": test_handler,
+     "find": find_handler,
+ }
+
+
+ def normalize_command(command: str) -> str:
+     """Extract the base command from a command string.
+
+     Handles pipes, command chains, and extracts the final command.
+     Examples:
+         'git status' -> 'git'
+         'cat file | grep pattern' -> 'grep'
+         'ls -la' -> 'ls'
+     """
+     # Get the last command in a pipe chain
+     if "|" in command:
+         command = command.split("|")[-1].strip()
+
+     # Get the first word (the actual command)
+     command = command.strip().split()[0] if command.strip() else ""
+
+     return command
+
+
+ def classify_command(command: str) -> str:
+     """Classify a shell command into a known category or 'other'."""
+     try:
+         tokens = shlex.split(command)
+     except ValueError:
+         tokens = command.split()
+
+     if not tokens:
+         return "other"
+
+     for token in tokens:
+         cleaned = token.strip()
+         if not cleaned or cleaned in {"&&", "||", ";", "|"}:
+             continue
+
+         first_word = cleaned.split()[0].lower()
+         if first_word in COMMON_COMMANDS:
+             return first_word
+
+     return "other"
+
+
+ def get_exit_code_handler(command: str) -> ExitCodeHandler:
+     """Get the appropriate exit code handler for a command."""
+     normalized = normalize_command(command)
+     return COMMAND_HANDLERS.get(normalized, default_handler)
+
+
+ def interpret_exit_code(command: str, exit_code: int, stdout: str, stderr: str) -> ExitCodeResult:
+     """Interpret an exit code in the context of the command.
+
+     Args:
+         command: The shell command that was executed
+         exit_code: The exit code returned
+         stdout: Standard output from the command
+         stderr: Standard error from the command
+
+     Returns:
+         ExitCodeResult with interpretation
+     """
+     handler = get_exit_code_handler(command)
+     return handler(exit_code, stdout, stderr)
+
+
+ def create_exit_result(command: str, exit_code: int, stdout: str, stderr: str) -> ExitCodeResult:
+     """Convenience wrapper to mirror reference API naming."""
+     return interpret_exit_code(command, exit_code, stdout, stderr)
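Usage sketch (illustrative): interpret_exit_code treats grep's exit code 1 as "no matches" rather than a failure, while codes of 2 and above remain errors; the commands and outputs below are invented for the example.

# Illustrative only; exit codes follow grep's documented convention.
from ripperdoc.utils.exit_code_handlers import interpret_exit_code

no_match = interpret_exit_code("grep TODO src/app.py", 1, "", "")
assert no_match.is_error is False
assert no_match.semantic_meaning == "No matches found"

# Piped commands are normalized to their final command before handler lookup.
piped = interpret_exit_code("cat src/app.py | grep TODO", 1, "", "")
assert piped.is_error is False

real_failure = interpret_exit_code("grep TODO missing.py", 2, "", "grep: missing.py: No such file")
assert real_failure.is_error is True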
ripperdoc/utils/file_watch.py
@@ -0,0 +1,135 @@
+ """Lightweight file-change tracking for notifying the model about user edits."""
+
+ from __future__ import annotations
+
+ import difflib
+ import os
+ from dataclasses import dataclass
+ from typing import Dict, List, Optional
+
+ from ripperdoc.utils.log import get_logger
+
+ logger = get_logger()
+
+
+ @dataclass
+ class FileSnapshot:
+     """Snapshot of a file read by the agent."""
+
+     content: str
+     timestamp: float
+     offset: int = 0
+     limit: Optional[int] = None
+
+
+ @dataclass
+ class ChangedFileNotice:
+     """Information about a file that changed after it was read."""
+
+     file_path: str
+     summary: str
+
+
+ def record_snapshot(
+     file_path: str,
+     content: str,
+     cache: Dict[str, FileSnapshot],
+     *,
+     offset: int = 0,
+     limit: Optional[int] = None,
+ ) -> None:
+     """Store the current contents and mtime for a file."""
+     try:
+         timestamp = os.path.getmtime(file_path)
+     except OSError:
+         timestamp = 0.0
+     cache[file_path] = FileSnapshot(
+         content=content, timestamp=timestamp, offset=offset, limit=limit
+     )
+
+
+ def _read_portion(file_path: str, offset: int, limit: Optional[int]) -> str:
+     with open(file_path, "r", encoding="utf-8", errors="replace") as handle:
+         lines = handle.readlines()
+     start = max(offset, 0)
+     if limit is None:
+         selected = lines[start:]
+     else:
+         selected = lines[start : start + limit]
+     return "".join(selected)
+
+
+ def _build_diff_summary(old_content: str, new_content: str, file_path: str, max_lines: int) -> str:
+     diff = list(
+         difflib.unified_diff(
+             old_content.splitlines(),
+             new_content.splitlines(),
+             fromfile=file_path,
+             tofile=file_path,
+             lineterm="",
+         )
+     )
+     if not diff:
+         return "File was modified but contents appear unchanged."
+
+     # Keep the diff short to avoid flooding the model.
+     if len(diff) > max_lines:
+         diff = diff[:max_lines] + ["... (diff truncated)"]
+     return "\n".join(diff)
+
+
+ def detect_changed_files(
+     cache: Dict[str, FileSnapshot], *, max_diff_lines: int = 80
+ ) -> List[ChangedFileNotice]:
+     """Return notices for files whose mtime increased since they were read."""
+     notices: List[ChangedFileNotice] = []
+
+     # Iterate over a static list so we can mutate cache safely.
+     for file_path, snapshot in list(cache.items()):
+         try:
+             current_mtime = os.path.getmtime(file_path)
+         except OSError:
+             notices.append(
+                 ChangedFileNotice(
+                     file_path=file_path, summary="File was deleted or is no longer accessible."
+                 )
+             )
+             cache.pop(file_path, None)
+             continue
+
+         if current_mtime <= snapshot.timestamp:
+             continue
+
+         try:
+             new_content = _read_portion(file_path, snapshot.offset, snapshot.limit)
+         except (OSError, IOError, UnicodeDecodeError, ValueError) as exc:  # pragma: no cover - best-effort telemetry
+             logger.warning(
+                 "[file_watch] Failed reading changed file: %s: %s",
+                 type(exc).__name__, exc,
+                 extra={"file_path": file_path},
+             )
+             notices.append(
+                 ChangedFileNotice(
+                     file_path=file_path,
+                     summary=f"File changed but could not be read: {exc}",
+                 )
+             )
+             # Avoid spamming repeated errors by updating timestamp.
+             snapshot.timestamp = current_mtime
+             cache[file_path] = snapshot
+             continue
+
+         diff_summary = _build_diff_summary(
+             snapshot.content, new_content, file_path, max_lines=max_diff_lines
+         )
+         notices.append(ChangedFileNotice(file_path=file_path, summary=diff_summary))
+         # Update snapshot so we only notify on subsequent changes.
+         record_snapshot(
+             file_path,
+             new_content,
+             cache,
+             offset=snapshot.offset,
+             limit=snapshot.limit,
+         )
+
+     return notices
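Usage sketch (illustrative): the intended flow is to record a snapshot whenever the agent reads a file, then ask for change notices before the next model turn. The snapshot_cache name and the notes.md file are assumptions for the example, not part of the package.

# Hypothetical session glue; in ripperdoc the cache would live with the session state.
from ripperdoc.utils.file_watch import FileSnapshot, record_snapshot, detect_changed_files

snapshot_cache: dict[str, FileSnapshot] = {}

# After the agent reads a file, remember what it saw.
with open("notes.md", encoding="utf-8") as fh:
    record_snapshot("notes.md", fh.read(), snapshot_cache)

# ... later, the user edits notes.md outside the agent ...

# Before the next turn, surface external edits as truncated unified diffs.
for notice in detect_changed_files(snapshot_cache, max_diff_lines=40):
    print(f"{notice.file_path} changed:\n{notice.summary}")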