ripperdoc 0.2.6__py3-none-any.whl
This diff shows the contents of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between package versions as they appear in their public registries.
- ripperdoc/__init__.py +3 -0
- ripperdoc/__main__.py +20 -0
- ripperdoc/cli/__init__.py +1 -0
- ripperdoc/cli/cli.py +405 -0
- ripperdoc/cli/commands/__init__.py +82 -0
- ripperdoc/cli/commands/agents_cmd.py +263 -0
- ripperdoc/cli/commands/base.py +19 -0
- ripperdoc/cli/commands/clear_cmd.py +18 -0
- ripperdoc/cli/commands/compact_cmd.py +23 -0
- ripperdoc/cli/commands/config_cmd.py +31 -0
- ripperdoc/cli/commands/context_cmd.py +144 -0
- ripperdoc/cli/commands/cost_cmd.py +82 -0
- ripperdoc/cli/commands/doctor_cmd.py +221 -0
- ripperdoc/cli/commands/exit_cmd.py +19 -0
- ripperdoc/cli/commands/help_cmd.py +20 -0
- ripperdoc/cli/commands/mcp_cmd.py +70 -0
- ripperdoc/cli/commands/memory_cmd.py +202 -0
- ripperdoc/cli/commands/models_cmd.py +413 -0
- ripperdoc/cli/commands/permissions_cmd.py +302 -0
- ripperdoc/cli/commands/resume_cmd.py +98 -0
- ripperdoc/cli/commands/status_cmd.py +167 -0
- ripperdoc/cli/commands/tasks_cmd.py +278 -0
- ripperdoc/cli/commands/todos_cmd.py +69 -0
- ripperdoc/cli/commands/tools_cmd.py +19 -0
- ripperdoc/cli/ui/__init__.py +1 -0
- ripperdoc/cli/ui/context_display.py +298 -0
- ripperdoc/cli/ui/helpers.py +22 -0
- ripperdoc/cli/ui/rich_ui.py +1557 -0
- ripperdoc/cli/ui/spinner.py +49 -0
- ripperdoc/cli/ui/thinking_spinner.py +128 -0
- ripperdoc/cli/ui/tool_renderers.py +298 -0
- ripperdoc/core/__init__.py +1 -0
- ripperdoc/core/agents.py +486 -0
- ripperdoc/core/commands.py +33 -0
- ripperdoc/core/config.py +559 -0
- ripperdoc/core/default_tools.py +88 -0
- ripperdoc/core/permissions.py +252 -0
- ripperdoc/core/providers/__init__.py +47 -0
- ripperdoc/core/providers/anthropic.py +250 -0
- ripperdoc/core/providers/base.py +265 -0
- ripperdoc/core/providers/gemini.py +615 -0
- ripperdoc/core/providers/openai.py +487 -0
- ripperdoc/core/query.py +1058 -0
- ripperdoc/core/query_utils.py +622 -0
- ripperdoc/core/skills.py +295 -0
- ripperdoc/core/system_prompt.py +431 -0
- ripperdoc/core/tool.py +240 -0
- ripperdoc/sdk/__init__.py +9 -0
- ripperdoc/sdk/client.py +333 -0
- ripperdoc/tools/__init__.py +1 -0
- ripperdoc/tools/ask_user_question_tool.py +431 -0
- ripperdoc/tools/background_shell.py +389 -0
- ripperdoc/tools/bash_output_tool.py +98 -0
- ripperdoc/tools/bash_tool.py +1016 -0
- ripperdoc/tools/dynamic_mcp_tool.py +428 -0
- ripperdoc/tools/enter_plan_mode_tool.py +226 -0
- ripperdoc/tools/exit_plan_mode_tool.py +153 -0
- ripperdoc/tools/file_edit_tool.py +346 -0
- ripperdoc/tools/file_read_tool.py +203 -0
- ripperdoc/tools/file_write_tool.py +205 -0
- ripperdoc/tools/glob_tool.py +179 -0
- ripperdoc/tools/grep_tool.py +370 -0
- ripperdoc/tools/kill_bash_tool.py +136 -0
- ripperdoc/tools/ls_tool.py +471 -0
- ripperdoc/tools/mcp_tools.py +591 -0
- ripperdoc/tools/multi_edit_tool.py +456 -0
- ripperdoc/tools/notebook_edit_tool.py +386 -0
- ripperdoc/tools/skill_tool.py +205 -0
- ripperdoc/tools/task_tool.py +379 -0
- ripperdoc/tools/todo_tool.py +494 -0
- ripperdoc/tools/tool_search_tool.py +380 -0
- ripperdoc/utils/__init__.py +1 -0
- ripperdoc/utils/bash_constants.py +51 -0
- ripperdoc/utils/bash_output_utils.py +43 -0
- ripperdoc/utils/coerce.py +34 -0
- ripperdoc/utils/context_length_errors.py +252 -0
- ripperdoc/utils/exit_code_handlers.py +241 -0
- ripperdoc/utils/file_watch.py +135 -0
- ripperdoc/utils/git_utils.py +274 -0
- ripperdoc/utils/json_utils.py +27 -0
- ripperdoc/utils/log.py +176 -0
- ripperdoc/utils/mcp.py +560 -0
- ripperdoc/utils/memory.py +253 -0
- ripperdoc/utils/message_compaction.py +676 -0
- ripperdoc/utils/messages.py +519 -0
- ripperdoc/utils/output_utils.py +258 -0
- ripperdoc/utils/path_ignore.py +677 -0
- ripperdoc/utils/path_utils.py +46 -0
- ripperdoc/utils/permissions/__init__.py +27 -0
- ripperdoc/utils/permissions/path_validation_utils.py +174 -0
- ripperdoc/utils/permissions/shell_command_validation.py +552 -0
- ripperdoc/utils/permissions/tool_permission_utils.py +279 -0
- ripperdoc/utils/prompt.py +17 -0
- ripperdoc/utils/safe_get_cwd.py +31 -0
- ripperdoc/utils/sandbox_utils.py +38 -0
- ripperdoc/utils/session_history.py +260 -0
- ripperdoc/utils/session_usage.py +117 -0
- ripperdoc/utils/shell_token_utils.py +95 -0
- ripperdoc/utils/shell_utils.py +159 -0
- ripperdoc/utils/todo.py +203 -0
- ripperdoc/utils/token_estimation.py +34 -0
- ripperdoc-0.2.6.dist-info/METADATA +193 -0
- ripperdoc-0.2.6.dist-info/RECORD +107 -0
- ripperdoc-0.2.6.dist-info/WHEEL +5 -0
- ripperdoc-0.2.6.dist-info/entry_points.txt +3 -0
- ripperdoc-0.2.6.dist-info/licenses/LICENSE +53 -0
- ripperdoc-0.2.6.dist-info/top_level.txt +1 -0

ripperdoc/utils/permissions/tool_permission_utils.py
@@ -0,0 +1,279 @@
+"""Permission evaluation helpers."""
+
+from __future__ import annotations
+
+from dataclasses import dataclass
+from typing import Callable, Iterable, List, Optional, Set
+
+from ripperdoc.utils.permissions.path_validation_utils import validate_shell_command_paths
+from ripperdoc.utils.permissions.shell_command_validation import validate_shell_command
+from ripperdoc.utils.safe_get_cwd import safe_get_cwd
+from ripperdoc.utils.shell_token_utils import parse_and_clean_shell_tokens, parse_shell_tokens
+
+
+@dataclass
+class ToolRule:
+    tool_name: str
+    rule_content: str
+    behavior: str = "allow"
+
+
+@dataclass
+class PermissionDecision:
+    behavior: str  # 'allow' | 'deny' | 'ask' | 'passthrough'
+    message: Optional[str] = None
+    updated_input: Optional[object] = None
+    decision_reason: Optional[dict] = None
+    rule_suggestions: Optional[List[ToolRule] | List[str]] = None
+
+
+def create_wildcard_rule(rule_name: str) -> str:
+    """Create a wildcard/prefix rule string."""
+    return f"{rule_name}:*"
+
+
+def create_tool_rule(rule_content: str) -> List[ToolRule]:
+    return [ToolRule(tool_name="Bash", rule_content=rule_content)]
+
+
+def create_wildcard_tool_rule(rule_name: str) -> List[ToolRule]:
+    return [ToolRule(tool_name="Bash", rule_content=create_wildcard_rule(rule_name))]
+
+
+def extract_rule_prefix(rule_string: str) -> Optional[str]:
+    return rule_string[:-2] if rule_string.endswith(":*") else None
+
+
+def match_rule(command: str, rule: str) -> bool:
+    """Return True if a command matches a rule (exact or wildcard)."""
+    command = command.strip()
+    if not command:
+        return False
+    prefix = extract_rule_prefix(rule)
+    if prefix is not None:
+        return command.startswith(prefix)
+    return command == rule
+
+
+def _merge_rules(*rules: Iterable[str]) -> Set[str]:
+    merged: Set[str] = set()
+    for collection in rules:
+        merged.update(collection)
+    return merged
+
+
+def _is_command_read_only(
+    command: str,
+    injection_check: Callable[[str], bool],
+) -> bool:
+    """Heuristic read-only detector mirroring the reference intent."""
+    validation = validate_shell_command(command)
+    if validation.behavior != "passthrough":
+        return False
+
+    cleaned_tokens = parse_and_clean_shell_tokens(command)
+    if not cleaned_tokens:
+        return True
+
+    # Treat pipelines/compound commands as read-only only if every segment is safe.
+    tokens = parse_shell_tokens(command)
+    if "|" in tokens:
+        parts: list[str] = []
+        current: list[str] = []
+        for token in tokens:
+            if token == "|":
+                if current:
+                    parts.append(" ".join(current))
+                    current = []
+            else:
+                current.append(token)
+        if current:
+            parts.append(" ".join(current))
+        return all(_is_command_read_only(part, injection_check) for part in parts)
+
+    dangerous_prefixes = {
+        "rm",
+        "mv",
+        "chmod",
+        "chown",
+        "sudo",
+        "dd",
+        "tee",
+        "truncate",
+        "kill",
+        "pkill",
+        "systemctl",
+        "service",
+    }
+    first = cleaned_tokens[0]
+    if first in dangerous_prefixes:
+        return False
+    if first == "git":
+        if len(cleaned_tokens) < 2:
+            return False
+        allowed_git = {
+            "status",
+            "diff",
+            "show",
+            "log",
+            "rev-parse",
+            "ls-files",
+            "remote",
+            "branch",
+            "tag",
+            "blame",
+            "reflog",
+        }
+        return cleaned_tokens[1] in allowed_git
+
+    # If no injection was detected and the command is free of mutations, treat as read-only.
+    return not injection_check(command)
+
+
+def _collect_rule_suggestions(command: str) -> List[ToolRule]:
+    suggestions: list[ToolRule] = [ToolRule(tool_name="Bash", rule_content=command)]
+    tokens = parse_and_clean_shell_tokens(command)
+    if tokens:
+        suggestions.append(ToolRule(tool_name="Bash", rule_content=create_wildcard_rule(tokens[0])))
+    return suggestions
+
+
+def evaluate_shell_command_permissions(
+    tool_request: object,
+    allowed_rules: Iterable[str],
+    denied_rules: Iterable[str],
+    allowed_working_dirs: Set[str] | None = None,
+    *,
+    command_injection_detected: bool = False,
+    injection_detector: Callable[[str], bool] | None = None,
+    read_only_detector: Callable[[str, Callable[[str], bool]], bool] | None = None,
+) -> PermissionDecision:
+    """Evaluate whether a bash command should be allowed."""
+    command = tool_request.command if hasattr(tool_request, "command") else str(tool_request)
+    trimmed_command = command.strip()
+    allowed_working_dirs = allowed_working_dirs or {safe_get_cwd()}
+    injection_detector = injection_detector or (
+        lambda cmd: validate_shell_command(cmd).behavior != "passthrough"
+    )
+    read_only_detector = read_only_detector or _is_command_read_only
+
+    merged_denied = _merge_rules(denied_rules)
+    merged_allowed = _merge_rules(allowed_rules)
+
+    if any(match_rule(trimmed_command, rule) for rule in merged_denied):
+        return PermissionDecision(
+            behavior="deny",
+            message=f"Permission to run '{trimmed_command}' has been denied.",
+            decision_reason={"type": "rule"},
+            rule_suggestions=None,
+        )
+
+    if any(match_rule(trimmed_command, rule) for rule in merged_allowed):
+        return PermissionDecision(
+            behavior="allow",
+            updated_input=tool_request,
+            decision_reason={"type": "rule"},
+            message="Command approved by configured rule.",
+        )
+
+    path_result = validate_shell_command_paths(
+        trimmed_command, safe_get_cwd(), allowed_working_dirs
+    )
+    if path_result.behavior != "passthrough":
+        return PermissionDecision(
+            behavior="ask",
+            message=path_result.message,
+            decision_reason={"type": "path_validation"},
+            rule_suggestions=None,
+        )
+
+    validation_result = validate_shell_command(trimmed_command)
+    if validation_result.behavior != "passthrough":
+        return PermissionDecision(
+            behavior="ask",
+            message=validation_result.message,
+            decision_reason={"type": "validation"},
+            rule_suggestions=None,
+        )
+
+    tokens = parse_shell_tokens(trimmed_command)
+    if "|" in tokens:
+        left_tokens = []
+        right_tokens = []
+        pipe_seen = False
+        for token in tokens:
+            if token == "|":
+                pipe_seen = True
+                continue
+            if pipe_seen:
+                right_tokens.append(token)
+            else:
+                left_tokens.append(token)
+        left_command = " ".join(left_tokens).strip()
+        right_command = " ".join(right_tokens).strip()
+
+        left_result = evaluate_shell_command_permissions(
+            type("Cmd", (), {"command": left_command}),
+            merged_allowed,
+            merged_denied,
+            allowed_working_dirs,
+            command_injection_detected=command_injection_detected,
+            injection_detector=injection_detector,
+            read_only_detector=read_only_detector,
+        )
+        right_read_only = read_only_detector(right_command, injection_detector)
+
+        if left_result.behavior == "deny":
+            return left_result
+        if not right_read_only:
+            return PermissionDecision(
+                behavior="ask",
+                message="Pipe right-hand command is not read-only.",
+                decision_reason={"type": "subcommand"},
+                rule_suggestions=_collect_rule_suggestions(right_command),
+            )
+        if left_result.behavior == "allow":
+            return PermissionDecision(
+                behavior="allow",
+                updated_input=tool_request,
+                decision_reason={"type": "subcommand"},
+            )
+        return PermissionDecision(
+            behavior="ask",
+            message="Permission required for piped command.",
+            decision_reason={"type": "subcommand"},
+            rule_suggestions=_collect_rule_suggestions(trimmed_command),
+        )
+
+    if read_only_detector(trimmed_command, injection_detector) and not command_injection_detected:
+        return PermissionDecision(
+            behavior="allow",
+            updated_input=tool_request,
+            decision_reason={"type": "other", "reason": "Read-only command"},
+        )
+
+    return PermissionDecision(
+        behavior="passthrough",
+        message="Command requires permission",
+        decision_reason={"type": "default"},
+        rule_suggestions=_collect_rule_suggestions(trimmed_command),
+    )
+
+
+def is_command_read_only(command: str) -> bool:
+    """Public wrapper to test if a command is read-only using reference heuristics."""
+    return _is_command_read_only(
+        command, lambda cmd: validate_shell_command(cmd).behavior != "passthrough"
+    )
+
+
+__all__ = [
+    "PermissionDecision",
+    "ToolRule",
+    "create_tool_rule",
+    "create_wildcard_tool_rule",
+    "evaluate_shell_command_permissions",
+    "extract_rule_prefix",
+    "match_rule",
+    "is_command_read_only",
+]
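
For orientation, a minimal usage sketch of the evaluator above, assuming ripperdoc is installed. The SimpleNamespace stand-in for a Bash tool request, the rule strings, and the expected outcomes are illustrative assumptions, not values shipped with the package.

    # Illustrative only: the request object and rule strings are assumptions.
    from types import SimpleNamespace

    from ripperdoc.utils.permissions.tool_permission_utils import (
        evaluate_shell_command_permissions,
        is_command_read_only,
    )

    decision = evaluate_shell_command_permissions(
        SimpleNamespace(command="git status"),   # evaluator reads `.command` when present
        allowed_rules={"git status", "ls:*"},    # exact rule plus a "name:*" prefix rule
        denied_rules={"rm:*"},
    )
    print(decision.behavior)                      # expected "allow": matched an allow rule
    print(is_command_read_only("rm -rf /tmp/x"))  # expected False: "rm" is treated as dangerous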

ripperdoc/utils/prompt.py
@@ -0,0 +1,17 @@
+"""Prompt helpers for interactive input."""
+
+from getpass import getpass
+
+
+def prompt_secret(prompt_text: str, prompt_suffix: str = ": ") -> str:
+    """Prompt for sensitive input, masking characters when possible.
+
+    Falls back to getpass (no echo) if prompt_toolkit is unavailable.
+    """
+    full_prompt = f"{prompt_text}{prompt_suffix}"
+    try:
+        from prompt_toolkit import prompt as pt_prompt
+
+        return pt_prompt(full_prompt, is_password=True)
+    except (ImportError, OSError, RuntimeError, EOFError):
+        return getpass(full_prompt)
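
A small sketch of how prompt_secret above might be used to collect a credential; the prompt text is an illustrative assumption.

    from ripperdoc.utils.prompt import prompt_secret

    # Shows "API key: " and masks input via prompt_toolkit when available,
    # otherwise falls back to getpass (no echo at all).
    api_key = prompt_secret("API key")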

ripperdoc/utils/safe_get_cwd.py
@@ -0,0 +1,31 @@
+"""Safe helpers for tracking and restoring the working directory."""
+
+from __future__ import annotations
+
+import os
+from pathlib import Path
+from ripperdoc.utils.log import get_logger
+
+logger = get_logger()
+
+_ORIGINAL_CWD = Path(os.getcwd()).resolve()
+
+
+def get_original_cwd() -> str:
+    """Return the process's initial working directory."""
+    return str(_ORIGINAL_CWD)
+
+
+def safe_get_cwd() -> str:
+    """Return the current working directory, falling back to the original on error."""
+    try:
+        return str(Path(os.getcwd()).resolve())
+    except (OSError, RuntimeError, ValueError) as exc:
+        logger.warning(
+            "[safe_get_cwd] Failed to resolve cwd: %s: %s",
+            type(exc).__name__, exc,
+        )
+        return get_original_cwd()
+
+
+__all__ = ["get_original_cwd", "safe_get_cwd"]
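
A minimal sketch of the fallback behavior above, assuming a platform where os.getcwd() raises once the current directory has been removed; the temporary-directory setup is purely illustrative.

    import os
    import tempfile

    from ripperdoc.utils.safe_get_cwd import get_original_cwd, safe_get_cwd

    scratch = tempfile.mkdtemp()
    os.chdir(scratch)
    os.rmdir(scratch)               # current directory no longer exists
    print(safe_get_cwd())           # logs a warning, returns the original cwd
    os.chdir(get_original_cwd())    # restore a valid working directory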

ripperdoc/utils/sandbox_utils.py
@@ -0,0 +1,38 @@
+"""Sandbox helpers.
+
+The reference uses macOS sandbox-exec profiles; in this environment we
+surface the same API surface but report unavailability by default.
+"""
+
+from __future__ import annotations
+
+from dataclasses import dataclass
+import shutil
+import shlex
+from typing import Callable
+
+
+@dataclass
+class SandboxWrapper:
+    """Represents a wrapped command plus a cleanup callback."""
+
+    final_command: str
+    cleanup: Callable[[], None]
+
+
+def is_sandbox_available() -> bool:
+    """Return whether sandboxed execution is available on this host."""
+    return shutil.which("srt") is not None
+
+
+def create_sandbox_wrapper(command: str) -> SandboxWrapper:
+    """Wrap a command for sandboxed execution or raise if unsupported."""
+    if not is_sandbox_available():
+        raise RuntimeError(
+            "Sandbox mode requested but not available (install @anthropic-ai/sandbox-runtime and ensure 'srt' is on PATH)"
+        )
+    quoted = shlex.quote(command)
+    return SandboxWrapper(final_command=f"srt {quoted}", cleanup=lambda: None)
+
+
+__all__ = ["SandboxWrapper", "is_sandbox_available", "create_sandbox_wrapper"]
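
A short sketch of the sandbox helpers above; the wrapped command is an illustrative assumption, and whether the 'srt' runtime is present depends on the host.

    from ripperdoc.utils.sandbox_utils import create_sandbox_wrapper, is_sandbox_available

    if is_sandbox_available():
        wrapper = create_sandbox_wrapper("ls -la")
        print(wrapper.final_command)   # srt 'ls -la'
        wrapper.cleanup()              # no-op cleanup, kept for API symmetry
    else:
        print("sandbox runtime not on PATH; command would run unsandboxed")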

ripperdoc/utils/session_history.py
@@ -0,0 +1,260 @@
+"""Session log storage and retrieval."""
+
+from __future__ import annotations
+
+import json
+from dataclasses import dataclass
+from datetime import datetime
+from pathlib import Path
+from typing import List, Optional
+
+from ripperdoc.utils.log import get_logger
+from ripperdoc.utils.messages import (
+    AssistantMessage,
+    ProgressMessage,
+    UserMessage,
+)
+from ripperdoc.utils.path_utils import project_storage_dir
+
+
+logger = get_logger()
+
+ConversationMessage = UserMessage | AssistantMessage | ProgressMessage
+
+
+@dataclass
+class SessionSummary:
+    session_id: str
+    path: Path
+    message_count: int
+    created_at: datetime
+    updated_at: datetime
+    first_prompt: str
+
+
+def _sessions_root() -> Path:
+    return Path.home() / ".ripperdoc" / "sessions"
+
+
+def _session_file(project_path: Path, session_id: str) -> Path:
+    directory = project_storage_dir(_sessions_root(), project_path, ensure=True)
+    return directory / f"{session_id}.jsonl"
+
+
+def _now_iso() -> str:
+    return datetime.utcnow().isoformat() + "Z"
+
+
+def _extract_prompt(payload: dict) -> str:
+    """Pull a short preview of the first user message."""
+    if payload.get("type") != "user":
+        return ""
+    message = payload.get("message") or {}
+    content = message.get("content")
+    preview = ""
+    if isinstance(content, str):
+        preview = content
+    elif isinstance(content, list):
+        for block in content:
+            if not isinstance(block, dict):
+                continue
+            if block.get("type") == "text" and block.get("text"):
+                preview = str(block["text"])
+                break
+    preview = (preview or "").replace("\n", " ").strip()
+    if len(preview) > 80:
+        preview = preview[:77] + "..."
+    return preview
+
+
+def _deserialize_message(payload: dict) -> Optional[ConversationMessage]:
+    """Rebuild a message model from a stored payload."""
+    msg_type = payload.get("type")
+    if msg_type == "user":
+        return UserMessage(**payload)
+    if msg_type == "assistant":
+        return AssistantMessage(**payload)
+    if msg_type == "progress":
+        return ProgressMessage(**payload)
+    return None
+
+
+class SessionHistory:
+    """Append-only session log for a single session id."""
+
+    def __init__(self, project_path: Path, session_id: str):
+        self.project_path = project_path
+        self.session_id = session_id
+        self.path = _session_file(project_path, session_id)
+        self._seen_ids: set[str] = set()
+        self._load_seen_ids()
+
+    def _load_seen_ids(self) -> None:
+        if not self.path.exists():
+            return
+        try:
+            with self.path.open("r", encoding="utf-8") as fh:
+                for line in fh:
+                    try:
+                        data = json.loads(line)
+                        payload = data.get("payload") or {}
+                        msg_uuid = payload.get("uuid")
+                        if isinstance(msg_uuid, str):
+                            self._seen_ids.add(msg_uuid)
+                    except (json.JSONDecodeError, KeyError, TypeError, ValueError) as exc:
+                        logger.debug(
+                            "[session_history] Failed to parse session history line: %s: %s",
+                            type(exc).__name__, exc,
+                        )
+                        continue
+        except (OSError, IOError) as exc:
+            logger.warning(
+                "Failed to load seen IDs from session: %s: %s",
+                type(exc).__name__, exc,
+                extra={"session_id": self.session_id, "path": str(self.path)},
+            )
+            return
+
+    def append(self, message: ConversationMessage) -> None:
+        """Persist a single message to the session log."""
+        # Skip progress noise
+        if getattr(message, "type", None) == "progress":
+            return
+        msg_uuid = getattr(message, "uuid", None)
+        if isinstance(msg_uuid, str) and msg_uuid in self._seen_ids:
+            return
+
+        payload = message.model_dump(mode="json")
+        entry = {
+            "logged_at": _now_iso(),
+            "project_path": str(self.project_path.resolve()),
+            "payload": payload,
+        }
+        try:
+            with self.path.open("a", encoding="utf-8") as fh:
+                json.dump(entry, fh)
+                fh.write("\n")
+            if isinstance(msg_uuid, str):
+                self._seen_ids.add(msg_uuid)
+        except (OSError, IOError) as exc:
+            # Avoid crashing the UI if logging fails
+            logger.warning(
+                "Failed to append message to session log: %s: %s",
+                type(exc).__name__, exc,
+                extra={"session_id": self.session_id, "path": str(self.path)},
+            )
+            return
+
+
+def list_session_summaries(project_path: Path) -> List[SessionSummary]:
+    """Return available sessions for the project ordered by last update desc."""
+    directory = project_storage_dir(_sessions_root(), project_path)
+    if not directory.exists():
+        return []
+
+    current_project = str(project_path.resolve())
+    summaries: List[SessionSummary] = []
+    for jsonl_path in directory.glob("*.jsonl"):
+        try:
+            with jsonl_path.open("r", encoding="utf-8") as fh:
+                messages = [json.loads(line) for line in fh if line.strip()]
+        except (OSError, IOError, json.JSONDecodeError) as exc:
+            logger.warning(
+                "Failed to load session summary: %s: %s",
+                type(exc).__name__, exc,
+                extra={"path": str(jsonl_path)},
+            )
+            continue
+
+        # Check if this session belongs to the current project
+        # If any message has a project_path field, use it to verify
+        session_project_path = None
+        for entry in messages:
+            entry_path = entry.get("project_path")
+            if entry_path:
+                session_project_path = entry_path
+                break
+
+        # Skip sessions that belong to a different project
+        if session_project_path and session_project_path != current_project:
+            continue
+
+        payloads = [entry.get("payload") or {} for entry in messages]
+        conversation_payloads = [
+            payload for payload in payloads if payload.get("type") in ("user", "assistant")
+        ]
+        if not conversation_payloads:
+            continue
+
+        created_raw = messages[0].get("logged_at")
+        updated_raw = messages[-1].get("logged_at")
+        created_at = (
+            datetime.fromisoformat(created_raw.replace("Z", "+00:00"))
+            if isinstance(created_raw, str)
+            else datetime.fromtimestamp(jsonl_path.stat().st_ctime)
+        )
+        updated_at = (
+            datetime.fromisoformat(updated_raw.replace("Z", "+00:00"))
+            if isinstance(updated_raw, str)
+            else datetime.fromtimestamp(jsonl_path.stat().st_mtime)
+        )
+        first_prompt = ""
+        for payload in conversation_payloads:
+            first_prompt = _extract_prompt(payload)
+            if first_prompt:
+                break
+        summaries.append(
+            SessionSummary(
+                session_id=jsonl_path.stem,
+                path=jsonl_path,
+                message_count=len(conversation_payloads),
+                created_at=created_at,
+                updated_at=updated_at,
+                first_prompt=first_prompt or "(no prompt)",
+            )
+        )
+
+    return sorted(summaries, key=lambda s: s.updated_at, reverse=True)
+
+
+def load_session_messages(project_path: Path, session_id: str) -> List[ConversationMessage]:
+    """Load messages for a stored session."""
+    path = _session_file(project_path, session_id)
+    if not path.exists():
+        return []
+
+    messages: List[ConversationMessage] = []
+    try:
+        with path.open("r", encoding="utf-8") as fh:
+            for line in fh:
+                if not line.strip():
+                    continue
+                try:
+                    data = json.loads(line)
+                    payload = data.get("payload") or {}
+                    msg = _deserialize_message(payload)
+                    if msg is not None and getattr(msg, "type", None) != "progress":
+                        messages.append(msg)
+                except (json.JSONDecodeError, KeyError, TypeError, ValueError) as exc:
+                    logger.debug(
+                        "[session_history] Failed to deserialize message in session %s: %s: %s",
+                        session_id, type(exc).__name__, exc,
+                    )
+                    continue
+    except (OSError, IOError) as exc:
+        logger.warning(
+            "Failed to load session messages: %s: %s",
+            type(exc).__name__, exc,
+            extra={"session_id": session_id, "path": str(path)},
+        )
+        return []
+
+    return messages
+
+
+__all__ = [
+    "SessionHistory",
+    "SessionSummary",
+    "list_session_summaries",
+    "load_session_messages",
+]
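
A minimal sketch of reading back stored sessions with the helpers above, assuming ripperdoc is installed; it only enumerates whatever already exists under ~/.ripperdoc/sessions for the current project.

    from pathlib import Path

    from ripperdoc.utils.session_history import list_session_summaries, load_session_messages

    project = Path.cwd()
    summaries = list_session_summaries(project)        # newest session first
    for s in summaries:
        print(s.session_id, s.updated_at, s.message_count, s.first_prompt)

    if summaries:
        messages = load_session_messages(project, summaries[0].session_id)
        print(f"restored {len(messages)} messages")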