ripperdoc 0.2.9__py3-none-any.whl → 0.3.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (76)
  1. ripperdoc/__init__.py +1 -1
  2. ripperdoc/cli/cli.py +379 -51
  3. ripperdoc/cli/commands/__init__.py +6 -0
  4. ripperdoc/cli/commands/agents_cmd.py +128 -5
  5. ripperdoc/cli/commands/clear_cmd.py +8 -0
  6. ripperdoc/cli/commands/doctor_cmd.py +29 -0
  7. ripperdoc/cli/commands/exit_cmd.py +1 -0
  8. ripperdoc/cli/commands/memory_cmd.py +2 -1
  9. ripperdoc/cli/commands/models_cmd.py +63 -7
  10. ripperdoc/cli/commands/resume_cmd.py +5 -0
  11. ripperdoc/cli/commands/skills_cmd.py +103 -0
  12. ripperdoc/cli/commands/stats_cmd.py +244 -0
  13. ripperdoc/cli/commands/status_cmd.py +10 -0
  14. ripperdoc/cli/commands/tasks_cmd.py +6 -3
  15. ripperdoc/cli/commands/themes_cmd.py +139 -0
  16. ripperdoc/cli/ui/file_mention_completer.py +63 -13
  17. ripperdoc/cli/ui/helpers.py +6 -3
  18. ripperdoc/cli/ui/interrupt_handler.py +34 -0
  19. ripperdoc/cli/ui/panels.py +14 -8
  20. ripperdoc/cli/ui/rich_ui.py +737 -47
  21. ripperdoc/cli/ui/spinner.py +93 -18
  22. ripperdoc/cli/ui/thinking_spinner.py +1 -2
  23. ripperdoc/cli/ui/tool_renderers.py +10 -9
  24. ripperdoc/cli/ui/wizard.py +24 -19
  25. ripperdoc/core/agents.py +14 -3
  26. ripperdoc/core/config.py +238 -6
  27. ripperdoc/core/default_tools.py +91 -10
  28. ripperdoc/core/hooks/events.py +4 -0
  29. ripperdoc/core/hooks/llm_callback.py +58 -0
  30. ripperdoc/core/hooks/manager.py +6 -0
  31. ripperdoc/core/permissions.py +160 -9
  32. ripperdoc/core/providers/openai.py +84 -28
  33. ripperdoc/core/query.py +489 -87
  34. ripperdoc/core/query_utils.py +17 -14
  35. ripperdoc/core/skills.py +1 -0
  36. ripperdoc/core/theme.py +298 -0
  37. ripperdoc/core/tool.py +15 -5
  38. ripperdoc/protocol/__init__.py +14 -0
  39. ripperdoc/protocol/models.py +300 -0
  40. ripperdoc/protocol/stdio.py +1453 -0
  41. ripperdoc/tools/background_shell.py +354 -139
  42. ripperdoc/tools/bash_tool.py +117 -22
  43. ripperdoc/tools/file_edit_tool.py +228 -50
  44. ripperdoc/tools/file_read_tool.py +154 -3
  45. ripperdoc/tools/file_write_tool.py +53 -11
  46. ripperdoc/tools/grep_tool.py +98 -8
  47. ripperdoc/tools/lsp_tool.py +609 -0
  48. ripperdoc/tools/multi_edit_tool.py +26 -3
  49. ripperdoc/tools/skill_tool.py +52 -1
  50. ripperdoc/tools/task_tool.py +539 -65
  51. ripperdoc/utils/conversation_compaction.py +1 -1
  52. ripperdoc/utils/file_watch.py +216 -7
  53. ripperdoc/utils/image_utils.py +125 -0
  54. ripperdoc/utils/log.py +30 -3
  55. ripperdoc/utils/lsp.py +812 -0
  56. ripperdoc/utils/mcp.py +80 -18
  57. ripperdoc/utils/message_formatting.py +7 -4
  58. ripperdoc/utils/messages.py +198 -33
  59. ripperdoc/utils/pending_messages.py +50 -0
  60. ripperdoc/utils/permissions/shell_command_validation.py +3 -3
  61. ripperdoc/utils/permissions/tool_permission_utils.py +180 -15
  62. ripperdoc/utils/platform.py +198 -0
  63. ripperdoc/utils/session_heatmap.py +242 -0
  64. ripperdoc/utils/session_history.py +2 -2
  65. ripperdoc/utils/session_stats.py +294 -0
  66. ripperdoc/utils/shell_utils.py +8 -5
  67. ripperdoc/utils/todo.py +0 -6
  68. {ripperdoc-0.2.9.dist-info → ripperdoc-0.3.0.dist-info}/METADATA +55 -17
  69. ripperdoc-0.3.0.dist-info/RECORD +136 -0
  70. {ripperdoc-0.2.9.dist-info → ripperdoc-0.3.0.dist-info}/WHEEL +1 -1
  71. ripperdoc/sdk/__init__.py +0 -9
  72. ripperdoc/sdk/client.py +0 -333
  73. ripperdoc-0.2.9.dist-info/RECORD +0 -123
  74. {ripperdoc-0.2.9.dist-info → ripperdoc-0.3.0.dist-info}/entry_points.txt +0 -0
  75. {ripperdoc-0.2.9.dist-info → ripperdoc-0.3.0.dist-info}/licenses/LICENSE +0 -0
  76. {ripperdoc-0.2.9.dist-info → ripperdoc-0.3.0.dist-info}/top_level.txt +0 -0
@@ -333,7 +333,7 @@ async def summarize_conversation(
333
333
  system_prompt=system_prompt,
334
334
  tools=[],
335
335
  max_thinking_tokens=0,
336
- model="main",
336
+ model="quick",
337
337
  )
338
338
 
339
339
  result = extract_assistant_text(assistant_response)
@@ -4,13 +4,20 @@ from __future__ import annotations
4
4
 
5
5
  import difflib
6
6
  import os
7
+ import sys
8
+ import threading
9
+ from collections import OrderedDict
7
10
  from dataclasses import dataclass
8
- from typing import Dict, List, Optional
11
+ from typing import Dict, Iterator, List, Optional, Tuple, Union
9
12
 
10
13
  from ripperdoc.utils.log import get_logger
11
14
 
12
15
  logger = get_logger()
13
16
 
17
+ # Default limits for BoundedFileCache
18
+ DEFAULT_MAX_ENTRIES = int(os.getenv("RIPPERDOC_FILE_CACHE_MAX_ENTRIES", "500"))
19
+ DEFAULT_MAX_MEMORY_MB = float(os.getenv("RIPPERDOC_FILE_CACHE_MAX_MEMORY_MB", "50"))
20
+
14
21
 
15
22
  @dataclass
16
23
  class FileSnapshot:
@@ -20,6 +27,199 @@ class FileSnapshot:
20
27
  timestamp: float
21
28
  offset: int = 0
22
29
  limit: Optional[int] = None
30
+ encoding: str = "utf-8"
31
+
32
+ def memory_size(self) -> int:
33
+ """Estimate memory usage of this snapshot in bytes."""
34
+ # String memory: roughly 1 byte per char for ASCII, more for unicode
35
+ # Plus object overhead (~50 bytes for dataclass)
36
+ return sys.getsizeof(self.content) + 50
37
+
38
+
39
class BoundedFileCache:
    """Thread-safe LRU cache for FileSnapshots with memory and entry limits.

    This cache prevents unbounded memory growth in long sessions by:
    1. Limiting the maximum number of entries (LRU eviction)
    2. Limiting total memory usage
    3. Providing thread-safe access

    Usage:
        cache = BoundedFileCache(max_entries=500, max_memory_mb=50)
        cache["/path/to/file"] = FileSnapshot(content="...", timestamp=123.0)
        snapshot = cache.get("/path/to/file")
    """

    def __init__(
        self,
        max_entries: int = DEFAULT_MAX_ENTRIES,
        max_memory_mb: float = DEFAULT_MAX_MEMORY_MB,
    ) -> None:
        """Initialize the bounded cache.

        Args:
            max_entries: Maximum number of file snapshots to keep
            max_memory_mb: Maximum total memory usage in megabytes
        """
        self._max_entries = max(1, max_entries)
        self._max_memory_bytes = int(max_memory_mb * 1024 * 1024)
        self._cache: OrderedDict[str, FileSnapshot] = OrderedDict()
        self._current_memory = 0
        # RLock: internal helpers re-enter locked paths safely.
        self._lock = threading.RLock()
        self._eviction_count = 0

    @property
    def max_entries(self) -> int:
        """Maximum number of entries allowed."""
        return self._max_entries

    @property
    def max_memory_bytes(self) -> int:
        """Maximum memory in bytes."""
        return self._max_memory_bytes

    @property
    def current_memory(self) -> int:
        """Current estimated memory usage in bytes."""
        with self._lock:
            return self._current_memory

    @property
    def eviction_count(self) -> int:
        """Number of entries evicted due to limits."""
        with self._lock:
            return self._eviction_count

    def __len__(self) -> int:
        with self._lock:
            return len(self._cache)

    def __contains__(self, key: str) -> bool:
        with self._lock:
            return key in self._cache

    def __getitem__(self, key: str) -> FileSnapshot:
        with self._lock:
            if key not in self._cache:
                raise KeyError(key)
            # Move to end (most recently used)
            self._cache.move_to_end(key)
            return self._cache[key]

    def __setitem__(self, key: str, value: FileSnapshot) -> None:
        with self._lock:
            new_size = value.memory_size()

            # If key exists, remove old entry first (atomic pop to avoid TOCTOU)
            old_value = self._cache.pop(key, None)
            if old_value is not None:
                self._current_memory = max(0, self._current_memory - old_value.memory_size())

            self._make_room(new_size)

            # Add new entry
            self._cache[key] = value
            self._current_memory += new_size

    def __delitem__(self, key: str) -> None:
        with self._lock:
            # Use atomic pop to avoid TOCTOU between check and delete
            old_value = self._cache.pop(key, None)
            if old_value is not None:
                self._current_memory = max(0, self._current_memory - old_value.memory_size())

    def _make_room(self, new_size: int) -> None:
        """Evict LRU entries until both limits allow inserting `new_size` bytes.

        Must be called with the lock held. Memory limit is enforced first,
        then the entry-count limit.
        """
        while self._current_memory + new_size > self._max_memory_bytes and self._cache:
            self._evict_oldest()
        while len(self._cache) >= self._max_entries:
            self._evict_oldest()

    def _evict_oldest(self) -> None:
        """Evict the least recently used entry. Must be called with lock held."""
        if self._cache:
            oldest_key, oldest_value = self._cache.popitem(last=False)
            self._current_memory = max(0, self._current_memory - oldest_value.memory_size())
            self._eviction_count += 1
            logger.debug(
                "[file_cache] Evicted entry due to cache limits",
                extra={"evicted_path": oldest_key, "total_evictions": self._eviction_count},
            )

    def get(self, key: str, default: Optional[FileSnapshot] = None) -> Optional[FileSnapshot]:
        """Get a snapshot, returning default if not found."""
        with self._lock:
            if key not in self._cache:
                return default
            self._cache.move_to_end(key)
            return self._cache[key]

    def pop(self, key: str, default: Optional[FileSnapshot] = None) -> Optional[FileSnapshot]:
        """Remove and return a snapshot."""
        with self._lock:
            if key not in self._cache:
                return default
            value = self._cache.pop(key)
            self._current_memory = max(0, self._current_memory - value.memory_size())
            return value

    def setdefault(self, key: str, default: FileSnapshot) -> FileSnapshot:
        """Atomically get or set a snapshot.

        If key exists, return its value (and mark as recently used).
        If key doesn't exist, set it to default and return default.
        This provides a thread-safe get-or-create operation.
        """
        with self._lock:
            if key in self._cache:
                self._cache.move_to_end(key)
                return self._cache[key]
            # Key doesn't exist - add it, evicting as needed first.
            new_size = default.memory_size()
            self._make_room(new_size)
            self._cache[key] = default
            self._current_memory += new_size
            return default

    def clear(self) -> None:
        """Remove all entries from the cache."""
        with self._lock:
            self._cache.clear()
            self._current_memory = 0

    def keys(self) -> List[str]:
        """Return list of cached file paths."""
        with self._lock:
            return list(self._cache.keys())

    def values(self) -> List[FileSnapshot]:
        """Return list of cached snapshots."""
        with self._lock:
            return list(self._cache.values())

    def items(self) -> List[Tuple[str, FileSnapshot]]:
        """Return list of (path, snapshot) pairs."""
        with self._lock:
            return list(self._cache.items())

    def __iter__(self) -> Iterator[str]:
        """Iterate over a point-in-time snapshot of the keys.

        The key list is copied under the lock, so concurrent modification
        cannot corrupt the iteration (though new/removed keys won't be seen).
        """
        with self._lock:
            return iter(list(self._cache.keys()))

    def stats(self) -> Dict[str, int]:
        """Return cache statistics."""
        with self._lock:
            return {
                "entries": len(self._cache),
                "max_entries": self._max_entries,
                "memory_bytes": self._current_memory,
                "max_memory_bytes": self._max_memory_bytes,
                "eviction_count": self._eviction_count,
            }
+ }
23
223
 
24
224
 
25
225
  @dataclass
@@ -30,13 +230,18 @@ class ChangedFileNotice:
30
230
  summary: str
31
231
 
32
232
 
233
+ # Type alias for cache - supports both Dict and BoundedFileCache
234
+ FileCacheType = Union[Dict[str, FileSnapshot], BoundedFileCache]
235
+
236
+
33
237
  def record_snapshot(
34
238
  file_path: str,
35
239
  content: str,
36
- cache: Dict[str, FileSnapshot],
240
+ cache: FileCacheType,
37
241
  *,
38
242
  offset: int = 0,
39
243
  limit: Optional[int] = None,
244
+ encoding: str = "utf-8",
40
245
  ) -> None:
41
246
  """Store the current contents and mtime for a file."""
42
247
  try:
@@ -44,12 +249,14 @@ def record_snapshot(
44
249
  except OSError:
45
250
  timestamp = 0.0
46
251
  cache[file_path] = FileSnapshot(
47
- content=content, timestamp=timestamp, offset=offset, limit=limit
252
+ content=content, timestamp=timestamp, offset=offset, limit=limit, encoding=encoding
48
253
  )
49
254
 
50
255
 
51
- def _read_portion(file_path: str, offset: int, limit: Optional[int]) -> str:
52
- with open(file_path, "r", encoding="utf-8", errors="replace") as handle:
256
+ def _read_portion(
257
+ file_path: str, offset: int, limit: Optional[int], encoding: str = "utf-8"
258
+ ) -> str:
259
+ with open(file_path, "r", encoding=encoding, errors="replace") as handle:
53
260
  lines = handle.readlines()
54
261
  start = max(offset, 0)
55
262
  if limit is None:
@@ -79,7 +286,7 @@ def _build_diff_summary(old_content: str, new_content: str, file_path: str, max_
79
286
 
80
287
 
81
288
  def detect_changed_files(
82
- cache: Dict[str, FileSnapshot], *, max_diff_lines: int = 80
289
+ cache: FileCacheType, *, max_diff_lines: int = 80
83
290
  ) -> List[ChangedFileNotice]:
84
291
  """Return notices for files whose mtime increased since they were read."""
85
292
  notices: List[ChangedFileNotice] = []
@@ -101,7 +308,9 @@ def detect_changed_files(
101
308
  continue
102
309
 
103
310
  try:
104
- new_content = _read_portion(file_path, snapshot.offset, snapshot.limit)
311
+ new_content = _read_portion(
312
+ file_path, snapshot.offset, snapshot.limit, snapshot.encoding
313
+ )
105
314
  except (
106
315
  OSError,
107
316
  IOError,
@@ -0,0 +1,125 @@
1
+ """Image processing utilities for Ripperdoc."""
2
+
3
+ import base64
4
+ import mimetypes
5
+ from pathlib import Path
6
+ from typing import Optional, Tuple
7
+
8
+ from ripperdoc.utils.log import get_logger
9
+
10
+ logger = get_logger()
11
+
12
# Supported image formats
SUPPORTED_IMAGE_EXTENSIONS = {".jpg", ".jpeg", ".png", ".gif", ".webp", ".bmp"}
SUPPORTED_IMAGE_MIME_TYPES = {"image/jpeg", "image/png", "image/gif", "image/webp", "image/bmp"}

# Refuse to load images above this size to bound memory usage.
MAX_IMAGE_SIZE_BYTES = 32 * 1024 * 1024  # 32MB

# Extension -> MIME fallback used when mimetypes cannot classify the file.
_EXTENSION_MIME_MAP = {
    ".jpg": "image/jpeg",
    ".jpeg": "image/jpeg",
    ".png": "image/png",
    ".gif": "image/gif",
    ".webp": "image/webp",
    ".bmp": "image/bmp",
}


def is_image_file(file_path: Path) -> bool:
    """Check if a file is a supported image format.

    Args:
        file_path: Path to the file

    Returns:
        True if the file has a supported image extension
    """
    return file_path.suffix.lower() in SUPPORTED_IMAGE_EXTENSIONS


def detect_mime_type(file_path: Path) -> str:
    """Detect the MIME type of an image file.

    Args:
        file_path: Path to the image file

    Returns:
        MIME type string (e.g., "image/jpeg", "image/png")
    """
    mime_type, _ = mimetypes.guess_type(file_path)
    if mime_type in SUPPORTED_IMAGE_MIME_TYPES:
        return mime_type

    # Fallback to extension-based detection; default to JPEG for unknowns.
    return _EXTENSION_MIME_MAP.get(file_path.suffix.lower(), "image/jpeg")


def read_image_as_base64(file_path: Path) -> Optional[Tuple[str, str]]:
    """Read an image file and return its base64-encoded data and MIME type.

    Args:
        file_path: Absolute path to the image file

    Returns:
        (base64_data, mime_type) tuple or None if reading fails
    """
    if not file_path.exists():
        logger.warning(
            "[image_utils] Image file not found",
            extra={"path": str(file_path)},
        )
        return None

    if not file_path.is_file():
        logger.warning(
            "[image_utils] Not a file",
            extra={"path": str(file_path)},
        )
        return None

    # Check file size. Guard against the file vanishing between the
    # exists()/is_file() checks above and this stat() call (TOCTOU).
    try:
        file_size = file_path.stat().st_size
    except OSError as e:
        logger.warning(
            "[image_utils] Failed to stat image",
            extra={"path": str(file_path), "error": str(e)},
        )
        return None
    if file_size > MAX_IMAGE_SIZE_BYTES:
        logger.warning(
            "[image_utils] Image too large",
            extra={
                "path": str(file_path),
                "size_bytes": file_size,
                "max_bytes": MAX_IMAGE_SIZE_BYTES,
            },
        )
        return None

    if not is_image_file(file_path):
        logger.warning(
            "[image_utils] Not a supported image format",
            extra={"path": str(file_path)},
        )
        return None

    try:
        with open(file_path, "rb") as f:
            image_bytes = f.read()

        base64_data = base64.b64encode(image_bytes).decode("utf-8")
        mime_type = detect_mime_type(file_path)

        logger.debug(
            "[image_utils] Loaded image",
            extra={
                "path": str(file_path),
                "size_bytes": len(image_bytes),
                "mime_type": mime_type,
            },
        )

        return (base64_data, mime_type)

    except (OSError, IOError) as e:
        logger.error(
            "[image_utils] Failed to read image",
            extra={"path": str(file_path), "error": str(e)},
        )
        return None
ripperdoc/utils/log.py CHANGED
@@ -4,13 +4,40 @@ import json
4
4
  import logging
5
5
  import sys
6
6
  import os
7
- from datetime import datetime
7
+ from datetime import datetime, timezone
8
8
  from pathlib import Path
9
9
  from typing import Any, Optional
10
10
 
11
11
  from ripperdoc.utils.path_utils import sanitize_project_path
12
12
 
13
13
 
14
class SpinnerSafeStreamHandler(logging.StreamHandler):
    """StreamHandler that clears the current line before ERROR/WARNING messages.

    This prevents log messages from appearing after a spinner's text,
    which would cause formatting issues.
    """

    # Width of the carriage-return wipe; assumes spinner text fits in 100 columns.
    _CLEAR_WIDTH = 100

    def emit(self, record: logging.LogRecord) -> None:
        """Emit a log record, clearing the line first for WARNING and above."""
        try:
            msg = self.format(record)
            stream = self.stream

            # ERROR and WARNING both need the spinner-line wipe. The original
            # if/elif branches executed identical code, so a single threshold
            # check (WARNING and above) covers both cases.
            if record.levelno >= logging.WARNING:
                # Return to line start, blank it with spaces, return again.
                stream.write("\r" + " " * self._CLEAR_WIDTH + "\r")

            stream.write(msg + self.terminator)
            self.flush()
        except Exception:
            # Match stdlib Handler convention: delegate to handleError rather
            # than letting logging failures propagate into the application.
            self.handleError(record)
+ self.handleError(record)
39
+
40
+
14
41
  _LOG_RECORD_FIELDS = {
15
42
  "name",
16
43
  "msg",
@@ -42,7 +69,7 @@ class StructuredFormatter(logging.Formatter):
42
69
  """Formatter with ISO timestamps and context."""
43
70
 
44
71
  def formatTime(self, record: logging.LogRecord, datefmt: Optional[str] = None) -> str:
45
- timestamp = datetime.utcfromtimestamp(record.created)
72
+ timestamp = datetime.fromtimestamp(record.created, tz=timezone.utc)
46
73
  return timestamp.strftime("%Y-%m-%dT%H:%M:%S.%f")[:-3] + "Z"
47
74
 
48
75
  def format(self, record: logging.LogRecord) -> str:
@@ -74,7 +101,7 @@ class RipperdocLogger:
74
101
 
75
102
  # Avoid adding duplicate handlers if an existing logger is reused.
76
103
  if not self.logger.handlers:
77
- console_handler = logging.StreamHandler(sys.stderr)
104
+ console_handler = SpinnerSafeStreamHandler(sys.stderr)
78
105
  console_handler.setLevel(level)
79
106
  console_formatter = logging.Formatter("%(levelname)s: %(message)s")
80
107
  console_handler.setFormatter(console_formatter)