ripperdoc-0.2.9-py3-none-any.whl → ripperdoc-0.2.10-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (45)
  1. ripperdoc/__init__.py +1 -1
  2. ripperdoc/cli/cli.py +235 -14
  3. ripperdoc/cli/commands/__init__.py +2 -0
  4. ripperdoc/cli/commands/agents_cmd.py +132 -5
  5. ripperdoc/cli/commands/clear_cmd.py +8 -0
  6. ripperdoc/cli/commands/exit_cmd.py +1 -0
  7. ripperdoc/cli/commands/models_cmd.py +3 -3
  8. ripperdoc/cli/commands/resume_cmd.py +4 -0
  9. ripperdoc/cli/commands/stats_cmd.py +244 -0
  10. ripperdoc/cli/ui/panels.py +1 -0
  11. ripperdoc/cli/ui/rich_ui.py +295 -24
  12. ripperdoc/cli/ui/spinner.py +30 -18
  13. ripperdoc/cli/ui/thinking_spinner.py +1 -2
  14. ripperdoc/cli/ui/wizard.py +6 -8
  15. ripperdoc/core/agents.py +10 -3
  16. ripperdoc/core/config.py +3 -6
  17. ripperdoc/core/default_tools.py +90 -10
  18. ripperdoc/core/hooks/events.py +4 -0
  19. ripperdoc/core/hooks/llm_callback.py +59 -0
  20. ripperdoc/core/permissions.py +78 -4
  21. ripperdoc/core/providers/openai.py +29 -19
  22. ripperdoc/core/query.py +192 -31
  23. ripperdoc/core/tool.py +9 -4
  24. ripperdoc/sdk/client.py +77 -2
  25. ripperdoc/tools/background_shell.py +305 -134
  26. ripperdoc/tools/bash_tool.py +42 -13
  27. ripperdoc/tools/file_edit_tool.py +159 -50
  28. ripperdoc/tools/file_read_tool.py +20 -0
  29. ripperdoc/tools/file_write_tool.py +7 -8
  30. ripperdoc/tools/lsp_tool.py +615 -0
  31. ripperdoc/tools/task_tool.py +514 -65
  32. ripperdoc/utils/conversation_compaction.py +1 -1
  33. ripperdoc/utils/file_watch.py +206 -3
  34. ripperdoc/utils/lsp.py +806 -0
  35. ripperdoc/utils/message_formatting.py +5 -2
  36. ripperdoc/utils/messages.py +21 -1
  37. ripperdoc/utils/permissions/tool_permission_utils.py +174 -15
  38. ripperdoc/utils/session_heatmap.py +244 -0
  39. ripperdoc/utils/session_stats.py +293 -0
  40. {ripperdoc-0.2.9.dist-info → ripperdoc-0.2.10.dist-info}/METADATA +8 -2
  41. {ripperdoc-0.2.9.dist-info → ripperdoc-0.2.10.dist-info}/RECORD +45 -39
  42. {ripperdoc-0.2.9.dist-info → ripperdoc-0.2.10.dist-info}/WHEEL +0 -0
  43. {ripperdoc-0.2.9.dist-info → ripperdoc-0.2.10.dist-info}/entry_points.txt +0 -0
  44. {ripperdoc-0.2.9.dist-info → ripperdoc-0.2.10.dist-info}/licenses/LICENSE +0 -0
  45. {ripperdoc-0.2.9.dist-info → ripperdoc-0.2.10.dist-info}/top_level.txt +0 -0
ripperdoc/utils/conversation_compaction.py
@@ -333,7 +333,7 @@ async def summarize_conversation(
         system_prompt=system_prompt,
         tools=[],
         max_thinking_tokens=0,
-        model="main",
+        model="quick",
     )

    result = extract_assistant_text(assistant_response)
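
The only change to conversation_compaction.py points summarization at the "quick" model pointer instead of "main". As a purely hypothetical illustration of what swapping such a pointer implies (ripperdoc's real model configuration is not shown in this diff; every name below is made up):

# Hypothetical sketch only: a pointer-to-model mapping of the kind the change suggests.
MODEL_POINTERS = {
    "main": "big-default-model",   # assumed: full-size model used for normal turns
    "quick": "small-fast-model",   # assumed: cheaper model, now used for compaction summaries
}

def resolve_model(pointer: str) -> str:
    # Fall back to the main model for unknown pointers (illustrative behavior only).
    return MODEL_POINTERS.get(pointer, MODEL_POINTERS["main"])

print(resolve_model("quick"))  # -> "small-fast-model"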
ripperdoc/utils/file_watch.py
@@ -4,13 +4,20 @@ from __future__ import annotations

 import difflib
 import os
+import sys
+import threading
+from collections import OrderedDict
 from dataclasses import dataclass
-from typing import Dict, List, Optional
+from typing import Dict, Iterator, List, Optional, Tuple

 from ripperdoc.utils.log import get_logger

 logger = get_logger()

+# Default limits for BoundedFileCache
+DEFAULT_MAX_ENTRIES = int(os.getenv("RIPPERDOC_FILE_CACHE_MAX_ENTRIES", "500"))
+DEFAULT_MAX_MEMORY_MB = float(os.getenv("RIPPERDOC_FILE_CACHE_MAX_MEMORY_MB", "50"))
+

 @dataclass
 class FileSnapshot:
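
The new cache limits are read from environment variables when the module is imported, so they can be tuned per session without code changes. A minimal sketch, assuming these hunks belong to ripperdoc/utils/file_watch.py (consistent with the +206 -3 entry in the file list) and that the overrides are set before the module is first imported:

import os

# Raise the ceilings for a long-running session. This must happen before
# ripperdoc.utils.file_watch is imported, because DEFAULT_MAX_ENTRIES and
# DEFAULT_MAX_MEMORY_MB are evaluated at import time.
os.environ["RIPPERDOC_FILE_CACHE_MAX_ENTRIES"] = "1000"
os.environ["RIPPERDOC_FILE_CACHE_MAX_MEMORY_MB"] = "100"

from ripperdoc.utils.file_watch import BoundedFileCache  # noqa: E402

cache = BoundedFileCache()  # picks up the overridden defaults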
@@ -21,6 +28,198 @@ class FileSnapshot:
     offset: int = 0
     limit: Optional[int] = None

+    def memory_size(self) -> int:
+        """Estimate memory usage of this snapshot in bytes."""
+        # String memory: roughly 1 byte per char for ASCII, more for unicode
+        # Plus object overhead (~50 bytes for dataclass)
+        return sys.getsizeof(self.content) + 50
+
+
+class BoundedFileCache:
+    """Thread-safe LRU cache for FileSnapshots with memory and entry limits.
+
+    This cache prevents unbounded memory growth in long sessions by:
+    1. Limiting the maximum number of entries (LRU eviction)
+    2. Limiting total memory usage
+    3. Providing thread-safe access
+
+    Usage:
+        cache = BoundedFileCache(max_entries=500, max_memory_mb=50)
+        cache["/path/to/file"] = FileSnapshot(content="...", timestamp=123.0)
+        snapshot = cache.get("/path/to/file")
+    """
+
+    def __init__(
+        self,
+        max_entries: int = DEFAULT_MAX_ENTRIES,
+        max_memory_mb: float = DEFAULT_MAX_MEMORY_MB,
+    ) -> None:
+        """Initialize the bounded cache.
+
+        Args:
+            max_entries: Maximum number of file snapshots to keep
+            max_memory_mb: Maximum total memory usage in megabytes
+        """
+        self._max_entries = max(1, max_entries)
+        self._max_memory_bytes = int(max_memory_mb * 1024 * 1024)
+        self._cache: OrderedDict[str, FileSnapshot] = OrderedDict()
+        self._current_memory = 0
+        self._lock = threading.RLock()
+        self._eviction_count = 0
+
+    @property
+    def max_entries(self) -> int:
+        """Maximum number of entries allowed."""
+        return self._max_entries
+
+    @property
+    def max_memory_bytes(self) -> int:
+        """Maximum memory in bytes."""
+        return self._max_memory_bytes
+
+    @property
+    def current_memory(self) -> int:
+        """Current estimated memory usage in bytes."""
+        with self._lock:
+            return self._current_memory
+
+    @property
+    def eviction_count(self) -> int:
+        """Number of entries evicted due to limits."""
+        with self._lock:
+            return self._eviction_count
+
+    def __len__(self) -> int:
+        with self._lock:
+            return len(self._cache)
+
+    def __contains__(self, key: str) -> bool:
+        with self._lock:
+            return key in self._cache
+
+    def __getitem__(self, key: str) -> FileSnapshot:
+        with self._lock:
+            if key not in self._cache:
+                raise KeyError(key)
+            # Move to end (most recently used)
+            self._cache.move_to_end(key)
+            return self._cache[key]
+
+    def __setitem__(self, key: str, value: FileSnapshot) -> None:
+        with self._lock:
+            new_size = value.memory_size()
+
+            # If key exists, remove old entry first (atomic pop to avoid TOCTOU)
+            old_value = self._cache.pop(key, None)
+            if old_value is not None:
+                self._current_memory = max(0, self._current_memory - old_value.memory_size())
+
+            # Evict entries if needed (memory limit)
+            while self._current_memory + new_size > self._max_memory_bytes and self._cache:
+                self._evict_oldest()
+
+            # Evict entries if needed (entry limit)
+            while len(self._cache) >= self._max_entries:
+                self._evict_oldest()
+
+            # Add new entry
+            self._cache[key] = value
+            self._current_memory += new_size
+
+    def __delitem__(self, key: str) -> None:
+        with self._lock:
+            # Use atomic pop to avoid TOCTOU between check and delete
+            old_value = self._cache.pop(key, None)
+            if old_value is not None:
+                self._current_memory = max(0, self._current_memory - old_value.memory_size())
+
+    def _evict_oldest(self) -> None:
+        """Evict the least recently used entry. Must be called with lock held."""
+        if self._cache:
+            oldest_key, oldest_value = self._cache.popitem(last=False)
+            self._current_memory = max(0, self._current_memory - oldest_value.memory_size())
+            self._eviction_count += 1
+            logger.debug(
+                "[file_cache] Evicted entry due to cache limits",
+                extra={"evicted_path": oldest_key, "total_evictions": self._eviction_count},
+            )
+
+    def get(self, key: str, default: Optional[FileSnapshot] = None) -> Optional[FileSnapshot]:
+        """Get a snapshot, returning default if not found."""
+        with self._lock:
+            if key not in self._cache:
+                return default
+            self._cache.move_to_end(key)
+            return self._cache[key]
+
+    def pop(self, key: str, default: Optional[FileSnapshot] = None) -> Optional[FileSnapshot]:
+        """Remove and return a snapshot."""
+        with self._lock:
+            if key not in self._cache:
+                return default
+            value = self._cache.pop(key)
+            self._current_memory = max(0, self._current_memory - value.memory_size())
+            return value
+
+    def setdefault(self, key: str, default: FileSnapshot) -> FileSnapshot:
+        """Atomically get or set a snapshot.
+
+        If key exists, return its value (and mark as recently used).
+        If key doesn't exist, set it to default and return default.
+        This provides a thread-safe get-or-create operation.
+        """
+        with self._lock:
+            if key in self._cache:
+                self._cache.move_to_end(key)
+                return self._cache[key]
+            # Key doesn't exist - add it
+            new_size = default.memory_size()
+            # Evict if needed
+            while self._current_memory + new_size > self._max_memory_bytes and self._cache:
+                self._evict_oldest()
+            while len(self._cache) >= self._max_entries:
+                self._evict_oldest()
+            self._cache[key] = default
+            self._current_memory += new_size
+            return default
+
+    def clear(self) -> None:
+        """Remove all entries from the cache."""
+        with self._lock:
+            self._cache.clear()
+            self._current_memory = 0
+
+    def keys(self) -> List[str]:
+        """Return list of cached file paths."""
+        with self._lock:
+            return list(self._cache.keys())
+
+    def values(self) -> List[FileSnapshot]:
+        """Return list of cached snapshots."""
+        with self._lock:
+            return list(self._cache.values())
+
+    def items(self) -> List[Tuple[str, FileSnapshot]]:
+        """Return list of (path, snapshot) pairs."""
+        with self._lock:
+            return list(self._cache.items())
+
+    def __iter__(self) -> Iterator[str]:
+        """Iterate over keys (not thread-safe for modifications during iteration)."""
+        with self._lock:
+            return iter(list(self._cache.keys()))
+
+    def stats(self) -> Dict[str, int]:
+        """Return cache statistics."""
+        with self._lock:
+            return {
+                "entries": len(self._cache),
+                "max_entries": self._max_entries,
+                "memory_bytes": self._current_memory,
+                "max_memory_bytes": self._max_memory_bytes,
+                "eviction_count": self._eviction_count,
+            }
+

 @dataclass
 class ChangedFileNotice:
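
Taken together, BoundedFileCache behaves like a dict of FileSnapshots with LRU eviction on both entry count and estimated memory. Below is a small usage sketch of the API added above; FileSnapshot's full field list is not visible in this diff, so the constructor calls follow the class docstring's own example:

from ripperdoc.utils.file_watch import BoundedFileCache, FileSnapshot

# Tiny limits so eviction is easy to observe.
cache = BoundedFileCache(max_entries=2, max_memory_mb=1)

cache["/tmp/a.txt"] = FileSnapshot(content="alpha", timestamp=1.0)
cache["/tmp/b.txt"] = FileSnapshot(content="beta", timestamp=2.0)

cache.get("/tmp/a.txt")  # touching a.txt makes it the most recently used entry
cache["/tmp/c.txt"] = FileSnapshot(content="gamma", timestamp=3.0)  # forces one eviction

assert "/tmp/b.txt" not in cache  # b.txt was least recently used, so it was evicted
assert len(cache) == 2
print(cache.stats())  # entries, max_entries, memory_bytes, max_memory_bytes, eviction_count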
@@ -30,10 +229,14 @@ class ChangedFileNotice:
     summary: str


+# Type alias for cache - supports both Dict and BoundedFileCache
+FileCacheType = Dict[str, FileSnapshot] | BoundedFileCache
+
+
 def record_snapshot(
     file_path: str,
     content: str,
-    cache: Dict[str, FileSnapshot],
+    cache: FileCacheType,
     *,
     offset: int = 0,
     limit: Optional[int] = None,
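
The FileCacheType alias keeps existing dict-based call sites working while letting new callers pass the bounded cache, since BoundedFileCache mirrors the dict operations used by this module (get, __setitem__, pop, items, and so on). A brief sketch; as above, the FileSnapshot constructor arguments follow the class docstring rather than a field list visible in this diff:

from typing import Dict

from ripperdoc.utils.file_watch import BoundedFileCache, FileSnapshot

plain_cache: Dict[str, FileSnapshot] = {}
bounded_cache = BoundedFileCache()

# Both containers satisfy FileCacheType = Dict[str, FileSnapshot] | BoundedFileCache.
for cache in (plain_cache, bounded_cache):
    cache["/tmp/example.txt"] = FileSnapshot(content="hello\n", timestamp=0.0)
    assert cache.get("/tmp/example.txt") is not None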
@@ -79,7 +282,7 @@ def _build_diff_summary(old_content: str, new_content: str, file_path: str, max_


 def detect_changed_files(
-    cache: Dict[str, FileSnapshot], *, max_diff_lines: int = 80
+    cache: FileCacheType, *, max_diff_lines: int = 80
 ) -> List[ChangedFileNotice]:
     """Return notices for files whose mtime increased since they were read."""
     notices: List[ChangedFileNotice] = []
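
End to end, the bounded cache plugs into the existing snapshot and change-detection flow. A sketch of that flow, assuming record_snapshot captures the file's current mtime when called (consistent with the detect_changed_files docstring) and noting that ChangedFileNotice fields other than summary are not shown in this diff:

import time
from pathlib import Path

from ripperdoc.utils.file_watch import (
    BoundedFileCache,
    detect_changed_files,
    record_snapshot,
)

cache = BoundedFileCache()

# Snapshot a file, then modify it on disk.
path = Path("/tmp/watched.txt")
path.write_text("original contents\n")
record_snapshot(str(path), path.read_text(), cache)

time.sleep(1)  # make sure the mtime visibly increases on coarse-grained filesystems
path.write_text("modified contents\n")

# Files whose mtime increased since they were read produce a notice with a diff summary.
for notice in detect_changed_files(cache, max_diff_lines=40):
    print(notice.summary)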