ripperdoc 0.2.9__py3-none-any.whl → 0.3.0__py3-none-any.whl

This diff compares two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the changes between the versions exactly as they appear in the public registry.
Files changed (76)
  1. ripperdoc/__init__.py +1 -1
  2. ripperdoc/cli/cli.py +379 -51
  3. ripperdoc/cli/commands/__init__.py +6 -0
  4. ripperdoc/cli/commands/agents_cmd.py +128 -5
  5. ripperdoc/cli/commands/clear_cmd.py +8 -0
  6. ripperdoc/cli/commands/doctor_cmd.py +29 -0
  7. ripperdoc/cli/commands/exit_cmd.py +1 -0
  8. ripperdoc/cli/commands/memory_cmd.py +2 -1
  9. ripperdoc/cli/commands/models_cmd.py +63 -7
  10. ripperdoc/cli/commands/resume_cmd.py +5 -0
  11. ripperdoc/cli/commands/skills_cmd.py +103 -0
  12. ripperdoc/cli/commands/stats_cmd.py +244 -0
  13. ripperdoc/cli/commands/status_cmd.py +10 -0
  14. ripperdoc/cli/commands/tasks_cmd.py +6 -3
  15. ripperdoc/cli/commands/themes_cmd.py +139 -0
  16. ripperdoc/cli/ui/file_mention_completer.py +63 -13
  17. ripperdoc/cli/ui/helpers.py +6 -3
  18. ripperdoc/cli/ui/interrupt_handler.py +34 -0
  19. ripperdoc/cli/ui/panels.py +14 -8
  20. ripperdoc/cli/ui/rich_ui.py +737 -47
  21. ripperdoc/cli/ui/spinner.py +93 -18
  22. ripperdoc/cli/ui/thinking_spinner.py +1 -2
  23. ripperdoc/cli/ui/tool_renderers.py +10 -9
  24. ripperdoc/cli/ui/wizard.py +24 -19
  25. ripperdoc/core/agents.py +14 -3
  26. ripperdoc/core/config.py +238 -6
  27. ripperdoc/core/default_tools.py +91 -10
  28. ripperdoc/core/hooks/events.py +4 -0
  29. ripperdoc/core/hooks/llm_callback.py +58 -0
  30. ripperdoc/core/hooks/manager.py +6 -0
  31. ripperdoc/core/permissions.py +160 -9
  32. ripperdoc/core/providers/openai.py +84 -28
  33. ripperdoc/core/query.py +489 -87
  34. ripperdoc/core/query_utils.py +17 -14
  35. ripperdoc/core/skills.py +1 -0
  36. ripperdoc/core/theme.py +298 -0
  37. ripperdoc/core/tool.py +15 -5
  38. ripperdoc/protocol/__init__.py +14 -0
  39. ripperdoc/protocol/models.py +300 -0
  40. ripperdoc/protocol/stdio.py +1453 -0
  41. ripperdoc/tools/background_shell.py +354 -139
  42. ripperdoc/tools/bash_tool.py +117 -22
  43. ripperdoc/tools/file_edit_tool.py +228 -50
  44. ripperdoc/tools/file_read_tool.py +154 -3
  45. ripperdoc/tools/file_write_tool.py +53 -11
  46. ripperdoc/tools/grep_tool.py +98 -8
  47. ripperdoc/tools/lsp_tool.py +609 -0
  48. ripperdoc/tools/multi_edit_tool.py +26 -3
  49. ripperdoc/tools/skill_tool.py +52 -1
  50. ripperdoc/tools/task_tool.py +539 -65
  51. ripperdoc/utils/conversation_compaction.py +1 -1
  52. ripperdoc/utils/file_watch.py +216 -7
  53. ripperdoc/utils/image_utils.py +125 -0
  54. ripperdoc/utils/log.py +30 -3
  55. ripperdoc/utils/lsp.py +812 -0
  56. ripperdoc/utils/mcp.py +80 -18
  57. ripperdoc/utils/message_formatting.py +7 -4
  58. ripperdoc/utils/messages.py +198 -33
  59. ripperdoc/utils/pending_messages.py +50 -0
  60. ripperdoc/utils/permissions/shell_command_validation.py +3 -3
  61. ripperdoc/utils/permissions/tool_permission_utils.py +180 -15
  62. ripperdoc/utils/platform.py +198 -0
  63. ripperdoc/utils/session_heatmap.py +242 -0
  64. ripperdoc/utils/session_history.py +2 -2
  65. ripperdoc/utils/session_stats.py +294 -0
  66. ripperdoc/utils/shell_utils.py +8 -5
  67. ripperdoc/utils/todo.py +0 -6
  68. {ripperdoc-0.2.9.dist-info → ripperdoc-0.3.0.dist-info}/METADATA +55 -17
  69. ripperdoc-0.3.0.dist-info/RECORD +136 -0
  70. {ripperdoc-0.2.9.dist-info → ripperdoc-0.3.0.dist-info}/WHEEL +1 -1
  71. ripperdoc/sdk/__init__.py +0 -9
  72. ripperdoc/sdk/client.py +0 -333
  73. ripperdoc-0.2.9.dist-info/RECORD +0 -123
  74. {ripperdoc-0.2.9.dist-info → ripperdoc-0.3.0.dist-info}/entry_points.txt +0 -0
  75. {ripperdoc-0.2.9.dist-info → ripperdoc-0.3.0.dist-info}/licenses/LICENSE +0 -0
  76. {ripperdoc-0.2.9.dist-info → ripperdoc-0.3.0.dist-info}/top_level.txt +0 -0
ripperdoc/utils/session_heatmap.py ADDED
@@ -0,0 +1,242 @@
+"""Activity heatmap visualization for session statistics."""
+
+from __future__ import annotations
+
+from datetime import datetime, timedelta
+from typing import Dict
+
+from rich.console import Console
+from rich.text import Text
+
+
+def _get_intensity_char(count: int, max_count: int) -> str:
+    """Get unicode block character based on activity intensity.
+
+    Returns character and color code for 8 intensity levels.
+    """
+    if count == 0:
+        return "·"
+    if max_count == 0:
+        return "█"
+
+    # Calculate intensity (0-1)
+    intensity = count / max_count
+
+    # Use unicode block characters for different intensities (8 levels)
+    if intensity <= 0.125:
+        return "░"
+    elif intensity <= 0.25:
+        return "░"
+    elif intensity <= 0.375:
+        return "▒"
+    elif intensity <= 0.5:
+        return "▒"
+    elif intensity <= 0.625:
+        return "▓"
+    elif intensity <= 0.75:
+        return "▓"
+    elif intensity <= 0.875:
+        return "█"
+    else:
+        return "█"
+
+
+def _get_intensity_color(count: int, max_count: int) -> str:
+    """Get color style based on activity intensity (8 levels)."""
+    if count == 0:
+        return "dim white"
+    if max_count == 0:
+        return "bold color(46)"
+
+    # Calculate intensity (0-1)
+    intensity = count / max_count
+
+    # 8-level green gradient from very light to very dark
+    if intensity <= 0.125:
+        return "color(22)"  # Very light green
+    elif intensity <= 0.25:
+        return "color(28)"  # Light green
+    elif intensity <= 0.375:
+        return "color(34)"  # Light-medium green
+    elif intensity <= 0.5:
+        return "color(40)"  # Medium green
+    elif intensity <= 0.625:
+        return "color(46)"  # Medium-dark green
+    elif intensity <= 0.75:
+        return "color(82)"  # Dark green
+    elif intensity <= 0.875:
+        return "bold color(46)"  # Bright green with bold
+    else:
+        return "bold color(82)"  # Very dark green with bold
+
+
+def _get_week_grid(
+    daily_activity: Dict[str, int], weeks_count: int = 52
+) -> tuple[list[list[tuple[str, int]]], int]:
+    """Build a grid of weeks for heatmap display.
+
+    Args:
+        daily_activity: Dictionary mapping date strings to activity counts
+        weeks_count: Number of weeks to display (default 52 for full year)
+
+    Returns:
+        Tuple of (grid, max_count) where grid is a list of weeks,
+        each week is a list of (date_str, count) tuples.
+    """
+    # Calculate total days to display
+    total_days = weeks_count * 7
+
+    # Start from today and go back
+    end_date = datetime.now().date()
+    start_date = end_date - timedelta(days=total_days - 1)
+
+    # Find max count for intensity calculation
+    max_count = max(daily_activity.values()) if daily_activity else 1
+
+    # Build grid by weeks (Sunday to Saturday)
+    weeks: list[list[tuple[str, int]]] = []
+    current_week: list[tuple[str, int]] = []
+
+    current_date = start_date
+    # Pad the start to align with week start (Sunday = 6 in Python's weekday, we want 0)
+    weekday = (current_date.weekday() + 1) % 7  # Convert to Sunday=0
+    if weekday > 0:
+        # Add empty days at the start
+        for _ in range(weekday):
+            current_week.append(("", 0))
+
+    while current_date <= end_date:
+        date_str = current_date.isoformat()
+        count = daily_activity.get(date_str, 0)
+        current_week.append((date_str, count))
+
+        # If we've completed a week (7 days), start a new week
+        if len(current_week) == 7:
+            weeks.append(current_week)
+            current_week = []
+
+        current_date += timedelta(days=1)
+
+    # Add remaining days
+    if current_week:
+        # Pad to complete the week
+        while len(current_week) < 7:
+            current_week.append(("", 0))
+        weeks.append(current_week)
+
+    return weeks, max_count
+
+
+def render_heatmap(console: Console, daily_activity: Dict[str, int], weeks_count: int = 52) -> None:
+    """Render activity heatmap to console.
+
+    Args:
+        console: Rich console for output
+        daily_activity: Dictionary mapping date strings to activity counts
+        weeks_count: Number of weeks to display (default 52 for full year)
+    """
+    # Alignment constant: width of weekday labels column
+    WEEKDAY_LABEL_WIDTH = 8
+
+    weeks, max_count = _get_week_grid(daily_activity, weeks_count)
+    if not weeks:
+        console.print("[dim]No activity data[/dim]")
+        return
+
+    # Build month labels row
+    # Each week column is exactly 1 character wide in the heatmap
+    # Month labels are 3 characters wide (e.g., "Dec", "Jan", "Feb")
+    month_positions = []  # List of (week_idx, month_name) tuples
+    current_month = None
+
+    for week_idx, week in enumerate(weeks):
+        # Check first non-empty day in week for month
+        for date_str, _ in week:
+            if date_str:
+                date = datetime.fromisoformat(date_str).date()
+                month_str = date.strftime("%b")
+
+                # Record position when month changes
+                if month_str != current_month:
+                    month_positions.append((week_idx, month_str))
+                    current_month = month_str
+                break
+
+    # Build month label string with precise alignment
+    month_chars = [" "] * len(weeks)
+
+    for week_idx, month_name in month_positions:
+        # Check if we have space for the full month name (3 chars)
+        can_place = True
+        for i in range(3):
+            if week_idx + i < len(month_chars) and month_chars[week_idx + i] != " ":
+                can_place = False
+                break
+
+        if can_place and week_idx < len(month_chars):
+            # Place the month name starting at this week position
+            for i, char in enumerate(month_name):
+                if week_idx + i < len(month_chars):
+                    month_chars[week_idx + i] = char
+
+    # Print month labels with proper alignment
+    month_row = " " * WEEKDAY_LABEL_WIDTH + "".join(month_chars)
+    console.print(month_row)
+
+    # Weekday labels - show Mon, Wed, Fri
+    weekday_labels = ["Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"]
+    show_weekdays = [1, 3, 5]  # Show Mon, Wed, Fri
+
+    # Render each row (weekday)
+    for weekday_idx in range(7):
+        row = Text()
+
+        # Add weekday label - exactly WEEKDAY_LABEL_WIDTH characters
+        if weekday_idx in show_weekdays:
+            label = weekday_labels[weekday_idx]
+            # Right-align the label within the width, then add a space
+            row.append(f"{label:>3} ", style="dim")  # "Mon " = 4 chars
+            row.append(" " * (WEEKDAY_LABEL_WIDTH - 4))  # Fill remaining space
+        else:
+            row.append(" " * WEEKDAY_LABEL_WIDTH)
+
+        # Add activity cells for this weekday across all weeks
+        # Each week column is exactly 1 character wide
+        for week in weeks:
+            if weekday_idx < len(week):
+                date_str, count = week[weekday_idx]
+                if date_str:
+                    char = _get_intensity_char(count, max_count)
+                    color = _get_intensity_color(count, max_count)
+                    row.append(char, style=color)
+                else:
+                    row.append("·", style="dim white")
+            else:
+                row.append(" ")
+
+        console.print(row)
+
+    # Legend with 8-level green gradient
+    console.print()
+    legend = Text(" " * WEEKDAY_LABEL_WIDTH + "Less ", style="dim")
+    # Show representative levels from the 8-level gradient
+    legend.append("░", style="color(22)")  # Level 1: Very light green
+    legend.append(" ", style="dim")
+    legend.append("░", style="color(28)")  # Level 2: Light green
+    legend.append(" ", style="dim")
+    legend.append("▒", style="color(34)")  # Level 3: Light-medium green
+    legend.append(" ", style="dim")
+    legend.append("▒", style="color(40)")  # Level 4: Medium green
+    legend.append(" ", style="dim")
+    legend.append("▓", style="color(46)")  # Level 5: Medium-dark green
+    legend.append(" ", style="dim")
+    legend.append("▓", style="color(82)")  # Level 6: Dark green
+    legend.append(" ", style="dim")
+    legend.append("█", style="bold color(46)")  # Level 7: Bright green
+    legend.append(" ", style="dim")
+    legend.append("█", style="bold color(82)")  # Level 8: Very dark green
+    legend.append(" More", style="dim")
+    console.print(legend)
+
+
+__all__ = ["render_heatmap"]
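The new module exposes only render_heatmap. As a rough usage sketch (not taken from the package: the synthetic daily_activity data and the 26-week window are illustrative assumptions), it can be driven with any mapping of ISO date strings to counts:

# Illustrative sketch only; fabricated activity data, not part of the diff.
from datetime import date, timedelta
from rich.console import Console

from ripperdoc.utils.session_heatmap import render_heatmap

today = date.today()
# Map ISO date strings to session counts for roughly the last four months.
daily_activity = {
    (today - timedelta(days=offset)).isoformat(): offset % 4
    for offset in range(120)
}

console = Console()
render_heatmap(console, daily_activity, weeks_count=26)  # narrower grid than the 52-week default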
ripperdoc/utils/session_history.py CHANGED
@@ -6,7 +6,7 @@ import json
 from dataclasses import dataclass
 from datetime import datetime
 from pathlib import Path
-from typing import List, Optional
+from typing import List, Optional, Union
 
 from ripperdoc.utils.log import get_logger
 from ripperdoc.utils.messages import (
@@ -19,7 +19,7 @@ from ripperdoc.utils.path_utils import project_storage_dir
 
 logger = get_logger()
 
-ConversationMessage = UserMessage | AssistantMessage | ProgressMessage
+ConversationMessage = Union[UserMessage, AssistantMessage, ProgressMessage]
 
 
 @dataclass
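The change above swaps the PEP 604 union spelling for typing.Union, presumably for compatibility with older interpreters: evaluated as a runtime expression (not just an annotation), the | form between classes requires Python 3.10+, while Union works on earlier versions and is equivalent on 3.10+. A minimal sketch of the difference, with the class names re-declared here purely for illustration:

from typing import Union

class UserMessage: ...
class AssistantMessage: ...
class ProgressMessage: ...

# Importable on pre-3.10 interpreters:
ConversationMessage = Union[UserMessage, AssistantMessage, ProgressMessage]

# Raises TypeError at import time on Python < 3.10:
# ConversationMessage = UserMessage | AssistantMessage | ProgressMessage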
ripperdoc/utils/session_stats.py ADDED
@@ -0,0 +1,294 @@
+"""Session statistics collection and analysis."""
+
+from __future__ import annotations
+
+from collections import defaultdict
+from dataclasses import dataclass, field
+from datetime import datetime, timedelta
+from pathlib import Path
+from typing import Dict, List, Tuple
+
+from ripperdoc.utils.log import get_logger
+from ripperdoc.utils.session_history import list_session_summaries
+
+logger = get_logger()
+
+
+@dataclass
+class SessionStats:
+    """Aggregated statistics across all sessions."""
+
+    # Basic counts
+    total_sessions: int = 0
+    total_messages: int = 0
+    total_cost_usd: float = 0.0
+
+    # Token statistics
+    total_tokens: int = 0
+    total_input_tokens: int = 0
+    total_output_tokens: int = 0
+    total_cache_read_tokens: int = 0
+    total_cache_creation_tokens: int = 0
+
+    # Model statistics (model -> count)
+    model_usage: Dict[str, int] = field(default_factory=dict)
+    favorite_model: str = ""
+
+    # Time statistics
+    longest_session_duration: timedelta = field(default_factory=lambda: timedelta(0))
+    earliest_session: datetime | None = None
+    latest_session: datetime | None = None
+
+    # Streak statistics
+    current_streak: int = 0
+    longest_streak: int = 0
+    active_days: int = 0
+    total_days: int = 0
+
+    # Activity patterns (hour -> count)
+    hourly_activity: Dict[int, int] = field(default_factory=dict)
+
+    # Daily activity (date_str -> count)
+    daily_activity: Dict[str, int] = field(default_factory=dict)
+
+    # Weekday activity (0=Monday -> 6=Sunday, count)
+    weekday_activity: Dict[int, int] = field(default_factory=dict)
+
+    # Peak hour (hour with most activity)
+    peak_hour: int = 0
+
+
+def _calculate_streaks(active_dates: List[datetime]) -> Tuple[int, int]:
+    """Calculate current and longest streak from sorted active dates."""
+    if not active_dates:
+        return 0, 0
+
+    # Sort dates
+    sorted_dates = sorted(set(d.date() for d in active_dates))
+
+    # Calculate longest streak
+    longest = 1
+    current = 1
+    for i in range(1, len(sorted_dates)):
+        if (sorted_dates[i] - sorted_dates[i - 1]).days == 1:
+            current += 1
+            longest = max(longest, current)
+        else:
+            current = 1
+
+    # Calculate current streak (from today backwards)
+    today = datetime.now().date()
+    current_streak = 0
+
+    # Check if today or yesterday has activity
+    if sorted_dates[-1] == today:
+        current_streak = 1
+        check_date = today - timedelta(days=1)
+    elif sorted_dates[-1] == today - timedelta(days=1):
+        current_streak = 1
+        check_date = sorted_dates[-1] - timedelta(days=1)
+    else:
+        return 0, longest
+
+    # Count backwards
+    i = len(sorted_dates) - 2
+    while i >= 0:
+        if sorted_dates[i] == check_date:
+            current_streak += 1
+            check_date -= timedelta(days=1)
+            i -= 1
+        else:
+            break
+
+    return current_streak, longest
+
+
+def collect_session_stats(project_path: Path, days: int = 32) -> SessionStats:
+    """Collect statistics from session history.
+
+    Args:
+        project_path: Project root directory
+        days: Number of days to look back (default 32)
+    """
+    stats = SessionStats(
+        hourly_activity=defaultdict(int),
+        daily_activity=defaultdict(int),
+        weekday_activity=defaultdict(int),
+        model_usage=defaultdict(int),
+    )
+
+    summaries = list_session_summaries(project_path)
+    if not summaries:
+        return stats
+
+    # Filter by date range (use timezone-aware cutoff if needed)
+    from datetime import timezone
+
+    cutoff = datetime.now(timezone.utc) - timedelta(days=days)
+
+    # Ensure comparison works with both naive and aware datetimes
+    recent_summaries = []
+    for s in summaries:
+        # Make updated_at timezone-aware if it's naive
+        updated_at = s.updated_at
+        if updated_at.tzinfo is None:
+            updated_at = updated_at.replace(tzinfo=timezone.utc)
+        if updated_at >= cutoff:
+            recent_summaries.append(s)
+
+    if not recent_summaries:
+        return stats
+
+    # Basic counts
+    stats.total_sessions = len(recent_summaries)
+    stats.total_messages = sum(s.message_count for s in recent_summaries)
+
+    # Time statistics
+    stats.earliest_session = min(s.created_at for s in recent_summaries)
+    stats.latest_session = max(s.updated_at for s in recent_summaries)
+    stats.total_days = (stats.latest_session - stats.earliest_session).days + 1
+
+    # Calculate longest session and activity patterns in single pass
+    active_dates: List[datetime] = []
+    date_set: set[str] = set()
+
+    for summary in recent_summaries:
+        # Longest session
+        duration = summary.updated_at - summary.created_at
+        if duration > stats.longest_session_duration:
+            stats.longest_session_duration = duration
+
+        # Track dates
+        date_str = summary.updated_at.date().isoformat()
+        if date_str not in date_set:
+            date_set.add(date_str)
+            active_dates.append(summary.updated_at)
+
+        # Hourly activity
+        stats.hourly_activity[summary.updated_at.hour] += 1
+
+        # Daily activity (for heatmap)
+        stats.daily_activity[date_str] += 1
+
+        # Weekday activity
+        stats.weekday_activity[summary.updated_at.weekday()] += 1
+
+    # Active days
+    stats.active_days = len(date_set)
+
+    # Streaks
+    stats.current_streak, stats.longest_streak = _calculate_streaks(active_dates)
+
+    # Peak hour
+    if stats.hourly_activity:
+        stats.peak_hour = max(stats.hourly_activity.items(), key=lambda x: x[1])[0]
+
+    # Load detailed session data for token and model statistics
+    import json
+
+    for summary in recent_summaries:
+        session_file = summary.path
+        if not session_file.exists():
+            continue
+
+        try:
+            with session_file.open("r", encoding="utf-8") as fh:
+                for line in fh:
+                    if not line.strip():
+                        continue
+                    # Quick string check before full JSON parse
+                    if '"type":"assistant"' not in line and '"type": "assistant"' not in line:
+                        continue
+                    try:
+                        entry = json.loads(line)
+                        payload = entry.get("payload", {})
+
+                        # Only process assistant messages
+                        if payload.get("type") != "assistant":
+                            continue
+
+                        # Extract model and token information
+                        model = payload.get("model")
+                        if model:
+                            stats.model_usage[model] += 1
+
+                        # Extract token counts
+                        input_tokens = payload.get("input_tokens", 0)
+                        output_tokens = payload.get("output_tokens", 0)
+                        cache_read = payload.get("cache_read_tokens", 0)
+                        cache_creation = payload.get("cache_creation_tokens", 0)
+
+                        stats.total_input_tokens += input_tokens
+                        stats.total_output_tokens += output_tokens
+                        stats.total_cache_read_tokens += cache_read
+                        stats.total_cache_creation_tokens += cache_creation
+
+                        # Extract cost
+                        cost = payload.get("cost_usd", 0.0)
+                        stats.total_cost_usd += cost
+
+                    except (json.JSONDecodeError, KeyError, TypeError, ValueError):
+                        continue
+        except (OSError, IOError):
+            continue
+
+    # Calculate total tokens
+    stats.total_tokens = (
+        stats.total_input_tokens
+        + stats.total_output_tokens
+        + stats.total_cache_read_tokens
+        + stats.total_cache_creation_tokens
+    )
+
+    # Determine favorite model
+    if stats.model_usage:
+        stats.favorite_model = max(stats.model_usage.items(), key=lambda x: x[1])[0]
+
+    return stats
+
+
+def format_duration(td: timedelta) -> str:
+    """Format timedelta as human-readable string."""
+    total_seconds = int(td.total_seconds())
+    if total_seconds < 60:
+        return f"{total_seconds}s"
+
+    minutes = total_seconds // 60
+    if minutes < 60:
+        seconds = total_seconds % 60
+        return f"{minutes}m {seconds}s"
+
+    hours = minutes // 60
+    remaining_mins = minutes % 60
+    if hours < 24:
+        return f"{hours}h {remaining_mins}m"
+
+    days = hours // 24
+    remaining_hours = hours % 24
+    return f"{days}d {remaining_hours}h {remaining_mins}m"
+
+
+def format_large_number(num: int) -> str:
+    """Format large numbers with k/m/b suffix.
+
+    Examples:
+        1234 -> "1.2k"
+        1234567 -> "1.2m"
+        1234567890 -> "1.2b"
+    """
+    if num < 1000:
+        return str(num)
+    elif num < 1_000_000:
+        return f"{num / 1000:.1f}k"
+    elif num < 1_000_000_000:
+        return f"{num / 1_000_000:.1f}m"
+    else:
+        return f"{num / 1_000_000_000:.1f}b"
+
+
+__all__ = [
+    "SessionStats",
+    "collect_session_stats",
+    "format_duration",
+    "format_large_number",
+]
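A hedged sketch of how these helpers might be consumed, for example by the new stats_cmd command; the Path.cwd() project root and the printed labels are assumptions, and only the imported names and fields come from the module above:

# Hypothetical driver for the new session_stats helpers (illustration only).
from pathlib import Path

from ripperdoc.utils.session_stats import (
    collect_session_stats,
    format_duration,
    format_large_number,
)

stats = collect_session_stats(Path.cwd(), days=32)  # 32-day window is the module default

print(f"Sessions:        {stats.total_sessions}")
print(f"Messages:        {stats.total_messages}")
print(f"Total tokens:    {format_large_number(stats.total_tokens)}")
print(f"Longest session: {format_duration(stats.longest_session_duration)}")
print(f"Current streak:  {stats.current_streak} day(s)")
print(f"Favorite model:  {stats.favorite_model or 'n/a'}")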
ripperdoc/utils/shell_utils.py CHANGED
@@ -9,15 +9,16 @@ from __future__ import annotations
 
 import os
 import shutil
+from pathlib import PureWindowsPath
 from typing import Iterable, List
 
 from ripperdoc.utils.log import get_logger
+from ripperdoc.utils.platform import is_windows
 
 logger = get_logger()
 
 # Common locations to probe if shutil.which misses an otherwise standard path.
 _COMMON_BIN_DIRS: tuple[str, ...] = ("/bin", "/usr/bin", "/usr/local/bin", "/opt/homebrew/bin")
-_IS_WINDOWS = os.name == "nt"
 
 
 def _is_executable(path: str) -> bool:
@@ -93,7 +94,7 @@ def find_suitable_shell() -> str:
     current_is_bash = "bash" in current_shell
     current_is_zsh = "zsh" in current_shell
 
-    if not _IS_WINDOWS:
+    if not is_windows():
         if (current_is_bash or current_is_zsh) and _is_executable(current_shell):
             logger.debug("Using SHELL from environment: %s", current_shell)
             return current_shell
@@ -149,9 +150,11 @@ def build_shell_command(shell_path: str, command: str) -> List[str]:
     For bash/zsh (including Git Bash), use -lc to run as login shell.
     For cmd.exe fallback, use /d /s /c.
     """
-
-    lower = shell_path.lower()
-    if lower.endswith("cmd.exe") or lower.endswith("\\cmd"):
+    # Use PureWindowsPath to correctly extract the shell name from the path.
+    # This handles both Windows-style (C:\\Windows\\System32\\cmd.exe) and Unix-style
+    # (/usr/bin/bash) paths, as well as simple names (cmd, bash) on any platform.
+    shell_name = PureWindowsPath(shell_path).name.lower()
+    if shell_name in ("cmd", "cmd.exe"):
         return [shell_path, "/d", "/s", "/c", command]
     return [shell_path, "-lc", command]
 
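To show what the PureWindowsPath change buys, here is a small standalone sketch that mirrors the build_shell_command logic from the hunk above; the function is re-declared here purely for demonstration and the example paths are made up:

from pathlib import PureWindowsPath

def build_shell_command(shell_path: str, command: str) -> list[str]:
    # Same detection as the new code: take the final path component,
    # which works for Windows paths, POSIX paths, and bare names alike.
    shell_name = PureWindowsPath(shell_path).name.lower()
    if shell_name in ("cmd", "cmd.exe"):
        return [shell_path, "/d", "/s", "/c", command]
    return [shell_path, "-lc", command]

print(build_shell_command(r"C:\Windows\System32\cmd.exe", "dir"))
# -> ['C:\\Windows\\System32\\cmd.exe', '/d', '/s', '/c', 'dir']
print(build_shell_command("/usr/bin/bash", "ls -la"))
# -> ['/usr/bin/bash', '-lc', 'ls -la']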
ripperdoc/utils/todo.py CHANGED
@@ -185,12 +185,6 @@ def format_todo_summary(todos: Sequence[TodoItem]) -> str:
         f"{stats['by_status']['completed']} completed)."
     )
 
-    next_item = get_next_actionable(todos)
-    if next_item:
-        summary += f" Next to tackle: {next_item.content} (id: {next_item.id}, status: {next_item.status})."
-    elif stats["total"] == 0:
-        summary += " No todos stored yet."
-
     return summary
 