ripperdoc 0.2.10__py3-none-any.whl → 0.3.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ripperdoc/__init__.py +1 -1
- ripperdoc/cli/cli.py +164 -57
- ripperdoc/cli/commands/__init__.py +4 -0
- ripperdoc/cli/commands/agents_cmd.py +3 -7
- ripperdoc/cli/commands/doctor_cmd.py +29 -0
- ripperdoc/cli/commands/memory_cmd.py +2 -1
- ripperdoc/cli/commands/models_cmd.py +61 -5
- ripperdoc/cli/commands/resume_cmd.py +1 -0
- ripperdoc/cli/commands/skills_cmd.py +103 -0
- ripperdoc/cli/commands/stats_cmd.py +4 -4
- ripperdoc/cli/commands/status_cmd.py +10 -0
- ripperdoc/cli/commands/tasks_cmd.py +6 -3
- ripperdoc/cli/commands/themes_cmd.py +139 -0
- ripperdoc/cli/ui/file_mention_completer.py +63 -13
- ripperdoc/cli/ui/helpers.py +6 -3
- ripperdoc/cli/ui/interrupt_listener.py +233 -0
- ripperdoc/cli/ui/message_display.py +7 -0
- ripperdoc/cli/ui/panels.py +13 -8
- ripperdoc/cli/ui/rich_ui.py +513 -84
- ripperdoc/cli/ui/spinner.py +68 -5
- ripperdoc/cli/ui/tool_renderers.py +10 -9
- ripperdoc/cli/ui/wizard.py +18 -11
- ripperdoc/core/agents.py +4 -0
- ripperdoc/core/config.py +235 -0
- ripperdoc/core/default_tools.py +1 -0
- ripperdoc/core/hooks/llm_callback.py +0 -1
- ripperdoc/core/hooks/manager.py +6 -0
- ripperdoc/core/permissions.py +123 -39
- ripperdoc/core/providers/openai.py +55 -9
- ripperdoc/core/query.py +349 -108
- ripperdoc/core/query_utils.py +17 -14
- ripperdoc/core/skills.py +1 -0
- ripperdoc/core/theme.py +298 -0
- ripperdoc/core/tool.py +8 -3
- ripperdoc/protocol/__init__.py +14 -0
- ripperdoc/protocol/models.py +300 -0
- ripperdoc/protocol/stdio.py +1453 -0
- ripperdoc/tools/background_shell.py +49 -5
- ripperdoc/tools/bash_tool.py +75 -9
- ripperdoc/tools/file_edit_tool.py +98 -29
- ripperdoc/tools/file_read_tool.py +139 -8
- ripperdoc/tools/file_write_tool.py +46 -3
- ripperdoc/tools/grep_tool.py +98 -8
- ripperdoc/tools/lsp_tool.py +9 -15
- ripperdoc/tools/multi_edit_tool.py +26 -3
- ripperdoc/tools/skill_tool.py +52 -1
- ripperdoc/tools/task_tool.py +33 -8
- ripperdoc/utils/file_watch.py +12 -6
- ripperdoc/utils/image_utils.py +125 -0
- ripperdoc/utils/log.py +30 -3
- ripperdoc/utils/lsp.py +9 -3
- ripperdoc/utils/mcp.py +80 -18
- ripperdoc/utils/message_formatting.py +2 -2
- ripperdoc/utils/messages.py +177 -32
- ripperdoc/utils/pending_messages.py +50 -0
- ripperdoc/utils/permissions/shell_command_validation.py +3 -3
- ripperdoc/utils/permissions/tool_permission_utils.py +9 -3
- ripperdoc/utils/platform.py +198 -0
- ripperdoc/utils/session_heatmap.py +1 -3
- ripperdoc/utils/session_history.py +2 -2
- ripperdoc/utils/session_stats.py +1 -0
- ripperdoc/utils/shell_utils.py +8 -5
- ripperdoc/utils/todo.py +0 -6
- {ripperdoc-0.2.10.dist-info → ripperdoc-0.3.1.dist-info}/METADATA +49 -17
- ripperdoc-0.3.1.dist-info/RECORD +136 -0
- {ripperdoc-0.2.10.dist-info → ripperdoc-0.3.1.dist-info}/WHEEL +1 -1
- ripperdoc/cli/ui/interrupt_handler.py +0 -174
- ripperdoc/sdk/__init__.py +0 -9
- ripperdoc/sdk/client.py +0 -408
- ripperdoc-0.2.10.dist-info/RECORD +0 -129
- {ripperdoc-0.2.10.dist-info → ripperdoc-0.3.1.dist-info}/entry_points.txt +0 -0
- {ripperdoc-0.2.10.dist-info → ripperdoc-0.3.1.dist-info}/licenses/LICENSE +0 -0
- {ripperdoc-0.2.10.dist-info → ripperdoc-0.3.1.dist-info}/top_level.txt +0 -0
ripperdoc/utils/messages.py
CHANGED
@@ -5,7 +5,7 @@ for communication with AI models.
 """
 
 from typing import Any, Dict, List, Optional, Union
-from pydantic import BaseModel, ConfigDict, Field
+from pydantic import BaseModel, ConfigDict, Field, field_validator
 from uuid import uuid4
 from enum import Enum
 from ripperdoc.utils.log import get_logger
@@ -35,6 +35,23 @@ class MessageContent(BaseModel):
     name: Optional[str] = None
     input: Optional[Dict[str, object]] = None
     is_error: Optional[bool] = None
+    # Image/vision content fields
+    source_type: Optional[str] = None  # "base64", "url", "file"
+    media_type: Optional[str] = None  # "image/jpeg", "image/png", etc.
+    image_data: Optional[str] = None  # base64-encoded image data or URL
+
+    @field_validator("input", mode="before")
+    @classmethod
+    def validate_input(cls, v):
+        """Ensure input is always a dict, never a Pydantic model."""
+        if v is not None and not isinstance(v, dict):
+            if hasattr(v, "model_dump"):
+                v = v.model_dump()
+            elif hasattr(v, "dict"):
+                v = v.dict()
+            else:
+                v = {"value": str(v)}
+        return v
 
 
 def _content_block_to_api(block: MessageContent) -> Dict[str, Any]:
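Note: the `validate_input` hook added above coerces whatever a provider hands back for `input` into a plain dict before Pydantic validates the field. A standalone sketch of the same pattern (the `ToolInput` model below is hypothetical, not part of ripperdoc):

from typing import Dict, Optional

from pydantic import BaseModel, field_validator


class ToolInput(BaseModel):
    # Hypothetical stand-in for a provider-side Pydantic model.
    path: str = "README.md"


class Block(BaseModel):
    input: Optional[Dict[str, object]] = None

    @field_validator("input", mode="before")
    @classmethod
    def validate_input(cls, v):
        # Coerce Pydantic models (or any non-dict) into a plain dict.
        if v is not None and not isinstance(v, dict):
            v = v.model_dump() if hasattr(v, "model_dump") else {"value": str(v)}
        return v


print(Block(input=ToolInput()).input)  # -> {'path': 'README.md'}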
@@ -53,11 +70,19 @@ def _content_block_to_api(block: MessageContent) -> Dict[str, Any]:
             "signature": getattr(block, "signature", None),
         }
     if block_type == "tool_use":
+        input_value = getattr(block, "input", None) or {}
+        # Ensure input is a dict, not a Pydantic model
+        if hasattr(input_value, "model_dump"):
+            input_value = input_value.model_dump()
+        elif hasattr(input_value, "dict"):
+            input_value = input_value.dict()
+        elif not isinstance(input_value, dict):
+            input_value = {"value": str(input_value)}
         return {
             "type": "tool_use",
             "id": getattr(block, "id", None) or getattr(block, "tool_use_id", "") or "",
             "name": getattr(block, "name", None) or "",
-            "input":
+            "input": input_value,
         }
     if block_type == "tool_result":
         result: Dict[str, Any] = {
@@ -73,6 +98,15 @@ def _content_block_to_api(block: MessageContent) -> Dict[str, Any]:
         if getattr(block, "is_error", None) is not None:
             result["is_error"] = block.is_error
         return result
+    if block_type == "image":
+        return {
+            "type": "image",
+            "source": {
+                "type": getattr(block, "source_type", None) or "base64",
+                "media_type": getattr(block, "media_type", None) or "image/jpeg",
+                "data": getattr(block, "image_data", None) or "",
+            },
+        }
     # Default to text block
     return {
         "type": "text",
@@ -124,6 +158,15 @@ def _content_block_to_openai(block: MessageContent) -> Dict[str, Any]:
             "tool_call_id": tool_call_id,
             "content": getattr(block, "text", None) or getattr(block, "content", None) or "",
         }
+    if block_type == "image":
+        # OpenAI uses data URL format for images
+        media_type = getattr(block, "media_type", None) or "image/jpeg"
+        image_data = getattr(block, "image_data", None) or ""
+        data_url = f"data:{media_type};base64,{image_data}"
+        return {
+            "type": "image_url",
+            "image_url": {"url": data_url},
+        }
     # Fallback text message
     return {
         "role": "assistant",
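Note: the two image branches above target different wire formats: `_content_block_to_api` emits an Anthropic-style `source` object, while `_content_block_to_openai` packs the same base64 payload into a data URL. An illustrative sketch with a made-up payload:

import base64

media_type = "image/png"
image_data = base64.b64encode(b"\x89PNG fake bytes").decode()  # stand-in payload, not a real image

anthropic_block = {
    "type": "image",
    "source": {"type": "base64", "media_type": media_type, "data": image_data},
}

openai_block = {
    "type": "image_url",
    "image_url": {"url": f"data:{media_type};base64,{image_data}"},
}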
@@ -152,6 +195,7 @@ class UserMessage(BaseModel):
     type: str = "user"
     message: Message
     uuid: str = ""
+    parent_tool_use_id: Optional[str] = None
     tool_use_result: Optional[object] = None
 
     def __init__(self, **data: object) -> None:
@@ -166,6 +210,7 @@ class AssistantMessage(BaseModel):
     type: str = "assistant"
     message: Message
     uuid: str = ""
+    parent_tool_use_id: Optional[str] = None
     cost_usd: float = 0.0
     duration_ms: float = 0.0
     is_api_error_message: bool = False
@@ -175,6 +220,7 @@ class AssistantMessage(BaseModel):
     output_tokens: int = 0
     cache_read_tokens: int = 0
     cache_creation_tokens: int = 0
+    error: Optional[str] = None
 
     def __init__(self, **data: object) -> None:
         if "uuid" not in data or not data["uuid"]:
@@ -191,6 +237,7 @@ class ProgressMessage(BaseModel):
     content: Any
     normalized_messages: List[Message] = []
     sibling_tool_use_ids: set[str] = set()
+    is_subagent_message: bool = False  # Flag to indicate if content is a subagent message
     model_config = ConfigDict(arbitrary_types_allowed=True)
 
     def __init__(self, **data: object) -> None:
@@ -200,7 +247,9 @@ class ProgressMessage(BaseModel):
 
 
 def create_user_message(
-    content: Union[str, List[Dict[str, Any]]],
+    content: Union[str, List[Dict[str, Any]]],
+    tool_use_result: Optional[object] = None,
+    parent_tool_use_id: Optional[str] = None,
 ) -> UserMessage:
     """Create a user message."""
     if isinstance(content, str):
@@ -234,7 +283,39 @@ def create_user_message(
             f"ids={[getattr(b, 'tool_use_id', None) for b in tool_result_blocks]}"
         )
 
-    return UserMessage(
+    return UserMessage(
+        message=message,
+        tool_use_result=tool_use_result,
+        parent_tool_use_id=parent_tool_use_id,
+    )
+
+
+def _normalize_content_item(item: Dict[str, Any]) -> Dict[str, Any]:
+    """Normalize a content item to ensure all fields are JSON-serializable.
+
+    This is needed because some API providers may return Pydantic models
+    for tool input fields, which need to be converted to dicts for proper
+    serialization and later processing.
+
+    Args:
+        item: The content item dict from API response
+
+    Returns:
+        Normalized content item with all fields JSON-serializable
+    """
+    normalized = dict(item)
+
+    # If input is a Pydantic model, convert to dict
+    if 'input' in normalized and normalized['input'] is not None:
+        input_value = normalized['input']
+        if hasattr(input_value, 'model_dump'):
+            normalized['input'] = input_value.model_dump()
+        elif hasattr(input_value, 'dict'):
+            normalized['input'] = input_value.dict()
+        elif not isinstance(input_value, dict):
+            normalized['input'] = {'value': str(input_value)}
+
+    return normalized
 
 
 def create_assistant_message(
@@ -248,12 +329,15 @@ def create_assistant_message(
     output_tokens: int = 0,
     cache_read_tokens: int = 0,
     cache_creation_tokens: int = 0,
+    parent_tool_use_id: Optional[str] = None,
+    error: Optional[str] = None,
 ) -> AssistantMessage:
     """Create an assistant message."""
     if isinstance(content, str):
         message_content: Union[str, List[MessageContent]] = content
     else:
-
+        # Normalize content items to ensure tool input is always a dict
+        message_content = [MessageContent(**_normalize_content_item(item)) for item in content]
 
     message = Message(
         role=MessageRole.ASSISTANT,
@@ -271,6 +355,8 @@ def create_assistant_message(
         output_tokens=output_tokens,
         cache_read_tokens=cache_read_tokens,
         cache_creation_tokens=cache_creation_tokens,
+        parent_tool_use_id=parent_tool_use_id,
+        error=error,
     )
 
 
@@ -279,6 +365,7 @@ def create_progress_message(
     sibling_tool_use_ids: set[str],
     content: Any,
     normalized_messages: Optional[List[Message]] = None,
+    is_subagent_message: bool = False,
 ) -> ProgressMessage:
     """Create a progress message."""
     return ProgressMessage(
@@ -286,6 +373,7 @@ def create_progress_message(
         sibling_tool_use_ids=sibling_tool_use_ids,
         content=content,
         normalized_messages=normalized_messages or [],
+        is_subagent_message=is_subagent_message,
    )
 
 
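Note: `_normalize_content_item` is what lets `create_assistant_message` accept raw provider output whose `input` field may still be a Pydantic model. A self-contained sketch of that normalization step (the `FakeToolInput` class is hypothetical):

from typing import Any, Dict


class FakeToolInput:
    # Hypothetical provider object exposing Pydantic's model_dump().
    def model_dump(self) -> Dict[str, Any]:
        return {"command": "ls -la"}


def normalize_content_item(item: Dict[str, Any]) -> Dict[str, Any]:
    """Mirror of the helper above: make the 'input' field a plain dict."""
    normalized = dict(item)
    value = normalized.get("input")
    if value is not None and not isinstance(value, dict):
        if hasattr(value, "model_dump"):
            normalized["input"] = value.model_dump()
        else:
            normalized["input"] = {"value": str(value)}
    return normalized


block = {"type": "tool_use", "name": "Bash", "input": FakeToolInput()}
print(normalize_content_item(block)["input"])  # -> {'command': 'ls -la'}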
@@ -454,33 +542,90 @@ def normalize_messages_for_api(
         meta = _msg_metadata(msg)
         if isinstance(user_content, list):
             if protocol == "openai":
-                #
+                # Check if this message contains images
+                has_images = any(
+                    getattr(block, "type", None) == "image" for block in user_content
+                )
+                has_text_only = all(
+                    getattr(block, "type", None) in ("text", "image", "tool_result")
+                    for block in user_content
+                )
+
+                # If message has images or only text/images (no tool_result), use content array format
+                if has_images or (
+                    has_text_only
+                    and not any(
+                        getattr(block, "type", None) == "tool_result" for block in user_content
+                    )
+                ):
+                    content_array: List[Dict[str, Any]] = []
+                    for block in user_content:
+                        block_type = getattr(block, "type", None)
+                        if block_type == "image":
+                            content_array.append(_content_block_to_openai(block))
+                        elif block_type == "text":
+                            content_array.append(
+                                {
+                                    "type": "text",
+                                    "text": getattr(block, "text", "") or "",
+                                }
+                            )
+                        elif block_type == "tool_result":
+                            # Handle tool_result separately
+                            tool_results_seen += 1
+                            tool_id = getattr(block, "tool_use_id", None) or getattr(
+                                block, "id", None
+                            )
+                            if not tool_id:
+                                skipped_tool_results_no_call += 1
+                                continue
+                            call_pos = tool_use_positions.get(tool_id)
+                            if call_pos is None or call_pos >= msg_index:
+                                skipped_tool_results_no_call += 1
+                                continue
+                            mapped = _content_block_to_openai(block)
+                            if mapped:
+                                normalized.append(mapped)
+
+                    if content_array:
+                        user_msg: Dict[str, Any] = {
+                            "role": "user",
+                            "content": content_array,
+                        }
+                        if meta:
+                            for key in ("reasoning_content", "reasoning_details", "reasoning"):
+                                if key in meta and meta[key] is not None:
+                                    user_msg[key] = meta[key]
+                        normalized.append(user_msg)
+                    continue
+                else:
+                    # Original behavior for tool_result messages
+                    openai_msgs: List[Dict[str, Any]] = []
+                    for block in user_content:
+                        block_type = getattr(block, "type", None)
+                        if block_type == "tool_result":
+                            tool_results_seen += 1
+                            # Skip tool_result blocks that lack a preceding tool_use
+                            tool_id = getattr(block, "tool_use_id", None) or getattr(
+                                block, "id", None
+                            )
+                            if not tool_id:
+                                skipped_tool_results_no_call += 1
+                                continue
+                            call_pos = tool_use_positions.get(tool_id)
+                            if call_pos is None or call_pos >= msg_index:
+                                skipped_tool_results_no_call += 1
+                                continue
+                            mapped = _content_block_to_openai(block)
+                            if mapped:
+                                openai_msgs.append(mapped)
+                    if meta and openai_msgs:
+                        for candidate in openai_msgs:
+                            for key in ("reasoning_content", "reasoning_details", "reasoning"):
+                                if key in meta and meta[key] is not None:
+                                    candidate[key] = meta[key]
+                    normalized.extend(openai_msgs)
+                    continue
             api_blocks = []
             for block in user_content:
                 if getattr(block, "type", None) == "tool_result":
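Note: under the new OpenAI-protocol path, a user turn containing images is emitted as a single message whose content is an array of typed parts rather than a bare string. Roughly the shape produced (values are illustrative only):

user_msg = {
    "role": "user",
    "content": [
        {"type": "text", "text": "What is in this screenshot?"},
        {
            "type": "image_url",
            "image_url": {"url": "data:image/png;base64,iVBORw0KGgo..."},
        },
    ],
}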
ripperdoc/utils/pending_messages.py
ADDED
@@ -0,0 +1,50 @@
+"""Thread-safe queue for pending conversation messages.
+
+Allows background tasks or external events to enqueue user messages that
+should be injected into the conversation once the current iteration
+finishes. Messages are drained in FIFO order.
+"""
+
+from collections import deque
+import threading
+from typing import Any, Deque, Dict, List, Optional
+
+from ripperdoc.utils.messages import UserMessage, create_user_message
+
+
+class PendingMessageQueue:
+    """Thread-safe queue for pending user messages."""
+
+    def __init__(self) -> None:
+        self._queue: Deque[UserMessage] = deque()
+        self._lock = threading.Lock()
+
+    def enqueue(self, message: UserMessage) -> None:
+        """Add a pre-built UserMessage to the queue."""
+        with self._lock:
+            self._queue.append(message)
+
+    def enqueue_text(self, text: str, metadata: Optional[Dict[str, Any]] = None) -> None:
+        """Create and enqueue a UserMessage with optional metadata."""
+        message = create_user_message(text)
+        if metadata:
+            try:
+                message.message.metadata.update(metadata)
+            except Exception:
+                # Best-effort metadata attachment; ignore failures.
+                pass
+        self.enqueue(message)
+
+    def drain(self) -> List[UserMessage]:
+        """Drain all pending messages in FIFO order."""
+        with self._lock:
+            if not self._queue:
+                return []
+            messages = list(self._queue)
+            self._queue.clear()
+            return messages
+
+    def has_messages(self) -> bool:
+        """Check if there are pending messages."""
+        with self._lock:
+            return bool(self._queue)
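Note: a minimal usage sketch for the new queue, assuming ripperdoc 0.3.1 is installed; the background callback and drain loop below are hypothetical, not part of the package:

import threading

from ripperdoc.utils.pending_messages import PendingMessageQueue

pending = PendingMessageQueue()


def on_file_changed(path: str) -> None:
    # Hypothetical watcher callback running on a background thread.
    pending.enqueue_text(f"Note: {path} changed on disk", metadata={"source": "file_watch"})


t = threading.Thread(target=on_file_changed, args=("src/app.py",), daemon=True)
t.start()
t.join()

# Between conversation iterations, inject anything that queued up (FIFO).
if pending.has_messages():
    for user_msg in pending.drain():
        print(type(user_msg).__name__, user_msg.uuid)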
ripperdoc/utils/permissions/shell_command_validation.py
CHANGED
@@ -662,7 +662,7 @@ def validate_shell_command(shell_command: str) -> ValidationResult:
     lex = shlex.shlex(cmd, posix=True)
     lex.whitespace_split = True  # Split on whitespace, better for argument parsing
     lex.commenters = ""  # Don't treat # as comment for security analysis
-
+
     tokens = []
     try:
         # Get all tokens
@@ -691,7 +691,7 @@ def validate_shell_command(shell_command: str) -> ValidationResult:
                 # Single ; & | are dangerous
                 return True
             i += 1
-
+
         # Also check for find -exec escaped semicolon pattern
         # shlex will have already parsed \; as separate token ';' (since escaped)
         # We need to check if this ; is part of find -exec pattern
@@ -716,7 +716,7 @@ def validate_shell_command(shell_command: str) -> ValidationResult:
                     continue
                 # Not part of find -exec, so it's dangerous
                 return True
-
+
         return False
 
     if has_metachars_outside_quotes(sanitized_for_metachar_check):
ripperdoc/utils/permissions/tool_permission_utils.py
CHANGED
@@ -64,7 +64,9 @@ def create_wildcard_tool_rule(rule_name: str, use_glob_style: bool = False) -> L
     Returns:
         List containing a single ToolRule with wildcard pattern
     """
-    return [
+    return [
+        ToolRule(tool_name="Bash", rule_content=create_wildcard_rule(rule_name, use_glob_style))
+    ]
 
 
 def extract_rule_prefix(rule_string: str) -> Optional[str]:
@@ -273,11 +275,15 @@ def _collect_rule_suggestions(command: str) -> List[ToolRule]:
     if tokens:
         # Legacy prefix format
         suggestions.append(
-            ToolRule(
+            ToolRule(
+                tool_name="Bash", rule_content=create_wildcard_rule(tokens[0], use_glob_style=False)
+            )
         )
         # New glob-style format
         suggestions.append(
-            ToolRule(
+            ToolRule(
+                tool_name="Bash", rule_content=create_wildcard_rule(tokens[0], use_glob_style=True)
+            )
        )
 
     return suggestions
ripperdoc/utils/platform.py
ADDED
@@ -0,0 +1,198 @@
+"""Platform detection utilities.
+
+This module provides a unified interface for detecting the current operating system
+and platform-specific capabilities. It should be used instead of direct checks
+like `sys.platform == "win32"` or `os.name == "nt"`.
+
+Usage:
+    from ripperdoc.utils.platform import (
+        is_windows,
+        is_linux,
+        is_macos,
+        is_unix,
+        Platform,
+    )
+
+    if is_windows():
+        # Windows-specific code
+    elif is_macos():
+        # macOS-specific code
+    else:
+        # Linux or other Unix-specific code
+"""
+
+import os
+import sys
+from typing import Final, Literal
+
+
+# Platform type definitions
+PlatformType = Literal["windows", "linux", "macos", "unknown"]
+
+
+class Platform:
+    """Platform detection constants and utilities.
+
+    This class provides platform detection methods and constants that should
+    be used throughout the codebase instead of direct checks.
+    """
+
+    # Platform constants (using sys.platform for consistency)
+    WINDOWS: Final = "win32"
+    LINUX: Final = "linux"
+    MACOS: Final = "darwin"
+    FREEBSD: Final = "freebsd"
+    OPENBSD: Final = "openbsd"
+    NETBSD: Final = "netbsd"
+
+    # os.name constants
+    NAME_NT: Final = "nt"  # Windows
+    NAME_POSIX: Final = "posix"  # Unix-like systems
+
+    @staticmethod
+    def get_system() -> PlatformType:
+        """Get the current operating system name.
+
+        Returns:
+            'windows', 'linux', 'macos', or 'unknown'
+        """
+        platform = sys.platform.lower()
+
+        if platform.startswith("win"):
+            return "windows"
+        elif platform.startswith("darwin"):
+            return "macos"
+        elif platform.startswith("linux"):
+            return "linux"
+        elif platform in {"freebsd", "openbsd", "netbsd"}:
+            return "linux"  # Treat BSD as Linux for most purposes
+        else:
+            return "unknown"
+
+    @staticmethod
+    def is_windows() -> bool:
+        """Check if running on Windows."""
+        return sys.platform == Platform.WINDOWS
+
+    @staticmethod
+    def is_linux() -> bool:
+        """Check if running on Linux."""
+        return sys.platform.startswith("linux")
+
+    @staticmethod
+    def is_macos() -> bool:
+        """Check if running on macOS."""
+        return sys.platform == Platform.MACOS
+
+    @staticmethod
+    def is_bsd() -> bool:
+        """Check if running on any BSD variant."""
+        return sys.platform in {Platform.FREEBSD, Platform.OPENBSD, Platform.NETBSD}
+
+    @staticmethod
+    def is_unix() -> bool:
+        """Check if running on any Unix-like system (Linux, macOS, BSD)."""
+        return os.name == Platform.NAME_POSIX
+
+    @staticmethod
+    def is_posix() -> bool:
+        """Check if running on a POSIX-compliant system.
+
+        This is equivalent to is_unix() but uses os.name for the check.
+        """
+        return os.name == Platform.NAME_POSIX
+
+    @staticmethod
+    def get_raw_name() -> str:
+        """Get the raw sys.platform value.
+
+        Returns:
+            The raw sys.platform string (e.g., 'win32', 'linux', 'darwin').
+        """
+        return sys.platform
+
+    @staticmethod
+    def get_os_name() -> str:
+        """Get the os.name value.
+
+        Returns:
+            'nt' for Windows, 'posix' for Unix-like systems.
+        """
+        return os.name
+
+
+# Convenience functions for direct import
+def is_windows() -> bool:
+    """Check if running on Windows."""
+    return Platform.is_windows()
+
+
+def is_linux() -> bool:
+    """Check if running on Linux."""
+    return Platform.is_linux()
+
+
+def is_macos() -> bool:
+    """Check if running on macOS."""
+    return Platform.is_macos()
+
+
+def is_bsd() -> bool:
+    """Check if running on any BSD variant."""
+    return Platform.is_bsd()
+
+
+def is_unix() -> bool:
+    """Check if running on any Unix-like system (Linux, macOS, BSD)."""
+    return Platform.is_unix()
+
+
+def is_posix() -> bool:
+    """Check if running on a POSIX-compliant system."""
+    return Platform.is_posix()
+
+
+# Module-level constants for backward compatibility
+IS_WINDOWS: Final = is_windows()
+IS_LINUX: Final = is_linux()
+IS_MACOS: Final = is_macos()
+IS_BSD: Final = is_bsd()
+IS_UNIX: Final = is_unix()
+IS_POSIX: Final = is_posix()
+
+
+# Platform-specific module availability
+def has_termios() -> bool:
+    """Check if the termios module is available (Unix-like systems only)."""
+    try:
+        import termios  # noqa: F401
+
+        return True
+    except ImportError:
+        return False
+
+
+def has_fcntl() -> bool:
+    """Check if the fcntl module is available (Unix-like systems only)."""
+    try:
+        import fcntl  # noqa: F401
+
+        return True
+    except ImportError:
+        return False
+
+
+def has_tty() -> bool:
+    """Check if the tty module is available (Unix-like systems only)."""
+    try:
+        import tty  # noqa: F401
+
+        return True
+    except ImportError:
+        return False
+
+
+# Module-level constants for module availability
+HAS_TERMIOS: Final = has_termios()
+HAS_FCNTL: Final = has_fcntl()
+HAS_TTY: Final = has_tty()
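Note: a short usage sketch for the new platform helpers, assuming ripperdoc 0.3.1 is installed; the `clear_cmd` choice is only an illustration:

from ripperdoc.utils.platform import HAS_TERMIOS, Platform, is_windows

clear_cmd = "cls" if is_windows() else "clear"
print(Platform.get_system(), Platform.get_raw_name(), clear_cmd)

# Gate Unix-only terminal features on the capability flags.
if HAS_TERMIOS:
    import termios  # noqa: F401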
ripperdoc/utils/session_heatmap.py
CHANGED
@@ -127,9 +127,7 @@ def _get_week_grid(
     return weeks, max_count
 
 
-def render_heatmap(
-    console: Console, daily_activity: Dict[str, int], weeks_count: int = 52
-) -> None:
+def render_heatmap(console: Console, daily_activity: Dict[str, int], weeks_count: int = 52) -> None:
     """Render activity heatmap to console.
 
     Args:
ripperdoc/utils/session_history.py
CHANGED
@@ -6,7 +6,7 @@ import json
 from dataclasses import dataclass
 from datetime import datetime
 from pathlib import Path
-from typing import List, Optional
+from typing import List, Optional, Union
 
 from ripperdoc.utils.log import get_logger
 from ripperdoc.utils.messages import (
@@ -19,7 +19,7 @@ from ripperdoc.utils.path_utils import project_storage_dir
 
 logger = get_logger()
 
-ConversationMessage = UserMessage
+ConversationMessage = Union[UserMessage, AssistantMessage, ProgressMessage]
 
 
 @dataclass
ripperdoc/utils/session_stats.py
CHANGED
@@ -123,6 +123,7 @@ def collect_session_stats(project_path: Path, days: int = 32) -> SessionStats:
 
     # Filter by date range (use timezone-aware cutoff if needed)
     from datetime import timezone
+
    cutoff = datetime.now(timezone.utc) - timedelta(days=days)
 
     # Ensure comparison works with both naive and aware datetimes
|