ripperdoc 0.2.9__py3-none-any.whl → 0.3.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (76) hide show
  1. ripperdoc/__init__.py +1 -1
  2. ripperdoc/cli/cli.py +379 -51
  3. ripperdoc/cli/commands/__init__.py +6 -0
  4. ripperdoc/cli/commands/agents_cmd.py +128 -5
  5. ripperdoc/cli/commands/clear_cmd.py +8 -0
  6. ripperdoc/cli/commands/doctor_cmd.py +29 -0
  7. ripperdoc/cli/commands/exit_cmd.py +1 -0
  8. ripperdoc/cli/commands/memory_cmd.py +2 -1
  9. ripperdoc/cli/commands/models_cmd.py +63 -7
  10. ripperdoc/cli/commands/resume_cmd.py +5 -0
  11. ripperdoc/cli/commands/skills_cmd.py +103 -0
  12. ripperdoc/cli/commands/stats_cmd.py +244 -0
  13. ripperdoc/cli/commands/status_cmd.py +10 -0
  14. ripperdoc/cli/commands/tasks_cmd.py +6 -3
  15. ripperdoc/cli/commands/themes_cmd.py +139 -0
  16. ripperdoc/cli/ui/file_mention_completer.py +63 -13
  17. ripperdoc/cli/ui/helpers.py +6 -3
  18. ripperdoc/cli/ui/interrupt_handler.py +34 -0
  19. ripperdoc/cli/ui/panels.py +14 -8
  20. ripperdoc/cli/ui/rich_ui.py +737 -47
  21. ripperdoc/cli/ui/spinner.py +93 -18
  22. ripperdoc/cli/ui/thinking_spinner.py +1 -2
  23. ripperdoc/cli/ui/tool_renderers.py +10 -9
  24. ripperdoc/cli/ui/wizard.py +24 -19
  25. ripperdoc/core/agents.py +14 -3
  26. ripperdoc/core/config.py +238 -6
  27. ripperdoc/core/default_tools.py +91 -10
  28. ripperdoc/core/hooks/events.py +4 -0
  29. ripperdoc/core/hooks/llm_callback.py +58 -0
  30. ripperdoc/core/hooks/manager.py +6 -0
  31. ripperdoc/core/permissions.py +160 -9
  32. ripperdoc/core/providers/openai.py +84 -28
  33. ripperdoc/core/query.py +489 -87
  34. ripperdoc/core/query_utils.py +17 -14
  35. ripperdoc/core/skills.py +1 -0
  36. ripperdoc/core/theme.py +298 -0
  37. ripperdoc/core/tool.py +15 -5
  38. ripperdoc/protocol/__init__.py +14 -0
  39. ripperdoc/protocol/models.py +300 -0
  40. ripperdoc/protocol/stdio.py +1453 -0
  41. ripperdoc/tools/background_shell.py +354 -139
  42. ripperdoc/tools/bash_tool.py +117 -22
  43. ripperdoc/tools/file_edit_tool.py +228 -50
  44. ripperdoc/tools/file_read_tool.py +154 -3
  45. ripperdoc/tools/file_write_tool.py +53 -11
  46. ripperdoc/tools/grep_tool.py +98 -8
  47. ripperdoc/tools/lsp_tool.py +609 -0
  48. ripperdoc/tools/multi_edit_tool.py +26 -3
  49. ripperdoc/tools/skill_tool.py +52 -1
  50. ripperdoc/tools/task_tool.py +539 -65
  51. ripperdoc/utils/conversation_compaction.py +1 -1
  52. ripperdoc/utils/file_watch.py +216 -7
  53. ripperdoc/utils/image_utils.py +125 -0
  54. ripperdoc/utils/log.py +30 -3
  55. ripperdoc/utils/lsp.py +812 -0
  56. ripperdoc/utils/mcp.py +80 -18
  57. ripperdoc/utils/message_formatting.py +7 -4
  58. ripperdoc/utils/messages.py +198 -33
  59. ripperdoc/utils/pending_messages.py +50 -0
  60. ripperdoc/utils/permissions/shell_command_validation.py +3 -3
  61. ripperdoc/utils/permissions/tool_permission_utils.py +180 -15
  62. ripperdoc/utils/platform.py +198 -0
  63. ripperdoc/utils/session_heatmap.py +242 -0
  64. ripperdoc/utils/session_history.py +2 -2
  65. ripperdoc/utils/session_stats.py +294 -0
  66. ripperdoc/utils/shell_utils.py +8 -5
  67. ripperdoc/utils/todo.py +0 -6
  68. {ripperdoc-0.2.9.dist-info → ripperdoc-0.3.0.dist-info}/METADATA +55 -17
  69. ripperdoc-0.3.0.dist-info/RECORD +136 -0
  70. {ripperdoc-0.2.9.dist-info → ripperdoc-0.3.0.dist-info}/WHEEL +1 -1
  71. ripperdoc/sdk/__init__.py +0 -9
  72. ripperdoc/sdk/client.py +0 -333
  73. ripperdoc-0.2.9.dist-info/RECORD +0 -123
  74. {ripperdoc-0.2.9.dist-info → ripperdoc-0.3.0.dist-info}/entry_points.txt +0 -0
  75. {ripperdoc-0.2.9.dist-info → ripperdoc-0.3.0.dist-info}/licenses/LICENSE +0 -0
  76. {ripperdoc-0.2.9.dist-info → ripperdoc-0.3.0.dist-info}/top_level.txt +0 -0
ripperdoc/core/config.py CHANGED
@@ -8,7 +8,7 @@ import json
8
8
  import os
9
9
  from pathlib import Path
10
10
  from typing import Any, Dict, Optional, Literal
11
- from pydantic import BaseModel, Field, field_validator, model_validator
11
+ from pydantic import BaseModel, Field, model_validator
12
12
  from enum import Enum
13
13
 
14
14
  from ripperdoc.utils.log import get_logger
@@ -94,6 +94,69 @@ def api_base_env_candidates(provider: ProviderType) -> list[str]:
94
94
  ]
95
95
 
96
96
 
97
# Known vision-enabled model patterns for auto-detection.
# Matching is done by substring against the lower-cased model name, so the
# undated entries (e.g. "gpt-4o") also cover their dated variants.
VISION_ENABLED_MODELS = set().union(
    # Anthropic Claude models
    {
        "claude-haiku-4-5-20251001",
        "claude-sonnet-4-5-20250929",
        "claude-opus-4-5-20251101",
        "claude-haiku-4-5",
        "claude-sonnet-4-5",
        "claude-opus-4-5",
        "claude-3-5-sonnet",
        "claude-3-5-sonnet-20241022",
        "claude-3-5-sonnet-20240620",
        "claude-3-5-haiku",
        "claude-3-5-haiku-20241022",
        "claude-3-opus",
        "claude-3-opus-20240229",
        "claude-3-sonnet",
        "claude-3-sonnet-20240229",
        "claude-3-haiku",
        "claude-3-haiku-20240307",
    },
    # OpenAI models
    {
        "gpt-4o",
        "gpt-4o-2024-08-06",
        "gpt-4o-mini",
        "gpt-4o-mini-2024-07-18",
        "gpt-4-turbo",
        "gpt-4-turbo-2024-04-09",
        "gpt-4",
        "gpt-4-0314",
        "gpt-4-vision-preview",
        "chatgpt-4o-latest",
    },
    # Google Gemini models
    {
        "gemini-3-pro-preview",
        "gemini-3-flash-preview",
        "gemini-2.5-pro",
        "gemini-2.5-flash-lite",
        "gemini-2.5-flash",
        "gemini-2.0-flash-exp",
        "gemini-2.0-flash-thinking-exp",
        "gemini-exp-1206",
        "gemini-pro-vision",
        "gemini-1.5-pro",
        "gemini-1.5-pro-001",
        "gemini-1.5-flash",
        "gemini-1.5-flash-001",
    },
    # Alibaba Qwen models (vision)
    {
        "qwen-vl-max",
        "qwen-vl-plus",
        "qwen-vl-plus-latest",
        "qwen2-vl-72b-instruct",
        "qwen-vl-chat",
        "qwen-vl-7b-chat",
    },
    # DeepSeek models (some support vision)
    {
        "deepseek-vl",
        "deepseek-vl-chat",
    },
    # Other vision models
    {
        "glm-4v",
        "glm-4v-plus",
        "minivision-3b",
        "internvl2",
    },
)
158
+
159
+
97
160
  class ModelProfile(BaseModel):
98
161
  """Configuration for a specific AI model."""
99
162
 
@@ -113,17 +176,35 @@ class ModelProfile(BaseModel):
113
176
  # Optional override for thinking protocol handling (e.g., "deepseek", "openrouter",
114
177
  # "qwen", "gemini_openai", "openai"). When unset, provider heuristics are used.
115
178
  thinking_mode: Optional[str] = None
179
+ # Vision support flag. None = auto-detect based on model name, True/False = override.
180
+ supports_vision: Optional[bool] = None
116
181
  # Pricing (USD per 1M tokens). Leave as 0 to skip cost calculation.
117
182
  input_cost_per_million_tokens: float = 0.0
118
183
  output_cost_per_million_tokens: float = 0.0
119
184
 
120
185
 
186
def model_supports_vision(model_profile: ModelProfile) -> bool:
    """Detect whether a model supports vision/image input.

    An explicit ``supports_vision`` value on the profile always wins;
    otherwise the lower-cased model name is matched against the known
    vision-capable model patterns.

    Args:
        model_profile: The model profile to check.

    Returns:
        True if the model supports vision capabilities, False otherwise.
    """
    # Explicit configuration takes precedence over auto-detection.
    explicit = model_profile.supports_vision
    if explicit is not None:
        return explicit

    # Auto-detect via substring match on the model name.
    lowered = model_profile.model.lower()
    for pattern in VISION_ENABLED_MODELS:
        if pattern in lowered:
            return True
    return False
202
+
203
+
121
204
class ModelPointers(BaseModel):
    """Named slots mapping logical model roles to profile names."""

    # Primary model used for the main conversation loop.
    main: str = "default"
    # Smaller/faster model used for lightweight auxiliary calls.
    quick: str = "default"
128
209
 
129
210
 
@@ -143,6 +224,8 @@ class GlobalConfig(BaseModel):
143
224
  show_full_thinking: bool = Field(default=False)
144
225
  auto_compact_enabled: bool = True
145
226
  context_token_limit: Optional[int] = None
227
+ # Default thinking tokens budget when thinking mode is enabled (0 = disabled by default)
228
+ default_thinking_tokens: int = Field(default=10240)
146
229
 
147
230
  # User-level permission rules (applied globally)
148
231
  user_allow_rules: list[str] = Field(default_factory=list)
@@ -192,7 +275,6 @@ class ProjectConfig(BaseModel):
192
275
 
193
276
  # Project settings
194
277
  dont_crawl_directory: bool = False
195
- enable_architect_tool: bool = False
196
278
 
197
279
  # Trust
198
280
  has_trust_dialog_accepted: bool = False
@@ -517,7 +599,7 @@ class ConfigManager:
517
599
  return config
518
600
 
519
601
  def set_model_pointer(self, pointer: str, profile_name: str) -> GlobalConfig:
520
- """Point a logical model slot (e.g., main/task) to a profile name."""
602
+ """Point a logical model slot (e.g., main/quick) to a profile name."""
521
603
  if pointer not in ModelPointers.model_fields:
522
604
  raise ValueError(f"Unknown model pointer '{pointer}'.")
523
605
 
@@ -575,7 +657,7 @@ def delete_model_profile(name: str) -> GlobalConfig:
575
657
 
576
658
 
577
659
def set_model_pointer(pointer: str, profile_name: str) -> GlobalConfig:
    """Update a model pointer (e.g., main/quick) to target a profile.

    Thin module-level convenience wrapper that delegates to the shared
    config manager instance.
    """
    updated = config_manager.set_model_pointer(pointer, profile_name)
    return updated
580
662
 
581
663
 
@@ -594,3 +676,153 @@ def save_project_local_config(
594
676
  ) -> None:
595
677
  """Save project-local configuration."""
596
678
  config_manager.save_project_local_config(config, project_path)
679
+
680
+
681
# ==============================================================================
# RIPPERDOC_* global environment variable support
# ==============================================================================

# Environment variable name constants
RIPPERDOC_BASE_URL = "RIPPERDOC_BASE_URL"
RIPPERDOC_AUTH_TOKEN = "RIPPERDOC_AUTH_TOKEN"
RIPPERDOC_MODEL = "RIPPERDOC_MODEL"
RIPPERDOC_SMALL_FAST_MODEL = "RIPPERDOC_SMALL_FAST_MODEL"
RIPPERDOC_API_KEY = "RIPPERDOC_API_KEY"
RIPPERDOC_PROTOCOL = "RIPPERDOC_PROTOCOL"
692
+
693
+
694
def _infer_protocol_from_url_and_model(base_url: str, model_name: str = "") -> ProviderType:
    """Infer the provider protocol from the API base URL and model name.

    Detection order: known provider domains, protocol markers in the URL
    path, then model-name prefixes. Falls back to the OpenAI-compatible
    protocol, which most third-party endpoints speak.

    Args:
        base_url: API base URL.
        model_name: Model name (optional).

    Returns:
        The inferred ProviderType.
    """
    base_lower = base_url.lower()
    model_lower = model_name.lower()

    # Explicit provider domains.
    if "anthropic.com" in base_lower:
        return ProviderType.ANTHROPIC
    if "generativelanguage.googleapis.com" in base_lower or "gemini" in model_lower:
        return ProviderType.GEMINI

    # Protocol markers embedded in the URL path. The original code also
    # tested `endswith("/anthropic")` and `"/v1/" in ...`, but both are
    # subsumed by the plain substring checks below.
    if "/anthropic" in base_lower:
        return ProviderType.ANTHROPIC
    if "/v1" in base_lower:
        # Most /v1 paths are OpenAI-compatible endpoints.
        return ProviderType.OPENAI_COMPATIBLE

    # Model-name prefixes.
    if model_lower.startswith("claude-"):
        return ProviderType.ANTHROPIC
    if model_lower.startswith("gemini-"):
        return ProviderType.GEMINI

    # Default to the OpenAI-compatible protocol.
    return ProviderType.OPENAI_COMPATIBLE
728
+
729
+
730
def _get_ripperdoc_env_overrides() -> Dict[str, Any]:
    """Collect the values of every RIPPERDOC_* environment variable that is set.

    Returns:
        A dict containing only the overrides present in the environment.
    """
    overrides: Dict[str, Any] = {}

    # Plain string overrides, collected in a fixed order.
    for key, env_name in (
        ("base_url", RIPPERDOC_BASE_URL),
        ("api_key", RIPPERDOC_API_KEY),
        ("auth_token", RIPPERDOC_AUTH_TOKEN),
        ("model", RIPPERDOC_MODEL),
        ("small_fast_model", RIPPERDOC_SMALL_FAST_MODEL),
    ):
        value = os.getenv(env_name)
        if value:
            overrides[key] = value

    # The protocol override is validated against the ProviderType enum.
    protocol_str = os.getenv(RIPPERDOC_PROTOCOL)
    if protocol_str:
        try:
            overrides["protocol"] = ProviderType(protocol_str.lower())
        except ValueError:
            logger.warning(
                "[config] Invalid RIPPERDOC_PROTOCOL value: %s (must be anthropic, openai_compatible, or gemini)",
                protocol_str,
            )
    return overrides
756
+
757
+
758
def has_ripperdoc_env_overrides() -> bool:
    """Return True if at least one RIPPERDOC_* environment variable is set."""
    overrides = _get_ripperdoc_env_overrides()
    return len(overrides) > 0
761
+
762
+
763
def get_effective_model_profile(pointer: str = "main") -> Optional[ModelProfile]:
    """Resolve a model profile, applying RIPPERDOC_* environment overrides.

    When RIPPERDOC_BASE_URL is set, the ModelProfile is built entirely in
    memory — nothing is read from or written to the config files. This is
    the preferred entry point for model configuration, replacing
    get_current_model_profile().

    Args:
        pointer: Model pointer name ("main" or "quick").

    Returns:
        The ModelProfile with environment overrides applied, or None when
        no profile is available.
    """
    env_overrides = _get_ripperdoc_env_overrides()
    base_url = env_overrides.get("base_url")

    # Without RIPPERDOC_BASE_URL, fall through to the config-file profile.
    if not base_url:
        return get_current_model_profile(pointer)

    # Pick the model name: the quick pointer prefers the small/fast model.
    if pointer == "quick":
        model_name = env_overrides.get("small_fast_model") or env_overrides.get("model")
    else:
        model_name = env_overrides.get("model")
    if not model_name:
        model_name = "claude-sonnet-4-5-20250929"

    # Protocol: an explicit env override wins, otherwise infer it from the
    # URL and model name.
    protocol = env_overrides.get("protocol") or _infer_protocol_from_url_and_model(
        base_url, model_name
    )

    # Build the profile purely in memory; it is never persisted to disk.
    return ModelProfile(
        provider=protocol,
        model=model_name,
        api_base=base_url,
        api_key=env_overrides.get("api_key"),
        auth_token=env_overrides.get("auth_token"),
    )
805
+
806
+
807
def get_ripperdoc_env_status() -> Dict[str, str]:
    """Describe the RIPPERDOC_* environment variables for diagnostics.

    Returns:
        Mapping of short variable labels to formatted display strings.
        Secrets (API key / auth token) are masked to at most their first
        four characters.
    """
    status: Dict[str, str] = {}

    # Non-secret values are shown verbatim.
    for label, env_name in (
        ("BASE_URL", RIPPERDOC_BASE_URL),
        ("PROTOCOL", RIPPERDOC_PROTOCOL),
        ("MODEL", RIPPERDOC_MODEL),
        ("SMALL_FAST_MODEL", RIPPERDOC_SMALL_FAST_MODEL),
    ):
        value = os.getenv(env_name)
        if value:
            status[label] = value

    # Secrets are masked before display.
    api_key = os.getenv(RIPPERDOC_API_KEY)
    if api_key:
        masked = api_key[:4] + "…" if len(api_key) > 4 else "set"
        status["API_KEY"] = f"{masked} (${RIPPERDOC_API_KEY})"
    auth_token = os.getenv(RIPPERDOC_AUTH_TOKEN)
    if auth_token:
        masked = auth_token[:4] + "…" if len(auth_token) > 4 else "set"
        status["AUTH_TOKEN"] = f"{masked} (${RIPPERDOC_AUTH_TOKEN})"
    return status
@@ -2,7 +2,7 @@
2
2
 
3
3
  from __future__ import annotations
4
4
 
5
- from typing import Any, List
5
+ from typing import Any, List, Optional
6
6
 
7
7
  from ripperdoc.core.tool import Tool
8
8
 
@@ -17,6 +17,7 @@ from ripperdoc.tools.file_write_tool import FileWriteTool
17
17
  from ripperdoc.tools.glob_tool import GlobTool
18
18
  from ripperdoc.tools.ls_tool import LSTool
19
19
  from ripperdoc.tools.grep_tool import GrepTool
20
+ from ripperdoc.tools.lsp_tool import LspTool
20
21
  from ripperdoc.tools.skill_tool import SkillTool
21
22
  from ripperdoc.tools.todo_tool import TodoReadTool, TodoWriteTool
22
23
  from ripperdoc.tools.ask_user_question_tool import AskUserQuestionTool
@@ -34,8 +35,74 @@ from ripperdoc.utils.log import get_logger
34
35
 
35
36
  logger = get_logger()
36
37
 
38
# Canonical tool names for --tools filtering
BUILTIN_TOOL_NAMES = [
    # Shell execution
    "Bash",
    "BashOutput",
    "KillBash",
    # File access and editing
    "Read",
    "Edit",
    "MultiEdit",
    "NotebookEdit",
    "Write",
    # Filesystem search / listing
    "Glob",
    "LS",
    "Grep",
    # Language-server integration
    "LSP",
    # Skills and todo tracking
    "Skill",
    "TodoRead",
    "TodoWrite",
    # Interaction and planning
    "AskUserQuestion",
    "EnterPlanMode",
    "ExitPlanMode",
    # MCP integration
    "ToolSearch",
    "ListMcpServers",
    "ListMcpResources",
    "ReadMcpResource",
    # Subagent launcher
    "Task",
]
37
64
 
38
- def get_default_tools() -> List[Tool[Any, Any]]:
65
+
66
def filter_tools_by_names(
    tools: List[Tool[Any, Any]], tool_names: List[str]
) -> List[Tool[Any, Any]]:
    """Filter a tool list to only include tools with matching names.

    Args:
        tools: The full list of tools to filter.
        tool_names: List of tool names to include.

    Returns:
        Filtered list of tools. If Task is included, it's recreated with
        the filtered base tools.
    """
    if not tool_names:
        return []

    def _tool_name(tool: Tool[Any, Any]) -> str:
        # Fall back to the class name for tools lacking a `name` attribute.
        return getattr(tool, "name", tool.__class__.__name__)

    name_set = set(tool_names)
    filtered: List[Tool[Any, Any]] = []
    has_task = False

    for tool in tools:
        tool_name = _tool_name(tool)
        if tool_name in name_set:
            if tool_name == "Task":
                has_task = True
            else:
                filtered.append(tool)

    # If Task is requested, recreate it with the filtered base tools so the
    # subagent can only launch tools the caller allowed.
    if has_task:

        def _filtered_base_provider() -> List[Tool[Any, Any]]:
            # Use the same name resolution as the loop above. The original
            # closure checked only the `name` attribute (default None),
            # which could disagree with the class-name fallback used when
            # selecting tools.
            return [t for t in filtered if _tool_name(t) != "Task"]

        filtered.append(TaskTool(_filtered_base_provider))

    return filtered
103
+
104
+
105
+ def get_default_tools(allowed_tools: Optional[List[str]] = None) -> List[Tool[Any, Any]]:
39
106
  """Construct the default tool set (base tools + Task subagent launcher)."""
40
107
  base_tools: List[Tool[Any, Any]] = [
41
108
  BashTool(),
@@ -49,6 +116,7 @@ def get_default_tools() -> List[Tool[Any, Any]]:
49
116
  GlobTool(),
50
117
  LSTool(),
51
118
  GrepTool(),
119
+ LspTool(),
52
120
  SkillTool(),
53
121
  TodoReadTool(),
54
122
  TodoWriteTool(),
@@ -86,12 +154,25 @@ def get_default_tools() -> List[Tool[Any, Any]]:
86
154
 
87
155
  task_tool = TaskTool(lambda: base_tools)
88
156
  all_tools = base_tools + [task_tool]
89
- logger.debug(
90
- "[default_tools] Built tool inventory",
91
- extra={
92
- "base_tools": len(base_tools),
93
- "dynamic_mcp_tools": len(dynamic_tools),
94
- "total_tools": len(all_tools),
95
- },
96
- )
157
+
158
+ # Apply allowed_tools filter if specified
159
+ if allowed_tools is not None:
160
+ all_tools = filter_tools_by_names(all_tools, allowed_tools)
161
+ logger.debug(
162
+ "[default_tools] Filtered tool inventory",
163
+ extra={
164
+ "allowed_tools": allowed_tools,
165
+ "filtered_tools": len(all_tools),
166
+ },
167
+ )
168
+ else:
169
+ logger.debug(
170
+ "[default_tools] Built tool inventory",
171
+ extra={
172
+ "base_tools": len(base_tools),
173
+ "dynamic_mcp_tools": len(dynamic_tools),
174
+ "total_tools": len(all_tools),
175
+ },
176
+ )
177
+
97
178
  return all_tools
@@ -515,6 +515,10 @@ class HookOutput(BaseModel):
515
515
  """Get updated input from PreToolUse hook."""
516
516
  if isinstance(self.hook_specific_output, PreToolUseHookOutput):
517
517
  return self.hook_specific_output.updated_input
518
+ if isinstance(self.hook_specific_output, PermissionRequestHookOutput):
519
+ decision = self.hook_specific_output.decision
520
+ if decision and decision.updated_input:
521
+ return decision.updated_input
518
522
  if isinstance(self.hook_specific_output, dict):
519
523
  return self.hook_specific_output.get("updatedInput")
520
524
  return None
@@ -0,0 +1,58 @@
1
+ """LLM callback helper for prompt-based hooks."""
2
+
3
+ from __future__ import annotations
4
+
5
+ from typing import Optional
6
+
7
+ from ripperdoc.core.hooks.executor import LLMCallback
8
+ from ripperdoc.core.query import query_llm
9
+ from ripperdoc.utils.log import get_logger
10
+ from ripperdoc.utils.messages import AssistantMessage, create_user_message
11
+
12
+ logger = get_logger()
13
+
14
+
15
+ def _extract_text(message: AssistantMessage) -> str:
16
+ content = message.message.content
17
+ if isinstance(content, str):
18
+ return content
19
+ if isinstance(content, list):
20
+ parts = []
21
+ for block in content:
22
+ text = getattr(block, "text", None) or (
23
+ block.get("text") if isinstance(block, dict) else None
24
+ )
25
+ if text:
26
+ parts.append(str(text))
27
+ return "\n".join(parts)
28
+ return ""
29
+
30
+
31
+ def build_hook_llm_callback(
32
+ *,
33
+ model: str = "quick",
34
+ max_thinking_tokens: int = 0,
35
+ system_prompt: Optional[str] = None,
36
+ ) -> LLMCallback:
37
+ """Build an async callback for prompt hooks using the configured model."""
38
+
39
+ async def _callback(prompt: str) -> str:
40
+ try:
41
+ assistant = await query_llm(
42
+ [create_user_message(prompt)],
43
+ system_prompt or "",
44
+ [],
45
+ max_thinking_tokens=max_thinking_tokens,
46
+ model=model,
47
+ stream=False,
48
+ )
49
+ return _extract_text(assistant).strip()
50
+ except Exception as exc:
51
+ logger.warning(
52
+ "[hooks] Prompt hook LLM callback failed: %s: %s",
53
+ type(exc).__name__,
54
+ exc,
55
+ )
56
+ return f"Prompt hook evaluation failed: {exc}"
57
+
58
+ return _callback
@@ -510,10 +510,14 @@ class HookManager:
510
510
  stop_sequence: Optional[str] = None,
511
511
  ) -> HookResult:
512
512
  """Run Stop hooks asynchronously."""
513
+ logger.debug("[hook_manager] run_stop_async ENTER")
513
514
  hooks = self._get_hooks(HookEvent.STOP)
515
+ logger.debug(f"[hook_manager] run_stop_async: got {len(hooks)} hooks")
514
516
  if not hooks:
517
+ logger.debug("[hook_manager] run_stop_async: no hooks, returning empty HookResult")
515
518
  return HookResult([])
516
519
 
520
+ logger.debug("[hook_manager] run_stop_async: creating StopInput")
517
521
  input_data = StopInput(
518
522
  stop_hook_active=stop_hook_active,
519
523
  reason=reason,
@@ -524,7 +528,9 @@ class HookManager:
524
528
  permission_mode=self.permission_mode,
525
529
  )
526
530
 
531
+ logger.debug("[hook_manager] run_stop_async: calling executor.execute_hooks_async")
527
532
  outputs = await self.executor.execute_hooks_async(hooks, input_data)
533
+ logger.debug("[hook_manager] run_stop_async: execute_hooks_async returned")
528
534
  return HookResult(outputs)
529
535
 
530
536
  # --- Subagent Stop ---