ripperdoc 0.2.9__py3-none-any.whl → 0.2.10__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (45)
  1. ripperdoc/__init__.py +1 -1
  2. ripperdoc/cli/cli.py +235 -14
  3. ripperdoc/cli/commands/__init__.py +2 -0
  4. ripperdoc/cli/commands/agents_cmd.py +132 -5
  5. ripperdoc/cli/commands/clear_cmd.py +8 -0
  6. ripperdoc/cli/commands/exit_cmd.py +1 -0
  7. ripperdoc/cli/commands/models_cmd.py +3 -3
  8. ripperdoc/cli/commands/resume_cmd.py +4 -0
  9. ripperdoc/cli/commands/stats_cmd.py +244 -0
  10. ripperdoc/cli/ui/panels.py +1 -0
  11. ripperdoc/cli/ui/rich_ui.py +295 -24
  12. ripperdoc/cli/ui/spinner.py +30 -18
  13. ripperdoc/cli/ui/thinking_spinner.py +1 -2
  14. ripperdoc/cli/ui/wizard.py +6 -8
  15. ripperdoc/core/agents.py +10 -3
  16. ripperdoc/core/config.py +3 -6
  17. ripperdoc/core/default_tools.py +90 -10
  18. ripperdoc/core/hooks/events.py +4 -0
  19. ripperdoc/core/hooks/llm_callback.py +59 -0
  20. ripperdoc/core/permissions.py +78 -4
  21. ripperdoc/core/providers/openai.py +29 -19
  22. ripperdoc/core/query.py +192 -31
  23. ripperdoc/core/tool.py +9 -4
  24. ripperdoc/sdk/client.py +77 -2
  25. ripperdoc/tools/background_shell.py +305 -134
  26. ripperdoc/tools/bash_tool.py +42 -13
  27. ripperdoc/tools/file_edit_tool.py +159 -50
  28. ripperdoc/tools/file_read_tool.py +20 -0
  29. ripperdoc/tools/file_write_tool.py +7 -8
  30. ripperdoc/tools/lsp_tool.py +615 -0
  31. ripperdoc/tools/task_tool.py +514 -65
  32. ripperdoc/utils/conversation_compaction.py +1 -1
  33. ripperdoc/utils/file_watch.py +206 -3
  34. ripperdoc/utils/lsp.py +806 -0
  35. ripperdoc/utils/message_formatting.py +5 -2
  36. ripperdoc/utils/messages.py +21 -1
  37. ripperdoc/utils/permissions/tool_permission_utils.py +174 -15
  38. ripperdoc/utils/session_heatmap.py +244 -0
  39. ripperdoc/utils/session_stats.py +293 -0
  40. {ripperdoc-0.2.9.dist-info → ripperdoc-0.2.10.dist-info}/METADATA +8 -2
  41. {ripperdoc-0.2.9.dist-info → ripperdoc-0.2.10.dist-info}/RECORD +45 -39
  42. {ripperdoc-0.2.9.dist-info → ripperdoc-0.2.10.dist-info}/WHEEL +0 -0
  43. {ripperdoc-0.2.9.dist-info → ripperdoc-0.2.10.dist-info}/entry_points.txt +0 -0
  44. {ripperdoc-0.2.9.dist-info → ripperdoc-0.2.10.dist-info}/licenses/LICENSE +0 -0
  45. {ripperdoc-0.2.9.dist-info → ripperdoc-0.2.10.dist-info}/top_level.txt +0 -0
ripperdoc/core/query.py CHANGED
@@ -43,7 +43,11 @@ from ripperdoc.core.query_utils import (
43
43
  from ripperdoc.core.tool import Tool, ToolProgress, ToolResult, ToolUseContext
44
44
  from ripperdoc.utils.coerce import parse_optional_int
45
45
  from ripperdoc.utils.context_length_errors import detect_context_length_error
46
- from ripperdoc.utils.file_watch import ChangedFileNotice, FileSnapshot, detect_changed_files
46
+ from ripperdoc.utils.file_watch import (
47
+ BoundedFileCache,
48
+ ChangedFileNotice,
49
+ detect_changed_files,
50
+ )
47
51
  from ripperdoc.utils.log import get_logger
48
52
  from ripperdoc.utils.messages import (
49
53
  AssistantMessage,
@@ -131,7 +135,7 @@ async def _check_tool_permissions(
131
135
  parsed_input: Any,
132
136
  query_context: "QueryContext",
133
137
  can_use_tool_fn: Optional[ToolPermissionCallable],
134
- ) -> tuple[bool, Optional[str]]:
138
+ ) -> tuple[bool, Optional[str], Optional[Any]]:
135
139
  """Evaluate whether a tool call is allowed."""
136
140
  try:
137
141
  if can_use_tool_fn is not None:
@@ -139,12 +143,14 @@ async def _check_tool_permissions(
139
143
  if inspect.isawaitable(decision):
140
144
  decision = await decision
141
145
  if isinstance(decision, PermissionResult):
142
- return decision.result, decision.message
146
+ return decision.result, decision.message, decision.updated_input
143
147
  if isinstance(decision, dict) and "result" in decision:
144
- return bool(decision.get("result")), decision.get("message")
148
+ return bool(decision.get("result")), decision.get("message"), decision.get(
149
+ "updated_input"
150
+ )
145
151
  if isinstance(decision, tuple) and len(decision) == 2:
146
- return bool(decision[0]), decision[1]
147
- return bool(decision), None
152
+ return bool(decision[0]), decision[1], None
153
+ return bool(decision), None, None
148
154
 
149
155
  if not query_context.yolo_mode and tool.needs_permissions(parsed_input):
150
156
  loop = asyncio.get_running_loop()
@@ -155,15 +161,15 @@ async def _check_tool_permissions(
155
161
  )
156
162
  prompt = f"Allow tool '{tool.name}' with input {input_preview}? [y/N]: "
157
163
  response = await loop.run_in_executor(None, lambda: input(prompt))
158
- return response.strip().lower() in ("y", "yes"), None
164
+ return response.strip().lower() in ("y", "yes"), None, None
159
165
 
160
- return True, None
166
+ return True, None, None
161
167
  except (TypeError, AttributeError, ValueError) as exc:
162
168
  logger.warning(
163
169
  f"Error checking permissions for tool '{tool.name}': {type(exc).__name__}: {exc}",
164
170
  extra={"tool": getattr(tool, "name", None), "error_type": type(exc).__name__},
165
171
  )
166
- return False, None
172
+ return False, None, None
167
173
 
168
174
 
169
175
  def _format_changed_file_notice(notices: List[ChangedFileNotice]) -> str:
@@ -182,6 +188,18 @@ def _format_changed_file_notice(notices: List[ChangedFileNotice]) -> str:
182
188
  return "\n".join(lines)
183
189
 
184
190
 
191
+ def _append_hook_context(context: Dict[str, str], label: str, payload: Optional[str]) -> None:
192
+ """Append hook-supplied context to the shared context dict."""
193
+ if not payload:
194
+ return
195
+ key = f"Hook:{label}"
196
+ existing = context.get(key)
197
+ if existing:
198
+ context[key] = f"{existing}\n{payload}"
199
+ else:
200
+ context[key] = payload
201
+
202
+
185
203
  async def _run_tool_use_generator(
186
204
  tool: Tool[Any, Any],
187
205
  tool_use_id: str,
@@ -189,6 +207,7 @@ async def _run_tool_use_generator(
189
207
  parsed_input: Any,
190
208
  sibling_ids: set[str],
191
209
  tool_context: ToolUseContext,
210
+ context: Dict[str, str],
192
211
  ) -> AsyncGenerator[Union[UserMessage, ProgressMessage], None]:
193
212
  """Execute a single tool_use and yield progress/results."""
194
213
  # Get tool input as dict for hooks
@@ -235,6 +254,9 @@ async def _run_tool_use_generator(
235
254
  f"[query] PreToolUse hook added context for {tool_name}",
236
255
  extra={"context": pre_result.additional_context[:100]},
237
256
  )
257
+ _append_hook_context(context, f"PreToolUse:{tool_name}", pre_result.additional_context)
258
+ if pre_result.system_message:
259
+ _append_hook_context(context, f"PreToolUse:{tool_name}:system", pre_result.system_message)
238
260
 
239
261
  tool_output = None
240
262
 
@@ -271,9 +293,18 @@ async def _run_tool_use_generator(
271
293
  yield tool_result_message(tool_use_id, f"Error executing tool: {str(exc)}", is_error=True)
272
294
 
273
295
  # Run PostToolUse hooks
274
- await hook_manager.run_post_tool_use_async(
296
+ post_result = await hook_manager.run_post_tool_use_async(
275
297
  tool_name, tool_input_dict, tool_response=tool_output, tool_use_id=tool_use_id
276
298
  )
299
+ if post_result.additional_context:
300
+ _append_hook_context(context, f"PostToolUse:{tool_name}", post_result.additional_context)
301
+ if post_result.system_message:
302
+ _append_hook_context(
303
+ context, f"PostToolUse:{tool_name}:system", post_result.system_message
304
+ )
305
+ if post_result.should_block:
306
+ reason = post_result.block_reason or post_result.stop_reason or "Blocked by hook."
307
+ yield create_user_message(f"PostToolUse hook blocked: {reason}")
277
308
 
278
309
 
279
310
  def _group_tool_calls_by_concurrency(prepared_calls: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
@@ -345,6 +376,7 @@ async def _run_concurrent_tool_uses(
345
376
  """Drain multiple tool generators concurrently and stream outputs."""
346
377
  if not generators:
347
378
  return
379
+ yield # Make this a proper async generator that yields nothing
348
380
 
349
381
  queue: asyncio.Queue[Optional[Union[UserMessage, ProgressMessage]]] = asyncio.Queue()
350
382
 
@@ -528,6 +560,14 @@ def _apply_skill_context_updates(
528
560
  class QueryContext:
529
561
  """Context for a query session."""
530
562
 
563
+ # Thresholds for memory warnings
564
+ MESSAGE_COUNT_WARNING_THRESHOLD = int(
565
+ os.getenv("RIPPERDOC_MESSAGE_WARNING_THRESHOLD", "500")
566
+ )
567
+ MESSAGE_COUNT_CRITICAL_THRESHOLD = int(
568
+ os.getenv("RIPPERDOC_MESSAGE_CRITICAL_THRESHOLD", "1000")
569
+ )
570
+
531
571
  def __init__(
532
572
  self,
533
573
  tools: List[Tool[Any, Any]],
@@ -537,6 +577,9 @@ class QueryContext:
537
577
  verbose: bool = False,
538
578
  pause_ui: Optional[Callable[[], None]] = None,
539
579
  resume_ui: Optional[Callable[[], None]] = None,
580
+ stop_hook: str = "stop",
581
+ file_cache_max_entries: int = 500,
582
+ file_cache_max_memory_mb: float = 50.0,
540
583
  ) -> None:
541
584
  self.tool_registry = ToolRegistry(tools)
542
585
  self.max_thinking_tokens = max_thinking_tokens
@@ -544,9 +587,16 @@ class QueryContext:
544
587
  self.model = model
545
588
  self.verbose = verbose
546
589
  self.abort_controller = asyncio.Event()
547
- self.file_state_cache: Dict[str, FileSnapshot] = {}
590
+ # Use BoundedFileCache instead of plain Dict to prevent unbounded growth
591
+ self.file_state_cache: BoundedFileCache = BoundedFileCache(
592
+ max_entries=file_cache_max_entries,
593
+ max_memory_mb=file_cache_max_memory_mb,
594
+ )
548
595
  self.pause_ui = pause_ui
549
596
  self.resume_ui = resume_ui
597
+ self.stop_hook = stop_hook
598
+ self.stop_hook_active = False
599
+ self._last_message_warning_count = 0
550
600
 
551
601
  @property
552
602
  def tools(self) -> List[Tool[Any, Any]]:
@@ -566,6 +616,44 @@ class QueryContext:
566
616
  """Return all known tools (active + deferred)."""
567
617
  return self.tool_registry.all_tools
568
618
 
619
+ def check_message_count(self, message_count: int) -> None:
620
+ """Check message count and log warnings if thresholds are exceeded.
621
+
622
+ This helps detect potential memory issues in long sessions.
623
+ """
624
+ if message_count >= self.MESSAGE_COUNT_CRITICAL_THRESHOLD:
625
+ if self._last_message_warning_count < self.MESSAGE_COUNT_CRITICAL_THRESHOLD:
626
+ logger.warning(
627
+ "[query] Critical: Message history is very large. "
628
+ "Consider compacting or starting a new session.",
629
+ extra={
630
+ "message_count": message_count,
631
+ "threshold": self.MESSAGE_COUNT_CRITICAL_THRESHOLD,
632
+ "file_cache_stats": self.file_state_cache.stats(),
633
+ },
634
+ )
635
+ self._last_message_warning_count = message_count
636
+ elif message_count >= self.MESSAGE_COUNT_WARNING_THRESHOLD:
637
+ # Only warn once per threshold crossing
638
+ if self._last_message_warning_count < self.MESSAGE_COUNT_WARNING_THRESHOLD:
639
+ logger.info(
640
+ "[query] Message history growing large; automatic compaction may trigger soon",
641
+ extra={
642
+ "message_count": message_count,
643
+ "threshold": self.MESSAGE_COUNT_WARNING_THRESHOLD,
644
+ "file_cache_stats": self.file_state_cache.stats(),
645
+ },
646
+ )
647
+ self._last_message_warning_count = message_count
648
+
649
+ def get_memory_stats(self) -> Dict[str, Any]:
650
+ """Return memory usage statistics for monitoring."""
651
+ return {
652
+ "file_cache": self.file_state_cache.stats(),
653
+ "tool_count": len(self.tool_registry.all_tools),
654
+ "active_tool_count": len(self.tool_registry.active_tools),
655
+ }
656
+
569
657
 
570
658
  async def query_llm(
571
659
  messages: List[Union[UserMessage, AssistantMessage, ProgressMessage]],
@@ -598,7 +686,6 @@ async def query_llm(
598
686
  AssistantMessage with the model's response
599
687
  """
600
688
  request_timeout = request_timeout or DEFAULT_REQUEST_TIMEOUT_SEC
601
- request_timeout = request_timeout or DEFAULT_REQUEST_TIMEOUT_SEC
602
689
  model_profile = resolve_model_profile(model)
603
690
 
604
691
  # Normalize messages based on protocol family (Anthropic allows tool blocks; OpenAI-style prefers text-only)
@@ -657,13 +744,25 @@ async def query_llm(
657
744
  start_time = time.time()
658
745
 
659
746
  try:
660
- client: Optional[ProviderClient] = get_provider_client(model_profile.provider)
747
+ try:
748
+ client: Optional[ProviderClient] = get_provider_client(model_profile.provider)
749
+ except RuntimeError as exc:
750
+ duration_ms = (time.time() - start_time) * 1000
751
+ error_msg = create_assistant_message(
752
+ content=str(exc),
753
+ duration_ms=duration_ms,
754
+ )
755
+ error_msg.is_api_error_message = True
756
+ return error_msg
661
757
  if client is None:
662
758
  duration_ms = (time.time() - start_time) * 1000
759
+ provider_label = getattr(model_profile.provider, "value", None) or str(
760
+ model_profile.provider
761
+ )
663
762
  error_msg = create_assistant_message(
664
763
  content=(
665
- "Gemini protocol is not supported yet in Ripperdoc. "
666
- "Please configure an Anthropic or OpenAI-compatible model."
764
+ f"No provider client available for '{provider_label}'. "
765
+ "Check your model configuration and provider dependencies."
667
766
  ),
668
767
  duration_ms=duration_ms,
669
768
  )
@@ -715,6 +814,13 @@ async def query_llm(
715
814
  cost_usd=provider_response.cost_usd,
716
815
  duration_ms=provider_response.duration_ms,
717
816
  metadata=provider_response.metadata,
817
+ model=model_profile.model,
818
+ input_tokens=provider_response.usage_tokens.get("input_tokens", 0),
819
+ output_tokens=provider_response.usage_tokens.get("output_tokens", 0),
820
+ cache_read_tokens=provider_response.usage_tokens.get("cache_read_input_tokens", 0),
821
+ cache_creation_tokens=provider_response.usage_tokens.get(
822
+ "cache_creation_input_tokens", 0
823
+ ),
718
824
  )
719
825
 
720
826
  except CancelledError:
@@ -830,7 +936,7 @@ async def _run_query_iteration(
830
936
  )
831
937
 
832
938
  # Stream LLM response
833
- progress_queue: asyncio.Queue[Optional[ProgressMessage]] = asyncio.Queue()
939
+ progress_queue: asyncio.Queue[Optional[ProgressMessage]] = asyncio.Queue(maxsize=1000)
834
940
 
835
941
  async def _stream_progress(chunk: str) -> None:
836
942
  if not chunk:
@@ -883,23 +989,23 @@ async def _run_query_iteration(
883
989
  progress = progress_queue.get_nowait()
884
990
  except asyncio.QueueEmpty:
885
991
  waiter = asyncio.create_task(progress_queue.get())
886
- # Use timeout to periodically check abort_controller during LLM request
992
+ abort_waiter = asyncio.create_task(query_context.abort_controller.wait())
887
993
  done, pending = await asyncio.wait(
888
- {assistant_task, waiter},
994
+ {assistant_task, waiter, abort_waiter},
889
995
  return_when=asyncio.FIRST_COMPLETED,
890
- timeout=0.1, # Check abort_controller every 100ms
891
996
  )
892
- if not done:
893
- # Timeout - cancel waiter and continue loop to check abort_controller
894
- waiter.cancel()
895
- try:
896
- await waiter
897
- except asyncio.CancelledError:
898
- pass
997
+ for task in pending:
998
+ # Don't cancel assistant_task here - it should only be cancelled
999
+ # through abort_controller in the main loop
1000
+ if task is not assistant_task:
1001
+ task.cancel()
1002
+ try:
1003
+ await task
1004
+ except asyncio.CancelledError:
1005
+ pass
1006
+ if abort_waiter in done:
899
1007
  continue
900
1008
  if assistant_task in done:
901
- for task in pending:
902
- task.cancel()
903
1009
  assistant_message = await assistant_task
904
1010
  break
905
1011
  progress = waiter.result()
@@ -912,7 +1018,8 @@ async def _run_query_iteration(
912
1018
  if residual:
913
1019
  yield residual
914
1020
 
915
- assert assistant_message is not None
1021
+ if assistant_message is None:
1022
+ raise RuntimeError("assistant_message was unexpectedly None after LLM query")
916
1023
  result.assistant_message = assistant_message
917
1024
 
918
1025
  # Check for abort
@@ -937,6 +1044,27 @@ async def _run_query_iteration(
937
1044
 
938
1045
  if not tool_use_blocks:
939
1046
  logger.debug("[query] No tool_use blocks; returning response to user.")
1047
+ stop_hook = query_context.stop_hook
1048
+ stop_result = (
1049
+ await hook_manager.run_subagent_stop_async(
1050
+ stop_hook_active=query_context.stop_hook_active
1051
+ )
1052
+ if stop_hook == "subagent"
1053
+ else await hook_manager.run_stop_async(stop_hook_active=query_context.stop_hook_active)
1054
+ )
1055
+ if stop_result.additional_context:
1056
+ _append_hook_context(context, f"{stop_hook}:context", stop_result.additional_context)
1057
+ if stop_result.system_message:
1058
+ _append_hook_context(context, f"{stop_hook}:system", stop_result.system_message)
1059
+ if stop_result.should_block:
1060
+ reason = stop_result.block_reason or stop_result.stop_reason or "Blocked by hook."
1061
+ result.tool_results = [create_user_message(f"{stop_hook} hook blocked: {reason}")]
1062
+ for msg in result.tool_results:
1063
+ yield msg
1064
+ query_context.stop_hook_active = True
1065
+ result.should_stop = False
1066
+ return
1067
+ query_context.stop_hook_active = False
940
1068
  result.should_stop = True
941
1069
  return
942
1070
 
@@ -962,7 +1090,8 @@ async def _run_query_iteration(
962
1090
  tool_results.append(missing_msg)
963
1091
  yield missing_msg
964
1092
  continue
965
- assert tool is not None
1093
+ if tool is None:
1094
+ raise RuntimeError(f"Tool '{tool_name}' resolved to None unexpectedly")
966
1095
 
967
1096
  try:
968
1097
  parsed_input = tool.input_schema(**tool_input)
@@ -977,6 +1106,7 @@ async def _run_query_iteration(
977
1106
  permission_checker=can_use_tool_fn,
978
1107
  tool_registry=query_context.tool_registry,
979
1108
  file_state_cache=query_context.file_state_cache,
1109
+ conversation_messages=messages,
980
1110
  abort_signal=query_context.abort_controller,
981
1111
  pause_ui=query_context.pause_ui,
982
1112
  resume_ui=query_context.resume_ui,
@@ -997,7 +1127,7 @@ async def _run_query_iteration(
997
1127
  continue
998
1128
 
999
1129
  if not query_context.yolo_mode or can_use_tool_fn is not None:
1000
- allowed, denial_message = await _check_tool_permissions(
1130
+ allowed, denial_message, updated_input = await _check_tool_permissions(
1001
1131
  tool, parsed_input, query_context, can_use_tool_fn
1002
1132
  )
1003
1133
  if not allowed:
@@ -1010,6 +1140,29 @@ async def _run_query_iteration(
1010
1140
  yield denial_msg
1011
1141
  permission_denied = True
1012
1142
  break
1143
+ if updated_input:
1144
+ try:
1145
+ parsed_input = tool.input_schema(**updated_input)
1146
+ except ValidationError as ve:
1147
+ detail_text = format_pydantic_errors(ve)
1148
+ error_msg = tool_result_message(
1149
+ tool_use_id,
1150
+ f"Invalid permission-updated input for tool '{tool_name}': {detail_text}",
1151
+ is_error=True,
1152
+ )
1153
+ tool_results.append(error_msg)
1154
+ yield error_msg
1155
+ continue
1156
+ validation = await tool.validate_input(parsed_input, tool_context)
1157
+ if not validation.result:
1158
+ error_msg = tool_result_message(
1159
+ tool_use_id,
1160
+ validation.message or "Tool input validation failed.",
1161
+ is_error=True,
1162
+ )
1163
+ tool_results.append(error_msg)
1164
+ yield error_msg
1165
+ continue
1013
1166
 
1014
1167
  prepared_calls.append(
1015
1168
  {
@@ -1021,6 +1174,7 @@ async def _run_query_iteration(
1021
1174
  parsed_input,
1022
1175
  sibling_ids,
1023
1176
  tool_context,
1177
+ context,
1024
1178
  ),
1025
1179
  }
1026
1180
  )
@@ -1122,6 +1276,9 @@ async def query(
1122
1276
  # do not interfere with the loop or normalization.
1123
1277
  messages = list(messages)
1124
1278
 
1279
+ # Check initial message count for memory warnings
1280
+ query_context.check_message_count(len(messages))
1281
+
1125
1282
  for iteration in range(1, MAX_QUERY_ITERATIONS + 1):
1126
1283
  result = IterationResult()
1127
1284
 
@@ -1144,6 +1301,10 @@ async def query(
1144
1301
  messages = messages + [result.assistant_message] + result.tool_results # type: ignore[operator]
1145
1302
  else:
1146
1303
  messages = messages + result.tool_results # type: ignore[operator]
1304
+
1305
+ # Check message count after each iteration for memory warnings
1306
+ query_context.check_message_count(len(messages))
1307
+
1147
1308
  logger.debug(
1148
1309
  f"[query] Continuing loop with {len(messages)} messages after tools; "
1149
1310
  f"tool_results_count={len(result.tool_results)}"
ripperdoc/core/tool.py CHANGED
@@ -8,7 +8,7 @@ import json
8
8
  from abc import ABC, abstractmethod
9
9
  from typing import Annotated, Any, AsyncGenerator, Dict, List, Optional, TypeVar, Generic, Union
10
10
  from pydantic import BaseModel, ConfigDict, Field, SkipValidation
11
- from ripperdoc.utils.file_watch import FileSnapshot
11
+ from ripperdoc.utils.file_watch import FileCacheType
12
12
  from ripperdoc.utils.log import get_logger
13
13
 
14
14
 
@@ -41,11 +41,16 @@ class ToolUseContext(BaseModel):
41
41
  verbose: bool = False
42
42
  permission_checker: Optional[Any] = None
43
43
  read_file_timestamps: Dict[str, float] = Field(default_factory=dict)
44
- # SkipValidation prevents Pydantic from copying the dict during validation,
45
- # ensuring Read and Edit tools share the same cache instance
46
- file_state_cache: Annotated[Dict[str, FileSnapshot], SkipValidation] = Field(
44
+ # SkipValidation prevents Pydantic from copying the cache during validation,
45
+ # ensuring Read and Edit tools share the same cache instance.
46
+ # FileCacheType supports both Dict[str, FileSnapshot] and BoundedFileCache.
47
+ file_state_cache: Annotated[FileCacheType, SkipValidation] = Field(
47
48
  default_factory=dict
48
49
  )
50
+ conversation_messages: Annotated[Optional[List[Any]], SkipValidation] = Field(
51
+ default=None,
52
+ description="Full conversation history for tools that need parent context.",
53
+ )
49
54
  tool_registry: Optional[Any] = None
50
55
  abort_signal: Optional[Any] = None
51
56
  # UI control callbacks for tools that need user interaction
ripperdoc/sdk/client.py CHANGED
@@ -8,6 +8,8 @@ from __future__ import annotations
8
8
 
9
9
  import asyncio
10
10
  import os
11
+ import time
12
+ import uuid
11
13
  from dataclasses import dataclass, field
12
14
  from pathlib import Path
13
15
  from typing import (
@@ -24,6 +26,8 @@ from typing import (
24
26
  )
25
27
 
26
28
  from ripperdoc.core.default_tools import get_default_tools
29
+ from ripperdoc.core.hooks.llm_callback import build_hook_llm_callback
30
+ from ripperdoc.core.hooks.manager import hook_manager
27
31
  from ripperdoc.core.query import QueryContext, query as _core_query
28
32
  from ripperdoc.core.permissions import PermissionResult
29
33
  from ripperdoc.core.system_prompt import build_system_prompt
@@ -36,6 +40,7 @@ from ripperdoc.utils.messages import (
36
40
  AssistantMessage,
37
41
  ProgressMessage,
38
42
  UserMessage,
43
+ create_assistant_message,
39
44
  create_user_message,
40
45
  )
41
46
  from ripperdoc.utils.mcp import (
@@ -159,6 +164,10 @@ class RipperdocClient:
159
164
  self._current_context: Optional[QueryContext] = None
160
165
  self._connected = False
161
166
  self._previous_cwd: Optional[Path] = None
167
+ self._session_hook_contexts: List[str] = []
168
+ self._session_id: Optional[str] = None
169
+ self._session_start_time: Optional[float] = None
170
+ self._session_end_sent: bool = False
162
171
 
163
172
  @property
164
173
  def tools(self) -> List[Tool[Any, Any]]:
@@ -182,6 +191,22 @@ class RipperdocClient:
182
191
  self._previous_cwd = Path.cwd()
183
192
  os.chdir(_coerce_to_path(self.options.cwd))
184
193
  self._connected = True
194
+ project_path = _coerce_to_path(self.options.cwd or Path.cwd())
195
+ hook_manager.set_project_dir(project_path)
196
+ self._session_id = self._session_id or str(uuid.uuid4())
197
+ hook_manager.set_session_id(self._session_id)
198
+ hook_manager.set_llm_callback(build_hook_llm_callback())
199
+ try:
200
+ result = await hook_manager.run_session_start_async("startup")
201
+ self._session_hook_contexts = self._collect_hook_contexts(result)
202
+ self._session_start_time = time.time()
203
+ self._session_end_sent = False
204
+ except (OSError, RuntimeError, ConnectionError, ValueError, TypeError) as exc:
205
+ logger.warning(
206
+ "[sdk] SessionStart hook failed: %s: %s",
207
+ type(exc).__name__,
208
+ exc,
209
+ )
185
210
 
186
211
  if prompt:
187
212
  await self.query(prompt)
@@ -203,6 +228,25 @@ class RipperdocClient:
203
228
  self._previous_cwd = None
204
229
 
205
230
  self._connected = False
231
+ if not self._session_end_sent:
232
+ duration = (
233
+ max(time.time() - self._session_start_time, 0.0)
234
+ if self._session_start_time is not None
235
+ else None
236
+ )
237
+ try:
238
+ await hook_manager.run_session_end_async(
239
+ "other",
240
+ duration_seconds=duration,
241
+ message_count=len(self._history),
242
+ )
243
+ except (OSError, RuntimeError, ConnectionError, ValueError, TypeError) as exc:
244
+ logger.warning(
245
+ "[sdk] SessionEnd hook failed: %s: %s",
246
+ type(exc).__name__,
247
+ exc,
248
+ )
249
+ self._session_end_sent = True
206
250
  await shutdown_mcp_runtime()
207
251
 
208
252
  async def query(self, prompt: str) -> None:
@@ -217,11 +261,26 @@ class RipperdocClient:
217
261
 
218
262
  self._queue = asyncio.Queue()
219
263
 
264
+ hook_result = await hook_manager.run_user_prompt_submit_async(prompt)
265
+ if hook_result.should_block or not hook_result.should_continue:
266
+ reason = (
267
+ hook_result.block_reason
268
+ or hook_result.stop_reason
269
+ or "Prompt blocked by hook."
270
+ )
271
+ blocked_message = create_assistant_message(str(reason))
272
+ self._history.append(blocked_message)
273
+ await self._queue.put(blocked_message)
274
+ await self._queue.put(_END_OF_STREAM)
275
+ self._current_task = asyncio.create_task(asyncio.sleep(0))
276
+ return
277
+ hook_instructions = self._collect_hook_contexts(hook_result)
278
+
220
279
  user_message = create_user_message(prompt)
221
280
  history = list(self._history) + [user_message]
222
281
  self._history.append(user_message)
223
282
 
224
- system_prompt = await self._build_system_prompt(prompt)
283
+ system_prompt = await self._build_system_prompt(prompt, hook_instructions)
225
284
  context = dict(self.options.context)
226
285
 
227
286
  query_context = QueryContext(
@@ -280,7 +339,9 @@ class RipperdocClient:
280
339
 
281
340
  await self._queue.put(_END_OF_STREAM)
282
341
 
283
- async def _build_system_prompt(self, user_prompt: str) -> str:
342
+ async def _build_system_prompt(
343
+ self, user_prompt: str, hook_instructions: Optional[List[str]] = None
344
+ ) -> str:
284
345
  if self.options.system_prompt:
285
346
  return self.options.system_prompt
286
347
 
@@ -299,6 +360,10 @@ class RipperdocClient:
299
360
  memory = build_memory_instructions()
300
361
  if memory:
301
362
  instructions.append(memory)
363
+ if self._session_hook_contexts:
364
+ instructions.extend(self._session_hook_contexts)
365
+ if hook_instructions:
366
+ instructions.extend([text for text in hook_instructions if text])
302
367
 
303
368
  dynamic_tools = await load_dynamic_mcp_tools_async(project_path)
304
369
  if dynamic_tools:
@@ -315,6 +380,16 @@ class RipperdocClient:
315
380
  mcp_instructions=mcp_instructions,
316
381
  )
317
382
 
383
+ def _collect_hook_contexts(self, hook_result: Any) -> List[str]:
384
+ contexts: List[str] = []
385
+ system_message = getattr(hook_result, "system_message", None)
386
+ additional_context = getattr(hook_result, "additional_context", None)
387
+ if system_message:
388
+ contexts.append(str(system_message))
389
+ if additional_context:
390
+ contexts.append(str(additional_context))
391
+ return contexts
392
+
318
393
 
319
394
  async def query(
320
395
  prompt: str,