ripperdoc 0.2.0__py3-none-any.whl → 0.2.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (51)
  1. ripperdoc/__init__.py +1 -1
  2. ripperdoc/cli/cli.py +66 -8
  3. ripperdoc/cli/commands/__init__.py +4 -0
  4. ripperdoc/cli/commands/agents_cmd.py +22 -0
  5. ripperdoc/cli/commands/context_cmd.py +11 -1
  6. ripperdoc/cli/commands/doctor_cmd.py +200 -0
  7. ripperdoc/cli/commands/memory_cmd.py +209 -0
  8. ripperdoc/cli/commands/models_cmd.py +25 -0
  9. ripperdoc/cli/commands/tasks_cmd.py +27 -0
  10. ripperdoc/cli/ui/rich_ui.py +156 -9
  11. ripperdoc/core/agents.py +4 -2
  12. ripperdoc/core/config.py +48 -3
  13. ripperdoc/core/default_tools.py +16 -2
  14. ripperdoc/core/permissions.py +19 -0
  15. ripperdoc/core/query.py +231 -297
  16. ripperdoc/core/query_utils.py +537 -0
  17. ripperdoc/core/system_prompt.py +2 -1
  18. ripperdoc/core/tool.py +13 -0
  19. ripperdoc/tools/background_shell.py +9 -3
  20. ripperdoc/tools/bash_tool.py +15 -0
  21. ripperdoc/tools/file_edit_tool.py +7 -0
  22. ripperdoc/tools/file_read_tool.py +7 -0
  23. ripperdoc/tools/file_write_tool.py +7 -0
  24. ripperdoc/tools/glob_tool.py +55 -15
  25. ripperdoc/tools/grep_tool.py +7 -0
  26. ripperdoc/tools/ls_tool.py +242 -73
  27. ripperdoc/tools/mcp_tools.py +32 -10
  28. ripperdoc/tools/multi_edit_tool.py +11 -0
  29. ripperdoc/tools/notebook_edit_tool.py +6 -3
  30. ripperdoc/tools/task_tool.py +7 -0
  31. ripperdoc/tools/todo_tool.py +159 -25
  32. ripperdoc/tools/tool_search_tool.py +9 -0
  33. ripperdoc/utils/git_utils.py +276 -0
  34. ripperdoc/utils/json_utils.py +28 -0
  35. ripperdoc/utils/log.py +130 -29
  36. ripperdoc/utils/mcp.py +71 -6
  37. ripperdoc/utils/memory.py +14 -1
  38. ripperdoc/utils/message_compaction.py +26 -5
  39. ripperdoc/utils/messages.py +63 -4
  40. ripperdoc/utils/output_utils.py +36 -9
  41. ripperdoc/utils/permissions/path_validation_utils.py +6 -0
  42. ripperdoc/utils/safe_get_cwd.py +4 -0
  43. ripperdoc/utils/session_history.py +27 -9
  44. ripperdoc/utils/todo.py +2 -2
  45. {ripperdoc-0.2.0.dist-info → ripperdoc-0.2.2.dist-info}/METADATA +4 -2
  46. ripperdoc-0.2.2.dist-info/RECORD +86 -0
  47. ripperdoc-0.2.0.dist-info/RECORD +0 -81
  48. {ripperdoc-0.2.0.dist-info → ripperdoc-0.2.2.dist-info}/WHEEL +0 -0
  49. {ripperdoc-0.2.0.dist-info → ripperdoc-0.2.2.dist-info}/entry_points.txt +0 -0
  50. {ripperdoc-0.2.0.dist-info → ripperdoc-0.2.2.dist-info}/licenses/LICENSE +0 -0
  51. {ripperdoc-0.2.0.dist-info → ripperdoc-0.2.2.dist-info}/top_level.txt +0 -0
ripperdoc/cli/commands/models_cmd.py CHANGED
@@ -13,15 +13,22 @@ from ripperdoc.core.config import (
     get_global_config,
     set_model_pointer,
 )
+from ripperdoc.utils.log import get_logger

 from .base import SlashCommand

+logger = get_logger()
+

 def _handle(ui: Any, trimmed_arg: str) -> bool:
     console = ui.console
     tokens = trimmed_arg.split()
     subcmd = tokens[0].lower() if tokens else ""
     config = get_global_config()
+    logger.info(
+        "[models_cmd] Handling /models command",
+        extra={"subcommand": subcmd or "list", "session_id": getattr(ui, "session_id", None)},
+    )

     def print_models_usage() -> None:
         console.print("[bold]/models[/bold] — list configured models")
@@ -167,6 +174,10 @@ def _handle(ui: Any, trimmed_arg: str) -> bool:
             )
         except Exception as exc:
             console.print(f"[red]Failed to save model: {escape(str(exc))}[/red]")
+            logger.exception(
+                "[models_cmd] Failed to save model profile",
+                extra={"profile": profile_name, "session_id": getattr(ui, "session_id", None)},
+            )
             return True

         marker = " (main)" if set_as_main else ""
@@ -255,6 +266,10 @@ def _handle(ui: Any, trimmed_arg: str) -> bool:
             )
         except Exception as exc:
             console.print(f"[red]Failed to update model: {escape(str(exc))}[/red]")
+            logger.exception(
+                "[models_cmd] Failed to update model profile",
+                extra={"profile": profile_name, "session_id": getattr(ui, "session_id", None)},
+            )
             return True

         console.print(f"[green]✓ Model '{escape(profile_name)}' updated[/green]")
@@ -274,6 +289,10 @@ def _handle(ui: Any, trimmed_arg: str) -> bool:
         except Exception as exc:
             console.print(f"[red]Failed to delete model: {escape(str(exc))}[/red]")
             print_models_usage()
+            logger.exception(
+                "[models_cmd] Failed to delete model profile",
+                extra={"profile": target, "session_id": getattr(ui, "session_id", None)},
+            )
             return True

     if subcmd in ("use", "main", "set-main"):
@@ -288,6 +307,10 @@ def _handle(ui: Any, trimmed_arg: str) -> bool:
         except Exception as exc:
             console.print(f"[red]{escape(str(exc))}[/red]")
             print_models_usage()
+            logger.exception(
+                "[models_cmd] Failed to set main model pointer",
+                extra={"profile": target, "session_id": getattr(ui, "session_id", None)},
+            )
             return True

     print_models_usage()
@@ -312,6 +335,8 @@ def _handle(ui: Any, trimmed_arg: str) -> bool:
             markup=False,
         )
         console.print(f" api_key: {'***' if profile.api_key else 'Not set'}", markup=False)
+        if profile.openai_tool_mode:
+            console.print(f" openai_tool_mode: {profile.openai_tool_mode}", markup=False)
     pointer_labels = ", ".join(f"{p}->{v or '-'}" for p, v in pointer_map.items())
     console.print(f"[dim]Pointers: {escape(pointer_labels)}[/dim]")
     return True
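The logging changes in this file follow the pattern used across the command modules in this release: a module-level logger from ripperdoc.utils.log, an info entry when the slash command is handled, and logger.exception with structured extra fields when an operation fails. A minimal sketch of that pattern, assuming a hypothetical save_profile helper (the get_logger import and the extra-field usage come straight from the hunks above):

    from ripperdoc.utils.log import get_logger

    logger = get_logger()

    def _handle(ui, trimmed_arg: str) -> bool:
        # Tag every record with the UI session id so log lines can be correlated per session.
        logger.info(
            "[example_cmd] Handling /example command",
            extra={"args": trimmed_arg, "session_id": getattr(ui, "session_id", None)},
        )
        try:
            save_profile(trimmed_arg)  # hypothetical helper standing in for the real work
        except Exception:
            ui.console.print("[red]Failed to save profile[/red]")
            # logger.exception records the active traceback in addition to the message.
            logger.exception(
                "[example_cmd] Failed to save profile",
                extra={"session_id": getattr(ui, "session_id", None)},
            )
        return True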
ripperdoc/cli/commands/tasks_cmd.py CHANGED
@@ -13,11 +13,15 @@ from ripperdoc.tools.background_shell import (
     kill_background_task,
     list_background_tasks,
 )
+from ripperdoc.utils.log import get_logger

 from typing import Any, Optional
 from .base import SlashCommand


+logger = get_logger()
+
+
 def _format_duration(duration_ms: Optional[float]) -> str:
     """Render milliseconds into a short human-readable duration."""
     if duration_ms is None:
@@ -103,6 +107,10 @@ def _list_tasks(ui: Any) -> bool:
             status = get_background_status(task_id, consume=False)
         except Exception as exc:
             table.add_row(escape(task_id), "[red]error[/]", escape(str(exc)), "-")
+            logger.exception(
+                "[tasks_cmd] Failed to read background task status",
+                extra={"task_id": task_id, "session_id": getattr(ui, "session_id", None)},
+            )
             continue

         command = status.get("command") or ""
@@ -137,6 +145,10 @@ def _kill_task(ui: Any, task_id: str) -> bool:
         return True
     except Exception as exc:
         console.print(f"[red]Failed to read task '{escape(task_id)}': {escape(str(exc))}[/red]")
+        logger.exception(
+            "[tasks_cmd] Failed to read task before kill",
+            extra={"task_id": task_id, "session_id": getattr(ui, "session_id", None)},
+        )
         return True

     if status.get("status") != "running":
@@ -149,6 +161,10 @@ def _kill_task(ui: Any, task_id: str) -> bool:
         killed = asyncio.run(kill_background_task(task_id))
     except Exception as exc:
         console.print(f"[red]Error stopping task {escape(task_id)}: {escape(str(exc))}[/red]")
+        logger.exception(
+            "[tasks_cmd] Error stopping background task",
+            extra={"task_id": task_id, "session_id": getattr(ui, "session_id", None)},
+        )
         return True

     if killed:
@@ -169,6 +185,10 @@ def _show_task(ui: Any, task_id: str) -> bool:
         return True
     except Exception as exc:
         console.print(f"[red]Failed to read task '{escape(task_id)}': {escape(str(exc))}[/red]")
+        logger.exception(
+            "[tasks_cmd] Failed to read task for detail view",
+            extra={"task_id": task_id, "session_id": getattr(ui, "session_id", None)},
+        )
         return True

     details = Table(box=box.SIMPLE_HEAVY, show_header=False)
@@ -208,6 +228,13 @@ def _show_task(ui: Any, task_id: str) -> bool:

 def _handle(ui: Any, args: str) -> bool:
     parts = args.split()
+    logger.info(
+        "[tasks_cmd] Handling /tasks command",
+        extra={
+            "session_id": getattr(ui, "session_id", None),
+            "raw_args": args,
+        },
+    )
     if not parts:
         return _list_tasks(ui)
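For reference, the /tasks handlers above drive the helpers imported from ripperdoc.tools.background_shell. A rough sketch of the kill path as the hunks imply it, with the error handling and Rich output trimmed (stop_task is a hypothetical wrapper; get_background_status and kill_background_task are the calls used in the diff):

    import asyncio

    from ripperdoc.tools.background_shell import get_background_status, kill_background_task

    def stop_task(task_id: str) -> bool:
        # consume=False peeks at the task status without draining buffered output.
        status = get_background_status(task_id, consume=False)
        if status.get("status") != "running":
            return False  # nothing left to stop
        # kill_background_task is a coroutine, so the synchronous CLI path wraps it in asyncio.run.
        return bool(asyncio.run(kill_background_task(task_id)))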
ripperdoc/cli/ui/rich_ui.py CHANGED
@@ -59,12 +59,14 @@ from ripperdoc.utils.messages import (
     create_user_message,
     create_assistant_message,
 )
+from ripperdoc.utils.log import enable_session_file_logging, get_logger

 # Type alias for conversation messages
 ConversationMessage = Union[UserMessage, AssistantMessage, ProgressMessage]


 console = Console()
+logger = get_logger()

 # Keep a small window of recent messages alongside the summary after /compact so
 # the model retains immediate context.
@@ -110,7 +112,13 @@ def create_status_bar() -> Text:
 class RichUI:
     """Rich-based UI for Ripperdoc."""

-    def __init__(self, safe_mode: bool = False, verbose: bool = False):
+    def __init__(
+        self,
+        safe_mode: bool = False,
+        verbose: bool = False,
+        session_id: Optional[str] = None,
+        log_file_path: Optional[Path] = None,
+    ):
         self.console = console
         self.safe_mode = safe_mode
         self.verbose = verbose
@@ -124,7 +132,22 @@ class RichUI:
         self._prompt_session: Optional[PromptSession] = None
         self.project_path = Path.cwd()
         # Track a stable session identifier for the current UI run.
-        self.session_id = str(uuid.uuid4())
+        self.session_id = session_id or str(uuid.uuid4())
+        if log_file_path:
+            self.log_file_path = log_file_path
+            logger.attach_file_handler(self.log_file_path)
+        else:
+            self.log_file_path = enable_session_file_logging(self.project_path, self.session_id)
+        logger.info(
+            "[ui] Initialized Rich UI session",
+            extra={
+                "session_id": self.session_id,
+                "project_path": str(self.project_path),
+                "log_file": str(self.log_file_path),
+                "safe_mode": self.safe_mode,
+                "verbose": self.verbose,
+            },
+        )
         self._session_history = SessionHistory(self.project_path, self.session_id)
         self._permission_checker = (
             make_permission_checker(self.project_path, safe_mode) if safe_mode else None
@@ -138,6 +161,15 @@ class RichUI:
     def _set_session(self, session_id: str) -> None:
         """Switch to a different session id and reset logging."""
         self.session_id = session_id
+        self.log_file_path = enable_session_file_logging(self.project_path, self.session_id)
+        logger.info(
+            "[ui] Switched session",
+            extra={
+                "session_id": self.session_id,
+                "project_path": str(self.project_path),
+                "log_file": str(self.log_file_path),
+            },
+        )
         self._session_history = SessionHistory(self.project_path, session_id)

     def _log_message(self, message: Any) -> None:
@@ -146,7 +178,10 @@ class RichUI:
             self._session_history.append(message)
         except Exception:
             # Logging failures should never interrupt the UI flow
-            return
+            logger.exception(
+                "[ui] Failed to append message to session history",
+                extra={"session_id": self.session_id},
+            )

     def _append_prompt_history(self, text: str) -> None:
         """Append text to the interactive prompt history."""
@@ -156,7 +191,10 @@ class RichUI:
         try:
             session.history.append_string(text)
         except Exception:
-            return
+            logger.exception(
+                "[ui] Failed to append prompt history",
+                extra={"session_id": self.session_id},
+            )

     def replay_conversation(self, messages: List[Dict[str, Any]]) -> None:
         """Render a conversation history in the console and seed prompt history."""
@@ -200,6 +238,7 @@ class RichUI:
         tool_type: Optional[str] = None,
         tool_args: Optional[dict] = None,
         tool_data: Any = None,
+        tool_error: bool = False,
     ) -> None:
         """Display a message in the conversation."""
         if not is_tool:
@@ -211,7 +250,7 @@ class RichUI:
             return

         if tool_type == "result":
-            self._print_tool_result(sender, content, tool_data)
+            self._print_tool_result(sender, content, tool_data, tool_error)
             return

         self._print_generic_tool(sender, content)
@@ -307,8 +346,25 @@ class RichUI:

         self.console.print(f"[dim cyan]{escape(tool_display)}[/]")

-    def _print_tool_result(self, sender: str, content: str, tool_data: Any) -> None:
+    def _print_tool_result(
+        self, sender: str, content: str, tool_data: Any, tool_error: bool = False
+    ) -> None:
         """Render a tool result summary."""
+        failed = tool_error
+        if tool_data is not None:
+            if isinstance(tool_data, dict):
+                failed = failed or (tool_data.get("success") is False)
+            else:
+                success = getattr(tool_data, "success", None)
+                failed = failed or (success is False)
+
+        if failed:
+            if content:
+                self.console.print(f" ⎿ [red]{escape(content)}[/red]")
+            else:
+                self.console.print(f" ⎿ [red]{escape(sender)} failed[/red]")
+            return
+
         if not content:
             self.console.print(" ⎿ [dim]Tool completed[/]")
             return
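The new failure check in _print_tool_result can be read on its own: a result renders in red when the caller passed tool_error=True or when the payload itself reports success=False, whether the payload is a dict or an object. A standalone restatement of that check with a few illustrative payloads (the looks_failed name is ours, not part of the codebase):

    def looks_failed(tool_data, tool_error: bool = False) -> bool:
        # Mirrors the logic added to _print_tool_result above.
        failed = tool_error
        if tool_data is not None:
            if isinstance(tool_data, dict):
                failed = failed or (tool_data.get("success") is False)
            else:
                failed = failed or (getattr(tool_data, "success", None) is False)
        return failed

    assert looks_failed({"success": False}) is True
    assert looks_failed({"success": True}) is False
    assert looks_failed(None, tool_error=True) is True
    assert looks_failed(object()) is False  # no success attribute: getattr returns None, not False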
@@ -576,6 +632,20 @@ class RichUI:
             self.query_context = QueryContext(
                 tools=self.get_default_tools(), safe_mode=self.safe_mode, verbose=self.verbose
             )
+        else:
+            # Clear any prior abort so new queries aren't immediately interrupted.
+            abort_controller = getattr(self.query_context, "abort_controller", None)
+            if abort_controller is not None:
+                abort_controller.clear()
+
+        logger.info(
+            "[ui] Starting query processing",
+            extra={
+                "session_id": self.session_id,
+                "prompt_length": len(user_input),
+                "prompt_preview": user_input[:200],
+            },
+        )

         try:
             context: Dict[str, str] = {}
@@ -585,6 +655,15 @@ class RichUI:
             self.query_context.tools = merge_tools_with_dynamic(
                 self.query_context.tools, dynamic_tools
             )
+            logger.debug(
+                "[ui] Prepared tools and MCP servers",
+                extra={
+                    "session_id": self.session_id,
+                    "tool_count": len(self.query_context.tools),
+                    "mcp_servers": len(servers),
+                    "dynamic_tools": len(dynamic_tools),
+                },
+            )
             mcp_instructions = format_mcp_instructions(servers)
             base_system_prompt = build_system_prompt(
                 self.query_context.tools,
@@ -617,6 +696,16 @@ class RichUI:
             usage_status = get_context_usage_status(
                 used_tokens, max_context_tokens, auto_compact_enabled
             )
+            logger.debug(
+                "[ui] Context usage snapshot",
+                extra={
+                    "session_id": self.session_id,
+                    "used_tokens": used_tokens,
+                    "max_context_tokens": max_context_tokens,
+                    "percent_used": round(usage_status.percent_used, 2),
+                    "auto_compact_enabled": auto_compact_enabled,
+                },
+            )

             if usage_status.is_above_warning:
                 console.print(
@@ -639,6 +728,16 @@ class RichUI:
                     f"[yellow]Auto-compacted conversation (saved ~{compaction.tokens_saved} tokens). "
                     f"Estimated usage: {compaction.tokens_after}/{max_context_tokens} tokens.[/yellow]"
                 )
+                logger.info(
+                    "[ui] Auto-compacted conversation",
+                    extra={
+                        "session_id": self.session_id,
+                        "tokens_before": compaction.tokens_before,
+                        "tokens_after": compaction.tokens_after,
+                        "tokens_saved": compaction.tokens_saved,
+                        "cleared_tool_ids": list(compaction.cleared_tool_ids),
+                    },
+                )

             spinner = Spinner(console, "Thinking...", spinner="dots")
             # Wrap permission checker to pause the spinner while waiting for user input.
@@ -715,6 +814,7 @@ class RichUI:
                 ):
                     tool_name = "Tool"
                     tool_data = getattr(message, "tool_use_result", None)
+                    is_error = bool(getattr(block, "is_error", False))

                     tool_use_id = getattr(block, "tool_use_id", None)
                     entry = tool_registry.get(tool_use_id) if tool_use_id else None
@@ -738,6 +838,7 @@ class RichUI:
                         is_tool=True,
                         tool_type="result",
                         tool_data=tool_data,
+                        tool_error=is_error,
                     )

                 elif message.type == "progress" and isinstance(message, ProgressMessage):
@@ -758,16 +859,30 @@ class RichUI:
                     self._log_message(message)
                     messages.append(message) # type: ignore[arg-type]
             except Exception as e:
+                logger.exception(
+                    "[ui] Unhandled error while processing streamed query response",
+                    extra={"session_id": self.session_id},
+                )
                 self.display_message("System", f"Error: {str(e)}", is_tool=True)
             finally:
                 # Ensure spinner stops even on exceptions
                 try:
                     spinner.stop()
                 except Exception:
-                    pass
+                    logger.exception(
+                        "[ui] Failed to stop spinner", extra={"session_id": self.session_id}
+                    )

                 # Update conversation history
                 self.conversation_messages = messages
+                logger.info(
+                    "[ui] Query processing completed",
+                    extra={
+                        "session_id": self.session_id,
+                        "conversation_messages": len(self.conversation_messages),
+                        "project_path": str(self.project_path),
+                    },
+                )
         finally:
             await shutdown_mcp_runtime()
             await shutdown_mcp_runtime()
@@ -838,6 +953,10 @@ class RichUI:
         console.print("[dim]Tip: type '/' then press Tab to see available commands.[/dim]\n")

         session = self.get_prompt_session()
+        logger.info(
+            "[ui] Starting interactive loop",
+            extra={"session_id": self.session_id, "log_file": str(self.log_file_path)},
+        )

         while not self._should_exit:
             try:
@@ -854,6 +973,10 @@ class RichUI:

                 # Handle slash commands locally
                 if user_input.startswith("/"):
+                    logger.debug(
+                        "[ui] Received slash command",
+                        extra={"session_id": self.session_id, "command": user_input},
+                    )
                     handled = self.handle_slash_command(user_input)
                     if self._should_exit:
                         break
@@ -862,6 +985,14 @@ class RichUI:
                     continue

                 # Process the query
+                logger.info(
+                    "[ui] Processing interactive prompt",
+                    extra={
+                        "session_id": self.session_id,
+                        "prompt_length": len(user_input),
+                        "prompt_preview": user_input[:200],
+                    },
+                )
                 asyncio.run(self.process_query(user_input))

                 console.print() # Add spacing between interactions
@@ -874,6 +1005,9 @@ class RichUI:
                 break
             except Exception as e:
                 console.print(f"[red]Error: {escape(str(e))}[/]")
+                logger.exception(
+                    "[ui] Error in interactive loop", extra={"session_id": self.session_id}
+                )
                 if self.verbose:
                     import traceback

@@ -903,6 +1037,9 @@ class RichUI:
             )
         except Exception as e:
             console.print(f"[red]Error during compaction: {escape(str(e))}[/red]")
+            logger.exception(
+                "[ui] Error during manual compaction", extra={"session_id": self.session_id}
+            )
             return
         finally:
             spinner.stop()
@@ -1009,7 +1146,12 @@ def check_onboarding_rich() -> bool:
     return check_onboarding()


-def main_rich(safe_mode: bool = False, verbose: bool = False) -> None:
+def main_rich(
+    safe_mode: bool = False,
+    verbose: bool = False,
+    session_id: Optional[str] = None,
+    log_file_path: Optional[Path] = None,
+) -> None:
     """Main entry point for Rich interface."""

     # Ensure onboarding is complete
@@ -1017,7 +1159,12 @@ def main_rich(safe_mode: bool = False, verbose: bool = False) -> None:
         sys.exit(1)

     # Run the Rich UI
-    ui = RichUI(safe_mode=safe_mode, verbose=verbose)
+    ui = RichUI(
+        safe_mode=safe_mode,
+        verbose=verbose,
+        session_id=session_id,
+        log_file_path=log_file_path,
+    )
     ui.run()
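With the widened signatures above, the CLI entry point (or any other caller) can pin the session id and log destination instead of letting RichUI generate them. A sketch of how that might be wired, assuming the caller owns the session id; the log path shown is illustrative, not a documented location:

    import uuid
    from pathlib import Path

    from ripperdoc.cli.ui.rich_ui import main_rich

    session_id = str(uuid.uuid4())
    log_file = Path(".ripperdoc") / "logs" / f"{session_id}.log"  # illustrative path

    # RichUI attaches a file handler to this path instead of creating a fresh session log.
    main_rich(safe_mode=True, verbose=False, session_id=session_id, log_file_path=log_file)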
ripperdoc/core/agents.py CHANGED
@@ -109,7 +109,7 @@ def _split_frontmatter(raw_text: str) -> Tuple[Dict[str, Any], str]:
         try:
             frontmatter = yaml.safe_load(frontmatter_text) or {}
         except Exception as exc:  # pragma: no cover - defensive
-            logger.error(f"Invalid frontmatter in agent file: {exc}")
+            logger.exception("Invalid frontmatter in agent file", extra={"error": str(exc)})
             return {"__error__": f"Invalid frontmatter: {exc}"}, body
         return frontmatter, body
     return {}, raw_text
@@ -136,7 +136,9 @@ def _parse_agent_file(
     try:
         text = path.read_text(encoding="utf-8")
     except Exception as exc:
-        logger.error(f"Failed to read agent file {path}: {exc}")
+        logger.exception(
+            "Failed to read agent file", extra={"error": str(exc), "path": str(path)}
+        )
         return None, f"Failed to read agent file {path}: {exc}"

     frontmatter, body = _split_frontmatter(text)
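The switch from logger.error to logger.exception is not just cosmetic: called inside an except block, logger.exception logs at ERROR level and also records the active traceback, so agent-file failures show where the YAML or I/O error originated. A minimal illustration of the difference using the standard library (this is stock logging behaviour, not anything ripperdoc-specific):

    import logging

    logging.basicConfig(level=logging.INFO)
    log = logging.getLogger("demo")

    try:
        raise ValueError("bad frontmatter")
    except ValueError as exc:
        log.error("error records only the message: %s", exc)
        log.exception("exception records the message plus the traceback")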
ripperdoc/core/config.py CHANGED
@@ -7,7 +7,7 @@ including API keys, model settings, and user preferences.
 import json
 import os
 from pathlib import Path
-from typing import Dict, Optional
+from typing import Dict, Optional, Literal
 from pydantic import BaseModel, Field
 from enum import Enum

@@ -105,6 +105,9 @@ class ModelProfile(BaseModel):
     temperature: float = 0.7
     # Total context window in tokens (if known). Falls back to heuristics when unset.
     context_window: Optional[int] = None
+    # Tool handling for OpenAI-compatible providers. "native" uses tool_calls, "text" flattens tool
+    # interactions into plain text to support providers that reject tool roles.
+    openai_tool_mode: Literal["native", "text"] = "native"


 class ModelPointers(BaseModel):
@@ -185,17 +188,36 @@ class ConfigManager:
             try:
                 data = json.loads(self.global_config_path.read_text())
                 self._global_config = GlobalConfig(**data)
+                logger.debug(
+                    "[config] Loaded global configuration",
+                    extra={
+                        "path": str(self.global_config_path),
+                        "profile_count": len(self._global_config.model_profiles),
+                    },
+                )
             except Exception as e:
-                logger.error(f"Error loading global config: {e}")
+                logger.exception("Error loading global config", extra={"error": str(e)})
                 self._global_config = GlobalConfig()
         else:
             self._global_config = GlobalConfig()
+            logger.debug(
+                "[config] Global config not found; using defaults",
+                extra={"path": str(self.global_config_path)},
+            )
         return self._global_config

     def save_global_config(self, config: GlobalConfig) -> None:
         """Save global configuration."""
         self._global_config = config
         self.global_config_path.write_text(config.model_dump_json(indent=2))
+        logger.debug(
+            "[config] Saved global configuration",
+            extra={
+                "path": str(self.global_config_path),
+                "profile_count": len(config.model_profiles),
+                "pointers": config.model_pointers.model_dump(),
+            },
+        )

     def get_project_config(self, project_path: Optional[Path] = None) -> ProjectConfig:
         """Load and return project configuration."""
@@ -215,11 +237,26 @@
             try:
                 data = json.loads(config_path.read_text())
                 self._project_config = ProjectConfig(**data)
+                logger.debug(
+                    "[config] Loaded project config",
+                    extra={
+                        "path": str(config_path),
+                        "project_path": str(self.current_project_path),
+                        "allowed_tools": len(self._project_config.allowed_tools),
+                    },
+                )
             except Exception as e:
-                logger.error(f"Error loading project config: {e}")
+                logger.exception(
+                    "Error loading project config",
+                    extra={"error": str(e), "path": str(config_path)},
+                )
                 self._project_config = ProjectConfig()
         else:
             self._project_config = ProjectConfig()
+            logger.debug(
+                "[config] Project config not found; using defaults",
+                extra={"path": str(config_path), "project_path": str(self.current_project_path)},
+            )

         return self._project_config

@@ -239,6 +276,14 @@
         config_path = config_dir / "config.json"
         self._project_config = config
         config_path.write_text(config.model_dump_json(indent=2))
+        logger.debug(
+            "[config] Saved project config",
+            extra={
+                "path": str(config_path),
+                "project_path": str(self.current_project_path),
+                "allowed_tools": len(config.allowed_tools),
+            },
+        )

     def get_api_key(self, provider: ProviderType) -> Optional[str]:
         """Get API key for a provider."""
ripperdoc/core/default_tools.py CHANGED
@@ -26,6 +26,9 @@ from ripperdoc.tools.mcp_tools import (
     ReadMcpResourceTool,
     load_dynamic_mcp_tools_sync,
 )
+from ripperdoc.utils.log import get_logger
+
+logger = get_logger()


 def get_default_tools() -> List[Tool[Any, Any]]:
@@ -49,15 +52,26 @@ def get_default_tools() -> List[Tool[Any, Any]]:
         ListMcpResourcesTool(),
         ReadMcpResourceTool(),
     ]
+    dynamic_tools: List[Tool[Any, Any]] = []
     try:
         mcp_tools = load_dynamic_mcp_tools_sync()
         # Filter to ensure only Tool instances are added
         for tool in mcp_tools:
             if isinstance(tool, Tool):
                 base_tools.append(tool)
+                dynamic_tools.append(tool)
     except Exception:
         # If MCP runtime is not available, continue with base tools only.
-        pass
+        logger.exception("[default_tools] Failed to load dynamic MCP tools")

     task_tool = TaskTool(lambda: base_tools)
-    return base_tools + [task_tool]
+    all_tools = base_tools + [task_tool]
+    logger.debug(
+        "[default_tools] Built tool inventory",
+        extra={
+            "base_tools": len(base_tools),
+            "dynamic_mcp_tools": len(dynamic_tools),
+            "total_tools": len(all_tools),
+        },
+    )
+    return all_tools
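Callers of get_default_tools still get the same list shape as in 0.2.0: the built-in tools, any dynamic MCP tools that loaded, and a trailing TaskTool; the difference is that an MCP loading failure is now logged with a traceback instead of being silently swallowed. A minimal usage sketch:

    from ripperdoc.core.default_tools import get_default_tools

    tools = get_default_tools()
    # Even if MCP tools fail to load, the base tools and the TaskTool are still returned.
    print(f"{len(tools)} tools available")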