ripperdoc-0.2.0-py3-none-any.whl → ripperdoc-0.2.3-py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in the public registry.
Files changed (65)
  1. ripperdoc/__init__.py +1 -1
  2. ripperdoc/cli/cli.py +74 -9
  3. ripperdoc/cli/commands/__init__.py +4 -0
  4. ripperdoc/cli/commands/agents_cmd.py +30 -4
  5. ripperdoc/cli/commands/context_cmd.py +11 -1
  6. ripperdoc/cli/commands/cost_cmd.py +5 -0
  7. ripperdoc/cli/commands/doctor_cmd.py +208 -0
  8. ripperdoc/cli/commands/memory_cmd.py +202 -0
  9. ripperdoc/cli/commands/models_cmd.py +61 -6
  10. ripperdoc/cli/commands/resume_cmd.py +4 -2
  11. ripperdoc/cli/commands/status_cmd.py +1 -1
  12. ripperdoc/cli/commands/tasks_cmd.py +27 -0
  13. ripperdoc/cli/ui/rich_ui.py +258 -11
  14. ripperdoc/cli/ui/thinking_spinner.py +128 -0
  15. ripperdoc/core/agents.py +14 -4
  16. ripperdoc/core/config.py +56 -3
  17. ripperdoc/core/default_tools.py +16 -2
  18. ripperdoc/core/permissions.py +19 -0
  19. ripperdoc/core/providers/__init__.py +31 -0
  20. ripperdoc/core/providers/anthropic.py +136 -0
  21. ripperdoc/core/providers/base.py +187 -0
  22. ripperdoc/core/providers/gemini.py +172 -0
  23. ripperdoc/core/providers/openai.py +142 -0
  24. ripperdoc/core/query.py +510 -386
  25. ripperdoc/core/query_utils.py +578 -0
  26. ripperdoc/core/system_prompt.py +2 -1
  27. ripperdoc/core/tool.py +16 -1
  28. ripperdoc/sdk/client.py +12 -1
  29. ripperdoc/tools/background_shell.py +63 -21
  30. ripperdoc/tools/bash_tool.py +48 -13
  31. ripperdoc/tools/file_edit_tool.py +20 -0
  32. ripperdoc/tools/file_read_tool.py +23 -0
  33. ripperdoc/tools/file_write_tool.py +20 -0
  34. ripperdoc/tools/glob_tool.py +59 -15
  35. ripperdoc/tools/grep_tool.py +7 -0
  36. ripperdoc/tools/ls_tool.py +246 -73
  37. ripperdoc/tools/mcp_tools.py +32 -10
  38. ripperdoc/tools/multi_edit_tool.py +23 -0
  39. ripperdoc/tools/notebook_edit_tool.py +18 -3
  40. ripperdoc/tools/task_tool.py +7 -0
  41. ripperdoc/tools/todo_tool.py +157 -25
  42. ripperdoc/tools/tool_search_tool.py +17 -4
  43. ripperdoc/utils/file_watch.py +134 -0
  44. ripperdoc/utils/git_utils.py +274 -0
  45. ripperdoc/utils/json_utils.py +27 -0
  46. ripperdoc/utils/log.py +129 -29
  47. ripperdoc/utils/mcp.py +71 -6
  48. ripperdoc/utils/memory.py +12 -1
  49. ripperdoc/utils/message_compaction.py +22 -5
  50. ripperdoc/utils/messages.py +72 -17
  51. ripperdoc/utils/output_utils.py +34 -9
  52. ripperdoc/utils/permissions/path_validation_utils.py +6 -0
  53. ripperdoc/utils/prompt.py +17 -0
  54. ripperdoc/utils/safe_get_cwd.py +4 -0
  55. ripperdoc/utils/session_history.py +27 -9
  56. ripperdoc/utils/session_usage.py +7 -0
  57. ripperdoc/utils/shell_utils.py +159 -0
  58. ripperdoc/utils/todo.py +2 -2
  59. {ripperdoc-0.2.0.dist-info → ripperdoc-0.2.3.dist-info}/METADATA +4 -2
  60. ripperdoc-0.2.3.dist-info/RECORD +95 -0
  61. ripperdoc-0.2.0.dist-info/RECORD +0 -81
  62. {ripperdoc-0.2.0.dist-info → ripperdoc-0.2.3.dist-info}/WHEEL +0 -0
  63. {ripperdoc-0.2.0.dist-info → ripperdoc-0.2.3.dist-info}/entry_points.txt +0 -0
  64. {ripperdoc-0.2.0.dist-info → ripperdoc-0.2.3.dist-info}/licenses/LICENSE +0 -0
  65. {ripperdoc-0.2.0.dist-info → ripperdoc-0.2.3.dist-info}/top_level.txt +0 -0
ripperdoc/core/query_utils.py ADDED
@@ -0,0 +1,578 @@
+ """Utility helpers for query handling, tool schemas, and message normalization."""
+
+ from __future__ import annotations
+
+ import json
+ import re
+ from typing import Any, Dict, List, Mapping, Optional, Union
+ from uuid import uuid4
+
+ from json_repair import repair_json
+ from pydantic import ValidationError
+
+ from ripperdoc.core.config import ModelProfile, ProviderType, get_global_config
+ from ripperdoc.core.tool import Tool, build_tool_description, tool_input_examples
+ from ripperdoc.utils.json_utils import safe_parse_json
+ from ripperdoc.utils.log import get_logger
+ from ripperdoc.utils.messages import (
+     AssistantMessage,
+     MessageContent,
+     ProgressMessage,
+     UserMessage,
+     create_assistant_message,
+     create_user_message,
+ )
+
+ logger = get_logger()
+
+
+ def _safe_int(value: object) -> int:
+     """Best-effort int conversion for usage counters."""
+     try:
+         if value is None:
+             return 0
+         if isinstance(value, bool):
+             return int(value)
+         if isinstance(value, (int, float)):
+             return int(value)
+         if isinstance(value, str):
+             return int(value)
+         if hasattr(value, "__int__"):
+             return int(value)  # type: ignore[arg-type]
+         return 0
+     except (TypeError, ValueError):
+         return 0
+
+
+ def _get_usage_field(usage: Optional[Mapping[str, Any] | object], field: str) -> int:
+     """Fetch a usage field from either a dict or object."""
+     if usage is None:
+         return 0
+     if isinstance(usage, dict):
+         return _safe_int(usage.get(field))
+     return _safe_int(getattr(usage, field, 0))
+
+
+ def anthropic_usage_tokens(usage: Optional[Mapping[str, Any] | object]) -> Dict[str, int]:
+     """Extract token counts from an Anthropic response usage payload."""
+     return {
+         "input_tokens": _get_usage_field(usage, "input_tokens"),
+         "output_tokens": _get_usage_field(usage, "output_tokens"),
+         "cache_read_input_tokens": _get_usage_field(usage, "cache_read_input_tokens"),
+         "cache_creation_input_tokens": _get_usage_field(usage, "cache_creation_input_tokens"),
+     }
+
+
+ def openai_usage_tokens(usage: Optional[Mapping[str, Any] | object]) -> Dict[str, int]:
+     """Extract token counts from an OpenAI-compatible response usage payload."""
+     prompt_details = None
+     if isinstance(usage, dict):
+         prompt_details = usage.get("prompt_tokens_details")
+     else:
+         prompt_details = getattr(usage, "prompt_tokens_details", None)
+
+     cache_read_tokens = _get_usage_field(prompt_details, "cached_tokens") if prompt_details else 0
+
+     return {
+         "input_tokens": _get_usage_field(usage, "prompt_tokens"),
+         "output_tokens": _get_usage_field(usage, "completion_tokens"),
+         "cache_read_input_tokens": cache_read_tokens,
+         "cache_creation_input_tokens": 0,
+     }
+
+
+ def estimate_cost_usd(model_profile: ModelProfile, usage_tokens: Dict[str, int]) -> float:
+     """Compute USD cost using per-1M token pricing from the model profile."""
+     input_price = getattr(model_profile, "input_cost_per_million_tokens", 0.0) or 0.0
+     output_price = getattr(model_profile, "output_cost_per_million_tokens", 0.0) or 0.0
+
+     total_input_tokens = (
+         _safe_int(usage_tokens.get("input_tokens"))
+         + _safe_int(usage_tokens.get("cache_read_input_tokens"))
+         + _safe_int(usage_tokens.get("cache_creation_input_tokens"))
+     )
+     output_tokens = _safe_int(usage_tokens.get("output_tokens"))
+
+     cost = (total_input_tokens * input_price + output_tokens * output_price) / 1_000_000
+     return float(cost)
+
+
+ def resolve_model_profile(model: str) -> ModelProfile:
+     """Resolve a model pointer to a concrete profile, falling back to a safe default."""
+     config = get_global_config()
+     profile_name = getattr(config.model_pointers, model, None) or model
+     model_profile = config.model_profiles.get(profile_name)
+     if model_profile is None:
+         fallback_profile = getattr(config.model_pointers, "main", "default")
+         model_profile = config.model_profiles.get(fallback_profile)
+     if not model_profile:
+         logger.warning(
+             "[config] No model profile found; using built-in default profile",
+             extra={"model_pointer": model},
+         )
+         return ModelProfile(provider=ProviderType.OPENAI_COMPATIBLE, model="gpt-4o-mini")
+     return model_profile
+
+
+ def determine_tool_mode(model_profile: ModelProfile) -> str:
+     """Return configured tool mode for provider."""
+     if model_profile.provider != ProviderType.OPENAI_COMPATIBLE:
+         return "native"
+     configured = getattr(model_profile, "openai_tool_mode", "native") or "native"
+     configured = configured.lower()
+     if configured not in {"native", "text"}:
+         configured = getattr(get_global_config(), "openai_tool_mode", "native") or "native"
+         configured = configured.lower()
+     return configured if configured in {"native", "text"} else "native"
+
+
+ def _parse_text_mode_json_blocks(text: str) -> Optional[List[Dict[str, Any]]]:
+     """Parse a JSON code block or raw JSON string into content blocks for text mode."""
+     if not text or not isinstance(text, str):
+         return None
+
+     code_blocks = re.findall(r"```(?:\s*json)?\s*([\s\S]*?)\s*```", text, flags=re.IGNORECASE)
+     candidates = [blk.strip() for blk in code_blocks if blk.strip()]
+
+     def _normalize_blocks(parsed: object) -> Optional[List[Dict[str, Any]]]:
+         raw_blocks = parsed if isinstance(parsed, list) else [parsed]
+         normalized: List[Dict[str, Any]] = []
+         for raw in raw_blocks:
+             if not isinstance(raw, dict):
+                 continue
+             block_type = raw.get("type")
+             if block_type == "text":
+                 text_value = raw.get("text") or raw.get("content")
+                 if isinstance(text_value, str) and text_value:
+                     normalized.append({"type": "text", "text": text_value})
+             elif block_type == "tool_use":
+                 tool_name = raw.get("tool") or raw.get("name")
+                 if not isinstance(tool_name, str) or not tool_name:
+                     continue
+                 tool_use_id = raw.get("tool_use_id") or raw.get("id") or str(uuid4())
+                 input_value = raw.get("input") or {}
+                 if not isinstance(input_value, dict):
+                     input_value = _normalize_tool_args(input_value)
+                 normalized.append(
+                     {
+                         "type": "tool_use",
+                         "tool_use_id": str(tool_use_id),
+                         "name": tool_name,
+                         "input": input_value,
+                     }
+                 )
+         return normalized if normalized else None
+
+     last_error: Optional[str] = None
+
+     for candidate in candidates:
+         if not candidate:
+             continue
+
+         parsed: Any = None
+         try:
+             parsed = json.loads(candidate)
+         except json.JSONDecodeError as exc:
+             last_error = str(exc)
+             parsed = repair_json(candidate, return_objects=True, ensure_ascii=False)
+
+         if parsed is None or parsed == "":
+             continue
+
+         normalized = _normalize_blocks(parsed)
+         if normalized:
+             return normalized
+
+         last_error = "Parsed JSON did not contain valid content blocks."
+
+     if last_error:
+         error_text = (
+             f"JSON parsing failed: {last_error} "
+             "Please resend a valid JSON array of content blocks inside a ```json``` code block."
+         )
+         return [{"type": "text", "text": error_text}]
+
+     return None
+
+
+ def _tool_prompt_for_text_mode(tools: List[Tool[Any, Any]]) -> str:
+     """Build a system hint describing available tools and the expected JSON format."""
+     if not tools:
+         return ""
+
+     lines = [
+         "You are in text-only tool mode. Tools are not auto-invoked by the API.",
+         "Respond with one Markdown `json` code block containing a JSON array of content blocks.",
+         'Each block must include `type`; use {"type": "text", "text": "<message>"} for text and '
+         '{"type": "tool_use", "tool_use_id": "<tool_id>", "tool": "<tool_name>", "input": { ... required params ... }} '
+         "for tool calls. Add multiple `tool_use` blocks if you need multiple tools.",
+         "Include your natural language reply as a `text` block, followed by any `tool_use` blocks.",
+         "Only include the JSON array inside the code block - no extra prose.",
+         "Available tools:",
+     ]
+
+     for tool in tools:
+         required_fields: List[str] = []
+         try:
+             for fname, finfo in getattr(tool.input_schema, "model_fields", {}).items():
+                 is_req = False
+                 if hasattr(finfo, "is_required"):
+                     try:
+                         is_req = bool(finfo.is_required())
+                     except Exception:
+                         is_req = False
+                 required_fields.append(f"{fname}{' (required)' if is_req else ''}")
+         except Exception:
+             required_fields = []
+
+         required_str = ", ".join(required_fields) if required_fields else "see input schema"
+         lines.append(f"- {tool.name}: fields {required_str}")
+
+         schema_json = ""
+         try:
+             schema_json = json.dumps(
+                 tool.input_schema.model_json_schema(), ensure_ascii=False, indent=2
+             )
+         except (AttributeError, TypeError, ValueError) as exc:
+             logger.debug(
+                 "[tool_prompt] Failed to render input_schema",
+                 extra={"tool": getattr(tool, "name", None), "error": str(exc)},
+             )
+         if schema_json:
+             lines.append(" input schema (JSON):")
+             lines.append(" ```json")
+             lines.append(f" {schema_json}")
+             lines.append(" ```")
+
+     example_blocks = [
+         {"type": "text", "text": "Okay, let me take a look at the README.md file for you."},
+         {
+             "type": "tool_use",
+             "tool_use_id": "tool_id_000001",
+             "tool": "View",
+             "input": {"file_path": "README.md"},
+         },
+     ]
+     lines.append("Example:")
+     lines.append("```json")
+     lines.append(json.dumps(example_blocks, ensure_ascii=False, indent=2))
+     lines.append("```")
+
+     return "\n".join(lines)
+
+
+ def text_mode_history(
+     messages: List[Union[UserMessage, AssistantMessage, ProgressMessage]],
+ ) -> List[Union[UserMessage, AssistantMessage]]:
+     """Convert a message history into text-only form for text mode."""
+
+     def _normalize_block(block: Any) -> Optional[Dict[str, Any]]:
+         blk = MessageContent(**block) if isinstance(block, dict) else block
+         btype = getattr(blk, "type", None)
+         if btype == "text":
+             text_val = getattr(blk, "text", None) or getattr(blk, "content", None) or ""
+             return {"type": "text", "text": text_val}
+         if btype == "tool_use":
+             return {
+                 "type": "tool_use",
+                 "tool_use_id": getattr(blk, "tool_use_id", None) or getattr(blk, "id", None) or "",
+                 "tool": getattr(blk, "name", None) or "",
+                 "input": getattr(blk, "input", None) or {},
+             }
+         if btype == "tool_result":
+             result_block: Dict[str, Any] = {
+                 "type": "tool_result",
+                 "tool_use_id": getattr(blk, "tool_use_id", None) or getattr(blk, "id", None) or "",
+                 "text": getattr(blk, "text", None) or getattr(blk, "content", None) or "",
+             }
+             is_error = getattr(blk, "is_error", None)
+             if is_error is not None:
+                 result_block["is_error"] = is_error
+             return result_block
+         text_val = getattr(blk, "text", None) or getattr(blk, "content", None)
+         if text_val is not None:
+             return {"type": "text", "text": text_val}
+         return None
+
+     converted: List[Union[UserMessage, AssistantMessage]] = []
+     for msg in messages:
+         msg_type = getattr(msg, "type", None)
+         if msg_type == "progress" or msg_type is None:
+             continue
+         content = getattr(getattr(msg, "message", None), "content", None)
+         text_content: Optional[str] = None
+         if isinstance(content, list):
+             normalized_blocks = []
+             for block in content:
+                 block_type = getattr(block, "type", None) or (
+                     block.get("type") if isinstance(block, dict) else None
+                 )
+                 block_text = (
+                     getattr(block, "text", None)
+                     if hasattr(block, "text")
+                     else (block.get("text") if isinstance(block, dict) else None)
+                 )
+                 if block_type == "text" and isinstance(block_text, str):
+                     parsed_nested = _parse_text_mode_json_blocks(block_text)
+                     if parsed_nested:
+                         normalized_blocks.extend(parsed_nested)
+                         continue
+                 norm = _normalize_block(block)
+                 if norm:
+                     normalized_blocks.append(norm)
+             if normalized_blocks:
+                 json_payload = json.dumps(normalized_blocks, ensure_ascii=False, indent=2)
+                 text_content = f"```json\n{json_payload}\n```"
+         elif isinstance(content, str):
+             parsed_blocks = _parse_text_mode_json_blocks(content)
+             if parsed_blocks:
+                 text_content = (
+                     f"```json\n{json.dumps(parsed_blocks, ensure_ascii=False, indent=2)}\n```"
+                 )
+             else:
+                 text_content = content
+         else:
+             text_content = content if isinstance(content, str) else None
+         if not text_content:
+             continue
+         if msg_type == "user":
+             converted.append(create_user_message(text_content))
+         elif msg_type == "assistant":
+             converted.append(create_assistant_message(text_content))
+     return converted
+
+
+ def _maybe_convert_json_block_to_tool_use(
+     content_blocks: List[Dict[str, Any]],
+ ) -> List[Dict[str, Any]]:
+     """Convert any text blocks containing JSON content to structured content blocks."""
+     if not content_blocks:
+         return content_blocks
+
+     new_blocks: List[Dict[str, Any]] = []
+     converted_count = 0
+
+     for block in content_blocks:
+         if block.get("type") != "text":
+             new_blocks.append(block)
+             continue
+
+         text = block.get("text")
+         if not isinstance(text, str):
+             new_blocks.append(block)
+             continue
+
+         parsed_blocks = _parse_text_mode_json_blocks(text)
+         if not parsed_blocks:
+             new_blocks.append(block)
+             continue
+
+         for parsed in parsed_blocks:
+             if parsed.get("type") == "tool_use":
+                 new_blocks.append(
+                     {
+                         "type": "tool_use",
+                         "tool_use_id": parsed.get("tool_use_id") or str(uuid4()),
+                         "name": parsed.get("name") or parsed.get("tool"),
+                         "input": parsed.get("input") or {},
+                     }
+                 )
+             elif parsed.get("type") == "text":
+                 new_blocks.append({"type": "text", "text": parsed.get("text") or ""})
+         converted_count += 1
+
+     if converted_count:
+         logger.debug(
+             "[query_llm] Converting JSON code block to structured content blocks",
+             extra={"block_count": len(new_blocks)},
+         )
+     return new_blocks
+
+
+ def _normalize_tool_args(raw_args: Any) -> Dict[str, Any]:
+     """Ensure tool arguments are returned as a dict, handling double-encoded strings."""
+     candidate = raw_args
+
+     for _ in range(2):
+         if isinstance(candidate, dict):
+             return candidate
+         if isinstance(candidate, str):
+             candidate = safe_parse_json(candidate, log_error=False)
+             continue
+         break
+
+     if isinstance(candidate, dict):
+         return candidate
+
+     preview = str(raw_args)
+     preview = preview[:200] if len(preview) > 200 else preview
+     logger.debug(
+         "[query_llm] Tool arguments not a dict; defaulting to empty object",
+         extra={"preview": preview},
+     )
+     return {}
+
+
+ def build_full_system_prompt(
+     system_prompt: str, context: Dict[str, str], tool_mode: str, tools: List[Tool[Any, Any]]
+ ) -> str:
+     """Compose the final system prompt including context and tool hints."""
+     full_prompt = system_prompt
+     if context:
+         context_str = "\n".join(f"{k}: {v}" for k, v in context.items())
+         full_prompt = f"{system_prompt}\n\nContext:\n{context_str}"
+     if tool_mode == "text":
+         tool_hint = _tool_prompt_for_text_mode(tools)
+         if tool_hint:
+             full_prompt = f"{full_prompt}\n\n{tool_hint}"
+     return full_prompt
+
+
+ def log_openai_messages(normalized_messages: List[Dict[str, Any]]) -> None:
+     """Trace normalized messages for OpenAI calls to simplify debugging."""
+     summary_parts = []
+     for idx, message in enumerate(normalized_messages):
+         role = message.get("role")
+         tool_calls = message.get("tool_calls")
+         tool_call_id = message.get("tool_call_id")
+         ids = [tc.get("id") for tc in tool_calls] if tool_calls else []
+         summary_parts.append(
+             f"{idx}:{role}"
+             + (f" tool_calls={ids}" if ids else "")
+             + (f" tool_call_id={tool_call_id}" if tool_call_id else "")
+         )
+     logger.debug(f"[query_llm] OpenAI normalized messages: {' | '.join(summary_parts)}")
+
+
+ async def build_anthropic_tool_schemas(tools: List[Tool[Any, Any]]) -> List[Dict[str, Any]]:
+     """Render tool schemas in Anthropic format."""
+     schemas = []
+     for tool in tools:
+         description = await build_tool_description(tool, include_examples=True, max_examples=2)
+         schema: Dict[str, Any] = {
+             "name": tool.name,
+             "description": description,
+             "input_schema": tool.input_schema.model_json_schema(),
+             "defer_loading": bool(getattr(tool, "defer_loading", lambda: False)()),
+         }
+         examples = tool_input_examples(tool, limit=5)
+         if examples:
+             schema["input_examples"] = examples
+         schemas.append(schema)
+     return schemas
+
+
+ async def build_openai_tool_schemas(tools: List[Tool[Any, Any]]) -> List[Dict[str, Any]]:
+     """Render tool schemas in OpenAI function-calling format."""
+     openai_tools = []
+     for tool in tools:
+         description = await build_tool_description(tool, include_examples=True, max_examples=2)
+         openai_tools.append(
+             {
+                 "type": "function",
+                 "function": {
+                     "name": tool.name,
+                     "description": description,
+                     "parameters": tool.input_schema.model_json_schema(),
+                 },
+             }
+         )
+     return openai_tools
+
+
+ def content_blocks_from_anthropic_response(response: Any, tool_mode: str) -> List[Dict[str, Any]]:
+     """Normalize Anthropic response content to our internal block format."""
+     blocks: List[Dict[str, Any]] = []
+     for block in getattr(response, "content", []) or []:
+         btype = getattr(block, "type", None)
+         if btype == "text":
+             blocks.append({"type": "text", "text": getattr(block, "text", "")})
+         elif btype == "tool_use":
+             raw_input = getattr(block, "input", {}) or {}
+             blocks.append(
+                 {
+                     "type": "tool_use",
+                     "tool_use_id": getattr(block, "id", None) or str(uuid4()),
+                     "name": getattr(block, "name", None),
+                     "input": _normalize_tool_args(raw_input),
+                 }
+             )
+
+     if tool_mode == "text":
+         blocks = _maybe_convert_json_block_to_tool_use(blocks)
+     return blocks
+
+
+ def content_blocks_from_openai_choice(choice: Any, tool_mode: str) -> List[Dict[str, Any]]:
+     """Normalize OpenAI-compatible choice to our internal block format."""
+     content_blocks = []
+     if getattr(choice.message, "content", None):
+         content_blocks.append({"type": "text", "text": choice.message.content})
+
+     if getattr(choice.message, "tool_calls", None):
+         for tool_call in choice.message.tool_calls:
+             raw_args = getattr(tool_call.function, "arguments", None)
+             parsed_args = safe_parse_json(raw_args)
+             if parsed_args is None and raw_args:
+                 arg_preview = str(raw_args)
+                 arg_preview = arg_preview[:200] if len(arg_preview) > 200 else arg_preview
+                 logger.debug(
+                     "[query_llm] Failed to parse tool arguments; falling back to empty dict",
+                     extra={
+                         "tool_call_id": getattr(tool_call, "id", None),
+                         "tool_name": getattr(tool_call.function, "name", None),
+                         "arguments_preview": arg_preview,
+                     },
+                 )
+             parsed_args = _normalize_tool_args(parsed_args if parsed_args is not None else raw_args)
+             content_blocks.append(
+                 {
+                     "type": "tool_use",
+                     "tool_use_id": tool_call.id,
+                     "name": tool_call.function.name,
+                     "input": parsed_args,
+                 }
+             )
+     elif tool_mode == "text":
+         content_blocks = _maybe_convert_json_block_to_tool_use(content_blocks)
+     return content_blocks
+
+
+ def extract_tool_use_blocks(
+     assistant_message: AssistantMessage,
+ ) -> List[MessageContent]:
+     """Return all tool_use blocks from an assistant message."""
+     content = getattr(assistant_message.message, "content", None)
+     if not isinstance(content, list):
+         return []
+
+     tool_blocks: List[MessageContent] = []
+     for block in content:
+         normalized = MessageContent(**block) if isinstance(block, dict) else block
+         if getattr(normalized, "type", None) == "tool_use":
+             tool_blocks.append(normalized)
+     return tool_blocks
+
+
+ def tool_result_message(
+     tool_use_id: str, text: str, is_error: bool = False, tool_use_result: Any = None
+ ) -> UserMessage:
+     """Build a user message representing a tool_result block."""
+     block: Dict[str, Any] = {"type": "tool_result", "tool_use_id": tool_use_id, "text": text}
+     if is_error:
+         block["is_error"] = True
+     return create_user_message([block], tool_use_result=tool_use_result)
+
+
+ def format_pydantic_errors(error: ValidationError) -> str:
+     """Render a compact validation error summary."""
+     details = []
+     for err in error.errors():
+         loc: list[Any] = list(err.get("loc") or [])
+         loc_str = ".".join(str(part) for part in loc) if loc else ""
+         msg = err.get("msg") or ""
+         if loc_str and msg:
+             details.append(f"{loc_str}: {msg}")
+         elif msg:
+             details.append(msg)
+     return "; ".join(details) or str(error)
ripperdoc/core/system_prompt.py CHANGED
@@ -35,6 +35,7 @@ def _detect_git_repo(cwd: Path) -> bool:
          )
          return result.returncode == 0 and result.stdout.strip().lower() == "true"
      except Exception:
+         logger.exception("[system_prompt] Failed to detect git repository", extra={"cwd": str(cwd)})
          return False
  
  
@@ -381,7 +382,7 @@ def build_system_prompt(
  Provide detailed prompts so the agent can work autonomously and return a concise report."""
          ).strip()
      except Exception as exc:
-         logger.error(f"Failed to load agent definitions: {exc}")
+         logger.exception("Failed to load agent definitions", extra={"error": str(exc)})
          agent_section = (
              "# Subagents\nTask tool available, but agent definitions could not be loaded."
          )
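
Aside on the `logger.error` → `logger.exception` swaps in this release: called from an `except` block, `logger.exception` logs at ERROR level and appends the active traceback automatically. A generic sketch with stdlib logging (ripperdoc's `get_logger()` wrapper is assumed to behave compatibly):

```python
import logging

logging.basicConfig(level=logging.INFO)
log = logging.getLogger("demo")

try:
    raise ValueError("boom")
except Exception:
    # Unlike log.error("..."), this also records the full traceback.
    log.exception("Failed to load agent definitions")
```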
ripperdoc/core/tool.py CHANGED
@@ -8,6 +8,11 @@ import json
  from abc import ABC, abstractmethod
  from typing import Any, AsyncGenerator, Dict, List, Optional, TypeVar, Generic, Union
  from pydantic import BaseModel, ConfigDict, Field
+ from ripperdoc.utils.file_watch import FileSnapshot
+ from ripperdoc.utils.log import get_logger
+
+
+ logger = get_logger()
  
  
  class ToolResult(BaseModel):
@@ -35,8 +40,10 @@ class ToolUseContext(BaseModel):
      safe_mode: bool = False
      verbose: bool = False
      permission_checker: Optional[Any] = None
-     read_file_timestamps: Dict[str, float] = {}
+     read_file_timestamps: Dict[str, float] = Field(default_factory=dict)
+     file_state_cache: Dict[str, "FileSnapshot"] = Field(default_factory=dict)
      tool_registry: Optional[Any] = None
+     abort_signal: Optional[Any] = None
      model_config = ConfigDict(arbitrary_types_allowed=True)
  
  
@@ -195,6 +202,10 @@ async def build_tool_description(
          if parts:
              return f"{description_text}\n\nInput examples:\n" + "\n\n".join(parts)
      except Exception:
+         logger.exception(
+             "[tool] Failed to build input example section",
+             extra={"tool": getattr(tool, "name", None)},
+         )
          return description_text
  
      return description_text
@@ -210,5 +221,9 @@ def tool_input_examples(tool: Tool[Any, Any], limit: int = 5) -> List[Dict[str,
          try:
              results.append(example.example)
          except Exception:
+             logger.exception(
+                 "[tool] Failed to format tool input example",
+                 extra={"tool": getattr(tool, "name", None)},
+             )
              continue
      return results
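
Context for the `ToolUseContext` change above: `Field(default_factory=dict)` is the idiomatic pydantic way to declare a mutable default, giving every instance its own fresh dict. A self-contained sketch with plain pydantic (the model and field here are illustrative, not ripperdoc code):

```python
from pydantic import BaseModel, Field

class Ctx(BaseModel):
    read_file_timestamps: dict[str, float] = Field(default_factory=dict)

a, b = Ctx(), Ctx()
a.read_file_timestamps["/tmp/x.py"] = 1.0
assert b.read_file_timestamps == {}  # each instance starts with its own dict
```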
ripperdoc/sdk/client.py CHANGED
@@ -19,11 +19,13 @@ from typing import (
      List,
      Optional,
      Sequence,
+     Tuple,
      Union,
  )
  
  from ripperdoc.core.default_tools import get_default_tools
  from ripperdoc.core.query import QueryContext, query as _core_query
+ from ripperdoc.core.permissions import PermissionResult
  from ripperdoc.core.system_prompt import build_system_prompt
  from ripperdoc.core.tool import Tool
  from ripperdoc.tools.task_tool import TaskTool
@@ -42,7 +44,16 @@ from ripperdoc.utils.mcp import (
  )
  
  MessageType = Union[UserMessage, AssistantMessage, ProgressMessage]
- PermissionChecker = Callable[[Tool[Any, Any], Any], Union[Awaitable[Any], Any]]
+ PermissionChecker = Callable[
+     [Tool[Any, Any], Any],
+     Union[
+         PermissionResult,
+         Dict[str, Any],
+         Tuple[bool, Optional[str]],
+         bool,
+         Awaitable[Union[PermissionResult, Dict[str, Any], Tuple[bool, Optional[str]], bool]],
+     ],
+ ]
  QueryRunner = Callable[
      [
          List[MessageType],
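
The widened `PermissionChecker` alias now documents the return shapes the SDK accepts: a `PermissionResult`, a dict, a `(granted, reason)` tuple, a bare bool, or an awaitable of any of these. A minimal sketch of a conforming checker (the tool names are illustrative, not taken from the diff); an `async def` variant is equally valid since `Awaitable[...]` is part of the Union:

```python
from typing import Any, Optional, Tuple

def allow_reads_only(tool: Any, tool_input: Any) -> Tuple[bool, Optional[str]]:
    """Grant read-style tools; deny everything else with a reason."""
    if getattr(tool, "name", "") in {"View", "Glob", "Grep"}:
        return True, None
    return False, "Only read-only tools are permitted in this session."
```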