ripperdoc 0.2.0__py3-none-any.whl → 0.2.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (65)
  1. ripperdoc/__init__.py +1 -1
  2. ripperdoc/cli/cli.py +74 -9
  3. ripperdoc/cli/commands/__init__.py +4 -0
  4. ripperdoc/cli/commands/agents_cmd.py +30 -4
  5. ripperdoc/cli/commands/context_cmd.py +11 -1
  6. ripperdoc/cli/commands/cost_cmd.py +5 -0
  7. ripperdoc/cli/commands/doctor_cmd.py +208 -0
  8. ripperdoc/cli/commands/memory_cmd.py +202 -0
  9. ripperdoc/cli/commands/models_cmd.py +61 -6
  10. ripperdoc/cli/commands/resume_cmd.py +4 -2
  11. ripperdoc/cli/commands/status_cmd.py +1 -1
  12. ripperdoc/cli/commands/tasks_cmd.py +27 -0
  13. ripperdoc/cli/ui/rich_ui.py +258 -11
  14. ripperdoc/cli/ui/thinking_spinner.py +128 -0
  15. ripperdoc/core/agents.py +14 -4
  16. ripperdoc/core/config.py +56 -3
  17. ripperdoc/core/default_tools.py +16 -2
  18. ripperdoc/core/permissions.py +19 -0
  19. ripperdoc/core/providers/__init__.py +31 -0
  20. ripperdoc/core/providers/anthropic.py +136 -0
  21. ripperdoc/core/providers/base.py +187 -0
  22. ripperdoc/core/providers/gemini.py +172 -0
  23. ripperdoc/core/providers/openai.py +142 -0
  24. ripperdoc/core/query.py +510 -386
  25. ripperdoc/core/query_utils.py +578 -0
  26. ripperdoc/core/system_prompt.py +2 -1
  27. ripperdoc/core/tool.py +16 -1
  28. ripperdoc/sdk/client.py +12 -1
  29. ripperdoc/tools/background_shell.py +63 -21
  30. ripperdoc/tools/bash_tool.py +48 -13
  31. ripperdoc/tools/file_edit_tool.py +20 -0
  32. ripperdoc/tools/file_read_tool.py +23 -0
  33. ripperdoc/tools/file_write_tool.py +20 -0
  34. ripperdoc/tools/glob_tool.py +59 -15
  35. ripperdoc/tools/grep_tool.py +7 -0
  36. ripperdoc/tools/ls_tool.py +246 -73
  37. ripperdoc/tools/mcp_tools.py +32 -10
  38. ripperdoc/tools/multi_edit_tool.py +23 -0
  39. ripperdoc/tools/notebook_edit_tool.py +18 -3
  40. ripperdoc/tools/task_tool.py +7 -0
  41. ripperdoc/tools/todo_tool.py +157 -25
  42. ripperdoc/tools/tool_search_tool.py +17 -4
  43. ripperdoc/utils/file_watch.py +134 -0
  44. ripperdoc/utils/git_utils.py +274 -0
  45. ripperdoc/utils/json_utils.py +27 -0
  46. ripperdoc/utils/log.py +129 -29
  47. ripperdoc/utils/mcp.py +71 -6
  48. ripperdoc/utils/memory.py +12 -1
  49. ripperdoc/utils/message_compaction.py +22 -5
  50. ripperdoc/utils/messages.py +72 -17
  51. ripperdoc/utils/output_utils.py +34 -9
  52. ripperdoc/utils/permissions/path_validation_utils.py +6 -0
  53. ripperdoc/utils/prompt.py +17 -0
  54. ripperdoc/utils/safe_get_cwd.py +4 -0
  55. ripperdoc/utils/session_history.py +27 -9
  56. ripperdoc/utils/session_usage.py +7 -0
  57. ripperdoc/utils/shell_utils.py +159 -0
  58. ripperdoc/utils/todo.py +2 -2
  59. {ripperdoc-0.2.0.dist-info → ripperdoc-0.2.3.dist-info}/METADATA +4 -2
  60. ripperdoc-0.2.3.dist-info/RECORD +95 -0
  61. ripperdoc-0.2.0.dist-info/RECORD +0 -81
  62. {ripperdoc-0.2.0.dist-info → ripperdoc-0.2.3.dist-info}/WHEEL +0 -0
  63. {ripperdoc-0.2.0.dist-info → ripperdoc-0.2.3.dist-info}/entry_points.txt +0 -0
  64. {ripperdoc-0.2.0.dist-info → ripperdoc-0.2.3.dist-info}/licenses/LICENSE +0 -0
  65. {ripperdoc-0.2.0.dist-info → ripperdoc-0.2.3.dist-info}/top_level.txt +0 -0
ripperdoc/core/providers/gemini.py
@@ -0,0 +1,172 @@
+"""Gemini provider client."""
+
+from __future__ import annotations
+
+import os
+import time
+from typing import Any, Dict, List, Optional
+
+from ripperdoc.core.config import ModelProfile
+from ripperdoc.core.providers.base import (
+    ProgressCallback,
+    ProviderClient,
+    ProviderResponse,
+    call_with_timeout_and_retries,
+)
+from ripperdoc.core.tool import Tool
+from ripperdoc.utils.log import get_logger
+
+logger = get_logger()
+
+
+def _extract_usage_metadata(payload: Any) -> Dict[str, int]:
+    """Best-effort token extraction from Gemini responses."""
+    usage = getattr(payload, "usage_metadata", None) or getattr(payload, "usageMetadata", None)
+    if not usage:
+        usage = getattr(payload, "usage", None)
+    get = lambda key: int(getattr(usage, key, 0) or 0) if usage else 0  # noqa: E731
+    return {
+        "input_tokens": get("prompt_token_count") + get("cached_content_token_count"),
+        "output_tokens": get("candidates_token_count"),
+        "cache_read_input_tokens": get("cached_content_token_count"),
+        "cache_creation_input_tokens": 0,
+    }
+
+
+def _collect_text_parts(candidate: Any) -> str:
+    parts = getattr(candidate, "content", None)
+    if not parts:
+        return ""
+    if isinstance(parts, list):
+        texts = []
+        for part in parts:
+            text_val = getattr(part, "text", None) or getattr(part, "content", None)
+            if isinstance(text_val, str):
+                texts.append(text_val)
+        return "".join(texts)
+    return str(parts)
+
+
+class GeminiClient(ProviderClient):
+    """Gemini client with streaming and basic text support."""
+
+    async def call(
+        self,
+        *,
+        model_profile: ModelProfile,
+        system_prompt: str,
+        normalized_messages: List[Dict[str, Any]],
+        tools: List[Tool[Any, Any]],
+        tool_mode: str,
+        stream: bool,
+        progress_callback: Optional[ProgressCallback],
+        request_timeout: Optional[float],
+        max_retries: int,
+    ) -> ProviderResponse:
+        try:
+            import google.generativeai as genai  # type: ignore
+        except Exception as exc:  # pragma: no cover - import guard
+            msg = (
+                "Gemini client requires the 'google-generativeai' package. "
+                "Install it to enable Gemini support."
+            )
+            logger.warning(msg, extra={"error": str(exc)})
+            return ProviderResponse(
+                content_blocks=[{"type": "text", "text": msg}],
+                usage_tokens={},
+                cost_usd=0.0,
+                duration_ms=0.0,
+            )
+
+        if tools and tool_mode != "text":
+            msg = (
+                "Gemini client currently supports text-only responses; "
+                "tool/function calling is not yet implemented."
+            )
+            return ProviderResponse(
+                content_blocks=[{"type": "text", "text": msg}],
+                usage_tokens={},
+                cost_usd=0.0,
+                duration_ms=0.0,
+            )
+
+        api_key = (
+            model_profile.api_key or os.getenv("GEMINI_API_KEY") or os.getenv("GOOGLE_API_KEY")
+        )
+        genai.configure(api_key=api_key, client_options={"api_endpoint": model_profile.api_base})
+
+        # Flatten normalized messages into a single text prompt (Gemini supports multi-turn, but keep it simple).
+        prompt_parts: List[str] = [system_prompt]
+        for msg in normalized_messages:  # type: ignore[assignment]
+            role: str = (
+                str(msg.get("role", "")) if isinstance(msg, dict) else str(getattr(msg, "role", ""))  # type: ignore[assignment]
+            )
+            content = msg.get("content") if isinstance(msg, dict) else getattr(msg, "content", "")
+            if isinstance(content, list):
+                for item in content:
+                    text_val = (
+                        getattr(item, "text", None)
+                        or item.get("text", "")  # type: ignore[union-attr]
+                        if isinstance(item, dict)
+                        else ""
+                    )
+                    if text_val:
+                        prompt_parts.append(f"{role}: {text_val}")
+            elif isinstance(content, str):
+                prompt_parts.append(f"{role}: {content}")
+        full_prompt = "\n".join(part for part in prompt_parts if part)
+
+        model = genai.GenerativeModel(model_profile.model)
+        collected_text: List[str] = []
+        start_time = time.time()
+
+        async def _stream_request() -> Dict[str, Dict[str, int]]:
+            stream_resp = model.generate_content(full_prompt, stream=True)
+            usage_tokens: Dict[str, int] = {}
+            for chunk in stream_resp:
+                text_delta = _collect_text_parts(chunk)
+                if text_delta:
+                    collected_text.append(text_delta)
+                    if progress_callback:
+                        try:
+                            await progress_callback(text_delta)
+                        except Exception:
+                            logger.exception("[gemini_client] Stream callback failed")
+                usage_tokens = _extract_usage_metadata(chunk) or usage_tokens
+            return {"usage": usage_tokens}
+
+        async def _non_stream_request() -> Any:
+            return model.generate_content(full_prompt)
+
+        response: Any = await call_with_timeout_and_retries(
+            _stream_request if stream and progress_callback else _non_stream_request,
+            request_timeout,
+            max_retries,
+        )
+
+        duration_ms = (time.time() - start_time) * 1000
+        usage_tokens = _extract_usage_metadata(response)
+        cost_usd = 0.0  # Pricing unknown; leave as 0
+
+        content_blocks = (
+            [{"type": "text", "text": "".join(collected_text)}]
+            if collected_text
+            else [{"type": "text", "text": _collect_text_parts(response)}]
+        )
+
+        logger.info(
+            "[gemini_client] Response received",
+            extra={
+                "model": model_profile.model,
+                "duration_ms": round(duration_ms, 2),
+                "tool_mode": tool_mode,
+                "stream": stream,
+            },
+        )
+
+        return ProviderResponse(
+            content_blocks=content_blocks,
+            usage_tokens=usage_tokens,
+            cost_usd=cost_usd,
+            duration_ms=duration_ms,
+        )
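
For orientation, here is a hypothetical usage sketch of the provider interface the hunk above introduces. It is not code from the package: the call() keyword signature is taken from the diff, but the ModelProfile constructor arguments are assumptions (only attribute reads such as model_profile.api_key appear here) and the model name is a placeholder.

# Hypothetical usage sketch for the GeminiClient added above.
import asyncio

from ripperdoc.core.config import ModelProfile
from ripperdoc.core.providers.gemini import GeminiClient


async def demo() -> None:
    # Assumed constructor; the fields match the attribute reads in the diff.
    profile = ModelProfile(model="gemini-1.5-flash", api_key=None, api_base=None)

    async def on_delta(text: str) -> None:
        print(text, end="", flush=True)  # print streamed deltas as they arrive

    resp = await GeminiClient().call(
        model_profile=profile,
        system_prompt="You are a helpful assistant.",
        normalized_messages=[{"role": "user", "content": "Say hello."}],
        tools=[],            # Gemini tool calling is not implemented yet
        tool_mode="text",    # non-"text" modes short-circuit with a notice
        stream=True,
        progress_callback=on_delta,
        request_timeout=60.0,
        max_retries=2,
    )
    print(resp.content_blocks)


asyncio.run(demo())
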
ripperdoc/core/providers/openai.py
@@ -0,0 +1,142 @@
+"""OpenAI-compatible provider client."""
+
+from __future__ import annotations
+
+import time
+from typing import Any, Dict, List, Optional, cast
+
+from openai import AsyncOpenAI
+
+from ripperdoc.core.config import ModelProfile
+from ripperdoc.core.providers.base import (
+    ProgressCallback,
+    ProviderClient,
+    ProviderResponse,
+    call_with_timeout_and_retries,
+    sanitize_tool_history,
+)
+from ripperdoc.core.query_utils import (
+    build_openai_tool_schemas,
+    content_blocks_from_openai_choice,
+    estimate_cost_usd,
+    openai_usage_tokens,
+)
+from ripperdoc.core.tool import Tool
+from ripperdoc.utils.log import get_logger
+from ripperdoc.utils.session_usage import record_usage
+
+logger = get_logger()
+
+
+class OpenAIClient(ProviderClient):
+    """OpenAI-compatible client with streaming and non-streaming support."""
+
+    async def call(
+        self,
+        *,
+        model_profile: ModelProfile,
+        system_prompt: str,
+        normalized_messages: List[Dict[str, Any]],
+        tools: List[Tool[Any, Any]],
+        tool_mode: str,
+        stream: bool,
+        progress_callback: Optional[ProgressCallback],
+        request_timeout: Optional[float],
+        max_retries: int,
+    ) -> ProviderResponse:
+        start_time = time.time()
+        openai_tools = await build_openai_tool_schemas(tools)
+        openai_messages: List[Dict[str, object]] = [
+            {"role": "system", "content": system_prompt}
+        ] + sanitize_tool_history(list(normalized_messages))
+        collected_text: List[str] = []
+
+        can_stream = stream and tool_mode == "text" and not openai_tools
+
+        async with AsyncOpenAI(
+            api_key=model_profile.api_key, base_url=model_profile.api_base
+        ) as client:
+
+            async def _stream_request() -> Dict[str, Dict[str, int]]:
+                stream_resp = await client.chat.completions.create(  # type: ignore[call-overload]
+                    model=model_profile.model,
+                    messages=cast(Any, openai_messages),
+                    tools=None,
+                    temperature=model_profile.temperature,
+                    max_tokens=model_profile.max_tokens,
+                    stream=True,
+                )
+                usage_tokens: Dict[str, int] = {}
+                async for chunk in stream_resp:
+                    delta = getattr(chunk.choices[0], "delta", None)
+                    delta_content = getattr(delta, "content", None) if delta else None
+                    text_delta = ""
+                    if delta_content:
+                        if isinstance(delta_content, list):
+                            for part in delta_content:
+                                text_val = getattr(part, "text", None) or getattr(
+                                    part, "content", None
+                                )
+                                if isinstance(text_val, str):
+                                    text_delta += text_val
+                        elif isinstance(delta_content, str):
+                            text_delta += delta_content
+                    if text_delta:
+                        collected_text.append(text_delta)
+                        if progress_callback:
+                            try:
+                                await progress_callback(text_delta)
+                            except Exception:
+                                logger.exception("[openai_client] Stream callback failed")
+                    if getattr(chunk, "usage", None):
+                        usage_tokens = openai_usage_tokens(chunk.usage)
+                return {"usage": usage_tokens}
+
+            async def _non_stream_request() -> Any:
+                return await client.chat.completions.create(  # type: ignore[call-overload]
+                    model=model_profile.model,
+                    messages=cast(Any, openai_messages),
+                    tools=openai_tools if openai_tools else None,  # type: ignore[arg-type]
+                    temperature=model_profile.temperature,
+                    max_tokens=model_profile.max_tokens,
+                )
+
+            openai_response: Any = await call_with_timeout_and_retries(
+                _stream_request if can_stream else _non_stream_request,
+                request_timeout,
+                max_retries,
+            )
+
+            duration_ms = (time.time() - start_time) * 1000
+            usage_tokens = openai_usage_tokens(getattr(openai_response, "usage", None))
+            cost_usd = estimate_cost_usd(model_profile, usage_tokens)
+            record_usage(
+                model_profile.model, duration_ms=duration_ms, cost_usd=cost_usd, **usage_tokens
+            )
+
+            finish_reason: Optional[str]
+            if can_stream:
+                content_blocks = [{"type": "text", "text": "".join(collected_text)}]
+                finish_reason = "stream"
+            else:
+                choice = openai_response.choices[0]
+                content_blocks = content_blocks_from_openai_choice(choice, tool_mode)
+                finish_reason = cast(Optional[str], getattr(choice, "finish_reason", None))
+
+            logger.info(
+                "[openai_client] Response received",
+                extra={
+                    "model": model_profile.model,
+                    "duration_ms": round(duration_ms, 2),
+                    "tool_mode": tool_mode,
+                    "tool_count": len(openai_tools),
+                    "finish_reason": finish_reason,
+                },
+            )
+
+            return ProviderResponse(
+                content_blocks=content_blocks,
+                usage_tokens=usage_tokens,
+                cost_usd=cost_usd,
+                duration_ms=duration_ms,
+            )
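
Both clients route their requests through call_with_timeout_and_retries from ripperdoc/core/providers/base.py (+187 lines in this release, not shown in this section). Judging only from the call sites above — an async callable, an optional per-request timeout, and a retry count — a minimal stand-in could look like the sketch below; the shipped helper may well differ in backoff, error filtering, and return handling.

# Minimal sketch of a timeout-and-retry wrapper matching the call sites above.
# This is an assumption about base.py's behavior, not the shipped code.
import asyncio
from typing import Any, Awaitable, Callable, Optional


async def call_with_timeout_and_retries(
    fn: Callable[[], Awaitable[Any]],
    request_timeout: Optional[float],
    max_retries: int,
) -> Any:
    last_exc: Optional[BaseException] = None
    for attempt in range(max_retries + 1):
        try:
            if request_timeout is not None:
                # Bound each attempt by the per-request timeout.
                return await asyncio.wait_for(fn(), timeout=request_timeout)
            return await fn()
        except Exception as exc:
            last_exc = exc
            # Capped exponential backoff between attempts.
            await asyncio.sleep(min(2 ** attempt, 10.0))
    assert last_exc is not None
    raise last_exc
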