ripperdoc 0.2.2__py3-none-any.whl → 0.2.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (61)
  1. ripperdoc/__init__.py +1 -1
  2. ripperdoc/cli/cli.py +9 -2
  3. ripperdoc/cli/commands/agents_cmd.py +8 -4
  4. ripperdoc/cli/commands/context_cmd.py +3 -3
  5. ripperdoc/cli/commands/cost_cmd.py +5 -0
  6. ripperdoc/cli/commands/doctor_cmd.py +12 -4
  7. ripperdoc/cli/commands/memory_cmd.py +6 -13
  8. ripperdoc/cli/commands/models_cmd.py +36 -6
  9. ripperdoc/cli/commands/resume_cmd.py +4 -2
  10. ripperdoc/cli/commands/status_cmd.py +1 -1
  11. ripperdoc/cli/ui/rich_ui.py +135 -2
  12. ripperdoc/cli/ui/thinking_spinner.py +128 -0
  13. ripperdoc/core/agents.py +174 -6
  14. ripperdoc/core/config.py +9 -1
  15. ripperdoc/core/default_tools.py +6 -0
  16. ripperdoc/core/providers/__init__.py +47 -0
  17. ripperdoc/core/providers/anthropic.py +147 -0
  18. ripperdoc/core/providers/base.py +236 -0
  19. ripperdoc/core/providers/gemini.py +496 -0
  20. ripperdoc/core/providers/openai.py +253 -0
  21. ripperdoc/core/query.py +337 -141
  22. ripperdoc/core/query_utils.py +65 -24
  23. ripperdoc/core/system_prompt.py +67 -61
  24. ripperdoc/core/tool.py +12 -3
  25. ripperdoc/sdk/client.py +12 -1
  26. ripperdoc/tools/ask_user_question_tool.py +433 -0
  27. ripperdoc/tools/background_shell.py +104 -18
  28. ripperdoc/tools/bash_tool.py +33 -13
  29. ripperdoc/tools/enter_plan_mode_tool.py +223 -0
  30. ripperdoc/tools/exit_plan_mode_tool.py +150 -0
  31. ripperdoc/tools/file_edit_tool.py +13 -0
  32. ripperdoc/tools/file_read_tool.py +16 -0
  33. ripperdoc/tools/file_write_tool.py +13 -0
  34. ripperdoc/tools/glob_tool.py +5 -1
  35. ripperdoc/tools/ls_tool.py +14 -10
  36. ripperdoc/tools/mcp_tools.py +113 -4
  37. ripperdoc/tools/multi_edit_tool.py +12 -0
  38. ripperdoc/tools/notebook_edit_tool.py +12 -0
  39. ripperdoc/tools/task_tool.py +88 -5
  40. ripperdoc/tools/todo_tool.py +1 -3
  41. ripperdoc/tools/tool_search_tool.py +8 -4
  42. ripperdoc/utils/file_watch.py +134 -0
  43. ripperdoc/utils/git_utils.py +36 -38
  44. ripperdoc/utils/json_utils.py +1 -2
  45. ripperdoc/utils/log.py +3 -4
  46. ripperdoc/utils/mcp.py +49 -10
  47. ripperdoc/utils/memory.py +1 -3
  48. ripperdoc/utils/message_compaction.py +5 -11
  49. ripperdoc/utils/messages.py +9 -13
  50. ripperdoc/utils/output_utils.py +1 -3
  51. ripperdoc/utils/prompt.py +17 -0
  52. ripperdoc/utils/session_usage.py +7 -0
  53. ripperdoc/utils/shell_utils.py +159 -0
  54. ripperdoc/utils/token_estimation.py +33 -0
  55. {ripperdoc-0.2.2.dist-info → ripperdoc-0.2.4.dist-info}/METADATA +3 -1
  56. ripperdoc-0.2.4.dist-info/RECORD +99 -0
  57. ripperdoc-0.2.2.dist-info/RECORD +0 -86
  58. {ripperdoc-0.2.2.dist-info → ripperdoc-0.2.4.dist-info}/WHEEL +0 -0
  59. {ripperdoc-0.2.2.dist-info → ripperdoc-0.2.4.dist-info}/entry_points.txt +0 -0
  60. {ripperdoc-0.2.2.dist-info → ripperdoc-0.2.4.dist-info}/licenses/LICENSE +0 -0
  61. {ripperdoc-0.2.2.dist-info → ripperdoc-0.2.4.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,253 @@
1
+ """OpenAI-compatible provider client."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import asyncio
6
+ import time
7
+ from typing import Any, Dict, List, Optional, cast
8
+ from uuid import uuid4
9
+
10
+ from openai import AsyncOpenAI
11
+
12
+ from ripperdoc.core.config import ModelProfile
13
+ from ripperdoc.core.providers.base import (
14
+ ProgressCallback,
15
+ ProviderClient,
16
+ ProviderResponse,
17
+ call_with_timeout_and_retries,
18
+ iter_with_timeout,
19
+ sanitize_tool_history,
20
+ )
21
+ from ripperdoc.core.query_utils import (
22
+ build_openai_tool_schemas,
23
+ content_blocks_from_openai_choice,
24
+ estimate_cost_usd,
25
+ _normalize_tool_args,
26
+ openai_usage_tokens,
27
+ )
28
+ from ripperdoc.core.tool import Tool
29
+ from ripperdoc.utils.log import get_logger
30
+ from ripperdoc.utils.session_usage import record_usage
31
+
32
+ logger = get_logger()
33
+
34
+
35
class OpenAIClient(ProviderClient):
    """OpenAI-compatible client with streaming and non-streaming support."""

    async def call(
        self,
        *,
        model_profile: ModelProfile,
        system_prompt: str,
        normalized_messages: List[Dict[str, Any]],
        tools: List[Tool[Any, Any]],
        tool_mode: str,
        stream: bool,
        progress_callback: Optional[ProgressCallback],
        request_timeout: Optional[float],
        max_retries: int,
    ) -> ProviderResponse:
        """Send one chat-completion request and normalize the reply.

        Builds OpenAI tool schemas and a system-prompt-first message list,
        then issues either a streaming or a non-streaming request depending
        on `stream`, `tool_mode`, and whether any tool schemas exist. Usage
        tokens, cost, and duration are recorded via `record_usage` in every
        path.

        Args:
            model_profile: Model name, API credentials/base URL, temperature,
                and max_tokens for the request.
            system_prompt: Text inserted as the leading "system" message.
            normalized_messages: Prior conversation turns; passed through
                `sanitize_tool_history` before sending.
            tools: Tools to expose; converted by `build_openai_tool_schemas`.
            tool_mode: "text" disables native tool schemas for streaming;
                any other value enables native tool-call streaming.
            stream: Whether streaming is requested at all.
            progress_callback: Optional async callback invoked with text and
                tool-argument deltas; its exceptions are logged and swallowed.
            request_timeout: Per-request (and per-chunk, while streaming)
                timeout in seconds; None or <= 0 disables the bound.
            max_retries: Retry count forwarded to
                `call_with_timeout_and_retries`.

        Returns:
            A ProviderResponse holding normalized content blocks ("text" and
            "tool_use"), usage token counts, estimated cost, and duration.
        """
        start_time = time.time()
        openai_tools = await build_openai_tool_schemas(tools)
        # System prompt always leads; prior tool turns are sanitized so the
        # history is valid for the OpenAI chat format.
        openai_messages: List[Dict[str, object]] = [
            {"role": "system", "content": system_prompt}
        ] + sanitize_tool_history(list(normalized_messages))
        collected_text: List[str] = []
        # Per-stream-index tool-call state: {"id", "name", "arguments"}.
        streamed_tool_calls: Dict[int, Dict[str, Optional[str]]] = {}
        streamed_tool_text: List[str] = []
        streamed_usage: Dict[str, int] = {}

        # Streaming is attempted only when the tool mode matches the tool
        # schemas: text-mode streaming requires no tools, native tool
        # streaming requires at least one schema.
        can_stream_text = stream and tool_mode == "text" and not openai_tools
        can_stream_tools = stream and tool_mode != "text" and bool(openai_tools)
        can_stream = can_stream_text or can_stream_tools

        async with AsyncOpenAI(
            api_key=model_profile.api_key, base_url=model_profile.api_base
        ) as client:

            async def _stream_request() -> Dict[str, Dict[str, int]]:
                # Consume a streaming response, accumulating text deltas,
                # per-index tool-call fragments, and usage counters into the
                # enclosing scope's collectors.
                announced_tool_indexes: set[int] = set()
                stream_coro = client.chat.completions.create(  # type: ignore[call-overload]
                    model=model_profile.model,
                    messages=cast(Any, openai_messages),
                    tools=openai_tools if can_stream_tools else None,
                    temperature=model_profile.temperature,
                    max_tokens=model_profile.max_tokens,
                    stream=True,
                    # Ask the server to append a final usage chunk.
                    stream_options={"include_usage": True},
                )
                # Bound only the initial connection here; per-chunk timeouts
                # are enforced by iter_with_timeout below.
                stream_resp = (
                    await asyncio.wait_for(stream_coro, timeout=request_timeout)
                    if request_timeout and request_timeout > 0
                    else await stream_coro
                )
                async for chunk in iter_with_timeout(stream_resp, request_timeout):
                    if getattr(chunk, "usage", None):
                        streamed_usage.update(openai_usage_tokens(chunk.usage))

                    if not getattr(chunk, "choices", None):
                        continue
                    delta = getattr(chunk.choices[0], "delta", None)
                    if not delta:
                        continue

                    # Text deltas (rare in native tool mode but supported).
                    # Content may arrive as a plain string or as a list of
                    # parts carrying `.text`/`.content`.
                    delta_content = getattr(delta, "content", None)
                    text_delta = ""
                    if delta_content:
                        if isinstance(delta_content, list):
                            for part in delta_content:
                                text_val = getattr(part, "text", None) or getattr(
                                    part, "content", None
                                )
                                if isinstance(text_val, str):
                                    text_delta += text_val
                        elif isinstance(delta_content, str):
                            text_delta += delta_content
                    if text_delta:
                        # In native tool mode, interleaved text is kept in a
                        # separate buffer so it can precede the tool_use
                        # blocks in the final result.
                        target_collector = streamed_tool_text if can_stream_tools else collected_text
                        target_collector.append(text_delta)
                        if progress_callback:
                            try:
                                await progress_callback(text_delta)
                            except Exception:
                                logger.exception("[openai_client] Stream callback failed")

                    # Tool call deltas for native tool mode
                    if not can_stream_tools:
                        continue

                    for tool_delta in getattr(delta, "tool_calls", []) or []:
                        idx = getattr(tool_delta, "index", 0) or 0
                        state = streamed_tool_calls.get(idx, {"id": None, "name": None, "arguments": ""})

                        if getattr(tool_delta, "id", None):
                            state["id"] = tool_delta.id

                        function_delta = getattr(tool_delta, "function", None)
                        if function_delta:
                            fn_name = getattr(function_delta, "name", None)
                            if fn_name:
                                state["name"] = fn_name
                            args_delta = getattr(function_delta, "arguments", None)
                            if args_delta:
                                # Argument JSON arrives in fragments; append
                                # them here and parse only once the stream
                                # has finished (via _normalize_tool_args).
                                state["arguments"] = (state.get("arguments") or "") + args_delta
                                if progress_callback:
                                    try:
                                        await progress_callback(args_delta)
                                    except Exception:
                                        logger.exception("[openai_client] Stream callback failed")

                        # Announce each tool call once, as soon as its name
                        # becomes known.
                        if idx not in announced_tool_indexes and state.get("name"):
                            announced_tool_indexes.add(idx)
                            if progress_callback:
                                try:
                                    await progress_callback(f"[tool:{state['name']}]")
                                except Exception:
                                    logger.exception("[openai_client] Stream callback failed")

                        streamed_tool_calls[idx] = state

                return {"usage": streamed_usage}

            async def _non_stream_request() -> Any:
                # Single-shot request; timeout/retry handling is applied by
                # call_with_timeout_and_retries at the call site.
                return await client.chat.completions.create(  # type: ignore[call-overload]
                    model=model_profile.model,
                    messages=cast(Any, openai_messages),
                    tools=openai_tools if openai_tools else None,  # type: ignore[arg-type]
                    temperature=model_profile.temperature,
                    max_tokens=model_profile.max_tokens,
                )

            # Streaming enforces its own timeouts internally (wait_for +
            # iter_with_timeout), so the outer wrapper gets no timeout then.
            timeout_for_call = None if can_stream else request_timeout
            openai_response: Any = await call_with_timeout_and_retries(
                _stream_request if can_stream else _non_stream_request,
                timeout_for_call,
                max_retries,
            )

            # Some OpenAI-compatible servers return an empty stream; fall
            # back to one non-streaming request and flip the flags so the
            # result-assembly below takes the non-stream path.
            if (
                can_stream_text
                and not collected_text
                and not streamed_tool_calls
                and not streamed_tool_text
            ):
                logger.debug(
                    "[openai_client] Streaming returned no content; retrying without stream",
                    extra={"model": model_profile.model},
                )
                can_stream = False
                can_stream_text = False
                can_stream_tools = False
                openai_response = await call_with_timeout_and_retries(
                    _non_stream_request, request_timeout, max_retries
                )

            duration_ms = (time.time() - start_time) * 1000
            # Streamed usage was collected chunk-by-chunk; otherwise read it
            # off the response object.
            usage_tokens = streamed_usage if can_stream else openai_usage_tokens(
                getattr(openai_response, "usage", None)
            )
            cost_usd = estimate_cost_usd(model_profile, usage_tokens)
            record_usage(
                model_profile.model, duration_ms=duration_ms, cost_usd=cost_usd, **usage_tokens
            )

            # A non-streaming response with no choices yields a placeholder
            # text block instead of raising.
            if not can_stream and (not openai_response or not getattr(openai_response, "choices", None)):
                logger.warning(
                    "[openai_client] No choices returned from OpenAI response",
                    extra={"model": model_profile.model},
                )
                empty_text = "Model returned no content."
                return ProviderResponse(
                    content_blocks=[{"type": "text", "text": empty_text}],
                    usage_tokens=usage_tokens,
                    cost_usd=cost_usd,
                    duration_ms=duration_ms,
                )

            content_blocks: List[Dict[str, Any]] = []
            finish_reason: Optional[str] = None
            if can_stream_text:
                content_blocks = [{"type": "text", "text": "".join(collected_text)}]
                finish_reason = "stream"
            elif can_stream_tools:
                if streamed_tool_text:
                    content_blocks.append({"type": "text", "text": "".join(streamed_tool_text)})
                # Emit tool_use blocks in stream-index order, skipping any
                # call that never received a function name; a missing id is
                # replaced with a fresh UUID.
                for idx in sorted(streamed_tool_calls.keys()):
                    call = streamed_tool_calls[idx]
                    name = call.get("name")
                    if not name:
                        continue
                    tool_use_id = call.get("id") or str(uuid4())
                    content_blocks.append(
                        {
                            "type": "tool_use",
                            "tool_use_id": tool_use_id,
                            "name": name,
                            "input": _normalize_tool_args(call.get("arguments")),
                        }
                    )
                finish_reason = "stream"
            else:
                # Non-streaming: delegate block extraction to the shared
                # helper and surface the provider's finish_reason.
                choice = openai_response.choices[0]
                content_blocks = content_blocks_from_openai_choice(choice, tool_mode)
                finish_reason = cast(Optional[str], getattr(choice, "finish_reason", None))

            logger.info(
                "[openai_client] Response received",
                extra={
                    "model": model_profile.model,
                    "duration_ms": round(duration_ms, 2),
                    "tool_mode": tool_mode,
                    "tool_count": len(openai_tools),
                    "finish_reason": finish_reason,
                },
            )

            return ProviderResponse(
                content_blocks=content_blocks,
                usage_tokens=usage_tokens,
                cost_usd=cost_usd,
                duration_ms=duration_ms,
            )