ripperdoc-0.2.6-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (107)
  1. ripperdoc/__init__.py +3 -0
  2. ripperdoc/__main__.py +20 -0
  3. ripperdoc/cli/__init__.py +1 -0
  4. ripperdoc/cli/cli.py +405 -0
  5. ripperdoc/cli/commands/__init__.py +82 -0
  6. ripperdoc/cli/commands/agents_cmd.py +263 -0
  7. ripperdoc/cli/commands/base.py +19 -0
  8. ripperdoc/cli/commands/clear_cmd.py +18 -0
  9. ripperdoc/cli/commands/compact_cmd.py +23 -0
  10. ripperdoc/cli/commands/config_cmd.py +31 -0
  11. ripperdoc/cli/commands/context_cmd.py +144 -0
  12. ripperdoc/cli/commands/cost_cmd.py +82 -0
  13. ripperdoc/cli/commands/doctor_cmd.py +221 -0
  14. ripperdoc/cli/commands/exit_cmd.py +19 -0
  15. ripperdoc/cli/commands/help_cmd.py +20 -0
  16. ripperdoc/cli/commands/mcp_cmd.py +70 -0
  17. ripperdoc/cli/commands/memory_cmd.py +202 -0
  18. ripperdoc/cli/commands/models_cmd.py +413 -0
  19. ripperdoc/cli/commands/permissions_cmd.py +302 -0
  20. ripperdoc/cli/commands/resume_cmd.py +98 -0
  21. ripperdoc/cli/commands/status_cmd.py +167 -0
  22. ripperdoc/cli/commands/tasks_cmd.py +278 -0
  23. ripperdoc/cli/commands/todos_cmd.py +69 -0
  24. ripperdoc/cli/commands/tools_cmd.py +19 -0
  25. ripperdoc/cli/ui/__init__.py +1 -0
  26. ripperdoc/cli/ui/context_display.py +298 -0
  27. ripperdoc/cli/ui/helpers.py +22 -0
  28. ripperdoc/cli/ui/rich_ui.py +1557 -0
  29. ripperdoc/cli/ui/spinner.py +49 -0
  30. ripperdoc/cli/ui/thinking_spinner.py +128 -0
  31. ripperdoc/cli/ui/tool_renderers.py +298 -0
  32. ripperdoc/core/__init__.py +1 -0
  33. ripperdoc/core/agents.py +486 -0
  34. ripperdoc/core/commands.py +33 -0
  35. ripperdoc/core/config.py +559 -0
  36. ripperdoc/core/default_tools.py +88 -0
  37. ripperdoc/core/permissions.py +252 -0
  38. ripperdoc/core/providers/__init__.py +47 -0
  39. ripperdoc/core/providers/anthropic.py +250 -0
  40. ripperdoc/core/providers/base.py +265 -0
  41. ripperdoc/core/providers/gemini.py +615 -0
  42. ripperdoc/core/providers/openai.py +487 -0
  43. ripperdoc/core/query.py +1058 -0
  44. ripperdoc/core/query_utils.py +622 -0
  45. ripperdoc/core/skills.py +295 -0
  46. ripperdoc/core/system_prompt.py +431 -0
  47. ripperdoc/core/tool.py +240 -0
  48. ripperdoc/sdk/__init__.py +9 -0
  49. ripperdoc/sdk/client.py +333 -0
  50. ripperdoc/tools/__init__.py +1 -0
  51. ripperdoc/tools/ask_user_question_tool.py +431 -0
  52. ripperdoc/tools/background_shell.py +389 -0
  53. ripperdoc/tools/bash_output_tool.py +98 -0
  54. ripperdoc/tools/bash_tool.py +1016 -0
  55. ripperdoc/tools/dynamic_mcp_tool.py +428 -0
  56. ripperdoc/tools/enter_plan_mode_tool.py +226 -0
  57. ripperdoc/tools/exit_plan_mode_tool.py +153 -0
  58. ripperdoc/tools/file_edit_tool.py +346 -0
  59. ripperdoc/tools/file_read_tool.py +203 -0
  60. ripperdoc/tools/file_write_tool.py +205 -0
  61. ripperdoc/tools/glob_tool.py +179 -0
  62. ripperdoc/tools/grep_tool.py +370 -0
  63. ripperdoc/tools/kill_bash_tool.py +136 -0
  64. ripperdoc/tools/ls_tool.py +471 -0
  65. ripperdoc/tools/mcp_tools.py +591 -0
  66. ripperdoc/tools/multi_edit_tool.py +456 -0
  67. ripperdoc/tools/notebook_edit_tool.py +386 -0
  68. ripperdoc/tools/skill_tool.py +205 -0
  69. ripperdoc/tools/task_tool.py +379 -0
  70. ripperdoc/tools/todo_tool.py +494 -0
  71. ripperdoc/tools/tool_search_tool.py +380 -0
  72. ripperdoc/utils/__init__.py +1 -0
  73. ripperdoc/utils/bash_constants.py +51 -0
  74. ripperdoc/utils/bash_output_utils.py +43 -0
  75. ripperdoc/utils/coerce.py +34 -0
  76. ripperdoc/utils/context_length_errors.py +252 -0
  77. ripperdoc/utils/exit_code_handlers.py +241 -0
  78. ripperdoc/utils/file_watch.py +135 -0
  79. ripperdoc/utils/git_utils.py +274 -0
  80. ripperdoc/utils/json_utils.py +27 -0
  81. ripperdoc/utils/log.py +176 -0
  82. ripperdoc/utils/mcp.py +560 -0
  83. ripperdoc/utils/memory.py +253 -0
  84. ripperdoc/utils/message_compaction.py +676 -0
  85. ripperdoc/utils/messages.py +519 -0
  86. ripperdoc/utils/output_utils.py +258 -0
  87. ripperdoc/utils/path_ignore.py +677 -0
  88. ripperdoc/utils/path_utils.py +46 -0
  89. ripperdoc/utils/permissions/__init__.py +27 -0
  90. ripperdoc/utils/permissions/path_validation_utils.py +174 -0
  91. ripperdoc/utils/permissions/shell_command_validation.py +552 -0
  92. ripperdoc/utils/permissions/tool_permission_utils.py +279 -0
  93. ripperdoc/utils/prompt.py +17 -0
  94. ripperdoc/utils/safe_get_cwd.py +31 -0
  95. ripperdoc/utils/sandbox_utils.py +38 -0
  96. ripperdoc/utils/session_history.py +260 -0
  97. ripperdoc/utils/session_usage.py +117 -0
  98. ripperdoc/utils/shell_token_utils.py +95 -0
  99. ripperdoc/utils/shell_utils.py +159 -0
  100. ripperdoc/utils/todo.py +203 -0
  101. ripperdoc/utils/token_estimation.py +34 -0
  102. ripperdoc-0.2.6.dist-info/METADATA +193 -0
  103. ripperdoc-0.2.6.dist-info/RECORD +107 -0
  104. ripperdoc-0.2.6.dist-info/WHEEL +5 -0
  105. ripperdoc-0.2.6.dist-info/entry_points.txt +3 -0
  106. ripperdoc-0.2.6.dist-info/licenses/LICENSE +53 -0
  107. ripperdoc-0.2.6.dist-info/top_level.txt +1 -0
ripperdoc/core/providers/openai.py
@@ -0,0 +1,487 @@
+ """OpenAI-compatible provider client."""
+
+ from __future__ import annotations
+
+ import asyncio
+ import time
+ from typing import Any, Dict, List, Optional, cast
+ from uuid import uuid4
+
+ import openai
+ from openai import AsyncOpenAI
+
+ from ripperdoc.core.config import ModelProfile
+ from ripperdoc.core.providers.base import (
+     ProgressCallback,
+     ProviderClient,
+     ProviderResponse,
+     call_with_timeout_and_retries,
+     iter_with_timeout,
+     sanitize_tool_history,
+ )
+ from ripperdoc.core.query_utils import (
+     build_openai_tool_schemas,
+     content_blocks_from_openai_choice,
+     estimate_cost_usd,
+     _normalize_tool_args,
+     openai_usage_tokens,
+ )
+ from ripperdoc.core.tool import Tool
+ from ripperdoc.utils.log import get_logger
+ from ripperdoc.utils.session_usage import record_usage
+
+ logger = get_logger()
+
+
+ def _classify_openai_error(exc: Exception) -> tuple[str, str]:
+     """Classify an OpenAI exception into error code and user-friendly message."""
+     exc_type = type(exc).__name__
+     exc_msg = str(exc)
+
+     if isinstance(exc, openai.AuthenticationError):
+         return "authentication_error", f"Authentication failed: {exc_msg}"
+     if isinstance(exc, openai.PermissionDeniedError):
+         # Check for common permission denied reasons
+         if "balance" in exc_msg.lower() or "insufficient" in exc_msg.lower():
+             return "insufficient_balance", f"Insufficient balance: {exc_msg}"
+         return "permission_denied", f"Permission denied: {exc_msg}"
+     if isinstance(exc, openai.NotFoundError):
+         return "model_not_found", f"Model not found: {exc_msg}"
+     if isinstance(exc, openai.BadRequestError):
+         # Check for context length errors
+         if "context" in exc_msg.lower() or "token" in exc_msg.lower():
+             return "context_length_exceeded", f"Context length exceeded: {exc_msg}"
+         if "content" in exc_msg.lower() and "policy" in exc_msg.lower():
+             return "content_policy_violation", f"Content policy violation: {exc_msg}"
+         return "bad_request", f"Invalid request: {exc_msg}"
+     if isinstance(exc, openai.RateLimitError):
+         return "rate_limit", f"Rate limit exceeded: {exc_msg}"
+     if isinstance(exc, openai.APIConnectionError):
+         return "connection_error", f"Connection error: {exc_msg}"
+     if isinstance(exc, openai.APIStatusError):
+         return "api_error", f"API error ({exc.status_code}): {exc_msg}"
+     if isinstance(exc, asyncio.TimeoutError):
+         return "timeout", f"Request timed out: {exc_msg}"
+
+     # Generic fallback
+     return "unknown_error", f"Unexpected error ({exc_type}): {exc_msg}"
+
+
+ def _effort_from_tokens(max_thinking_tokens: int) -> Optional[str]:
+     """Map a thinking token budget to a coarse effort label."""
+     if max_thinking_tokens <= 0:
+         return None
+     if max_thinking_tokens <= 1024:
+         return "low"
+     if max_thinking_tokens <= 8192:
+         return "medium"
+     return "high"
+
+
+ def _detect_openai_vendor(model_profile: ModelProfile) -> str:
+     """Best-effort vendor hint for OpenAI-compatible endpoints."""
+     override = getattr(model_profile, "thinking_mode", None)
+     if isinstance(override, str) and override.strip():
+         return override.strip().lower()
+     base = (model_profile.api_base or "").lower()
+     name = (model_profile.model or "").lower()
+     if "openrouter.ai" in base:
+         return "openrouter"
+     if "deepseek" in base or name.startswith("deepseek"):
+         return "deepseek"
+     if "dashscope" in base or "qwen" in name:
+         return "qwen"
+     if "generativelanguage.googleapis.com" in base or name.startswith("gemini"):
+         return "gemini_openai"
+     if "gpt-5" in name:
+         return "openai_reasoning"
+     return "openai"
+
+
+ def _build_thinking_kwargs(
+     model_profile: ModelProfile, max_thinking_tokens: int
+ ) -> tuple[Dict[str, Any], Dict[str, Any]]:
+     """Return (extra_body, top_level_kwargs) for thinking-enabled calls."""
+     extra_body: Dict[str, Any] = {}
+     top_level: Dict[str, Any] = {}
+     vendor = _detect_openai_vendor(model_profile)
+     effort = _effort_from_tokens(max_thinking_tokens)
+
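+     # Each vendor spells its reasoning controls differently: DeepSeek and Qwen take
+     # on/off switches, OpenRouter takes a token budget, Gemini's OpenAI-compatible
+     # endpoint nests a thinking_config, and OpenAI reasoning models get only a
+     # coarse effort level.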
+     if vendor == "deepseek":
+         if max_thinking_tokens != 0:
+             extra_body["thinking"] = {"type": "enabled"}
+     elif vendor == "qwen":
+         if max_thinking_tokens > 0:
+             extra_body["enable_thinking"] = True
+         elif max_thinking_tokens == 0:
+             extra_body["enable_thinking"] = False
+     elif vendor == "openrouter":
+         if max_thinking_tokens > 0:
+             extra_body["reasoning"] = {"max_tokens": max_thinking_tokens}
+         elif max_thinking_tokens == 0:
+             extra_body["reasoning"] = {"effort": "none"}
+     elif vendor == "gemini_openai":
+         google_cfg: Dict[str, Any] = {}
+         if max_thinking_tokens > 0:
+             google_cfg["thinking_budget"] = max_thinking_tokens
+             google_cfg["include_thoughts"] = True
+         if google_cfg:
+             extra_body["google"] = {"thinking_config": google_cfg}
+         if effort:
+             top_level["reasoning_effort"] = effort
+             extra_body.setdefault("reasoning", {"effort": effort})
+     elif vendor == "openai_reasoning":
+         if effort:
+             extra_body["reasoning"] = {"effort": effort}
+     else:
+         if effort:
+             extra_body["reasoning"] = {"effort": effort}
+
+     return extra_body, top_level
+
+
+ class OpenAIClient(ProviderClient):
+     """OpenAI-compatible client with streaming and non-streaming support."""
+
+     async def call(
+         self,
+         *,
+         model_profile: ModelProfile,
+         system_prompt: str,
+         normalized_messages: List[Dict[str, Any]],
+         tools: List[Tool[Any, Any]],
+         tool_mode: str,
+         stream: bool,
+         progress_callback: Optional[ProgressCallback],
+         request_timeout: Optional[float],
+         max_retries: int,
+         max_thinking_tokens: int,
+     ) -> ProviderResponse:
+         start_time = time.time()
+
+         try:
+             return await self._call_impl(
+                 model_profile=model_profile,
+                 system_prompt=system_prompt,
+                 normalized_messages=normalized_messages,
+                 tools=tools,
+                 tool_mode=tool_mode,
+                 stream=stream,
+                 progress_callback=progress_callback,
+                 request_timeout=request_timeout,
+                 max_retries=max_retries,
+                 max_thinking_tokens=max_thinking_tokens,
+                 start_time=start_time,
+             )
+         except asyncio.CancelledError:
+             raise  # Don't suppress task cancellation
+         except Exception as exc:
+             duration_ms = (time.time() - start_time) * 1000
+             error_code, error_message = _classify_openai_error(exc)
+             logger.error(
+                 "[openai_client] API call failed",
+                 extra={
+                     "model": model_profile.model,
+                     "error_code": error_code,
+                     "error_message": error_message,
+                     "duration_ms": round(duration_ms, 2),
+                 },
+             )
+             return ProviderResponse.create_error(
+                 error_code=error_code,
+                 error_message=error_message,
+                 duration_ms=duration_ms,
+             )
+
+     async def _call_impl(
+         self,
+         *,
+         model_profile: ModelProfile,
+         system_prompt: str,
+         normalized_messages: List[Dict[str, Any]],
+         tools: List[Tool[Any, Any]],
+         tool_mode: str,
+         stream: bool,
+         progress_callback: Optional[ProgressCallback],
+         request_timeout: Optional[float],
+         max_retries: int,
+         max_thinking_tokens: int,
+         start_time: float,
+     ) -> ProviderResponse:
+         """Internal implementation of call, may raise exceptions."""
+         openai_tools = await build_openai_tool_schemas(tools)
+         openai_messages: List[Dict[str, object]] = [
+             {"role": "system", "content": system_prompt}
+         ] + sanitize_tool_history(list(normalized_messages))
+         collected_text: List[str] = []
+         streamed_tool_calls: Dict[int, Dict[str, Optional[str]]] = {}
+         streamed_tool_text: List[str] = []
+         streamed_usage: Dict[str, int] = {}
+         stream_reasoning_text: List[str] = []
+         stream_reasoning_details: List[Any] = []
+         response_metadata: Dict[str, Any] = {}
+
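+         # Stream only for plain text with no tool schemas, or for native tool calling;
+         # every other combination uses a single blocking request.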
+         can_stream_text = stream and tool_mode == "text" and not openai_tools
+         can_stream_tools = stream and tool_mode != "text" and bool(openai_tools)
+         can_stream = can_stream_text or can_stream_tools
+         thinking_extra_body, thinking_top_level = _build_thinking_kwargs(
+             model_profile, max_thinking_tokens
+         )
+
+         async with AsyncOpenAI(
+             api_key=model_profile.api_key, base_url=model_profile.api_base
+         ) as client:
+
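+             # Issues one streaming request, accumulating text, tool-call fragments,
+             # reasoning deltas, and usage into the enclosing scope's buffers.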
+             async def _stream_request() -> Dict[str, Dict[str, int]]:
+                 announced_tool_indexes: set[int] = set()
+                 stream_kwargs: Dict[str, Any] = {
+                     "model": model_profile.model,
+                     "messages": cast(Any, openai_messages),
+                     "tools": openai_tools if openai_tools else None,
+                     "temperature": model_profile.temperature,
+                     "max_tokens": model_profile.max_tokens,
+                     "stream": True,
+                     "stream_options": {"include_usage": True},
+                     **thinking_top_level,
+                 }
+                 if thinking_extra_body:
+                     stream_kwargs["extra_body"] = thinking_extra_body
+                 stream_coro = client.chat.completions.create(  # type: ignore[call-overload]
+                     **stream_kwargs
+                 )
+                 stream_resp = (
+                     await asyncio.wait_for(stream_coro, timeout=request_timeout)
+                     if request_timeout and request_timeout > 0
+                     else await stream_coro
+                 )
+                 async for chunk in iter_with_timeout(stream_resp, request_timeout):
+                     if getattr(chunk, "usage", None):
+                         streamed_usage.update(openai_usage_tokens(chunk.usage))
+
+                     if not getattr(chunk, "choices", None):
+                         continue
+                     delta = getattr(chunk.choices[0], "delta", None)
+                     if not delta:
+                         continue
+
+                     # Text deltas (rare in native tool mode but supported)
+                     delta_content = getattr(delta, "content", None)
+                     text_delta = ""
+                     if delta_content:
+                         if isinstance(delta_content, list):
+                             for part in delta_content:
+                                 text_val = getattr(part, "text", None) or getattr(
+                                     part, "content", None
+                                 )
+                                 if isinstance(text_val, str):
+                                     text_delta += text_val
+                         elif isinstance(delta_content, str):
+                             text_delta += delta_content
+                     delta_reasoning = getattr(delta, "reasoning_content", None) or getattr(
+                         delta, "reasoning", None
+                     )
+                     if isinstance(delta_reasoning, str):
+                         stream_reasoning_text.append(delta_reasoning)
+                     elif isinstance(delta_reasoning, list):
+                         for item in delta_reasoning:
+                             if isinstance(item, str):
+                                 stream_reasoning_text.append(item)
+                     delta_reasoning_details = getattr(delta, "reasoning_details", None)
+                     if delta_reasoning_details:
+                         if isinstance(delta_reasoning_details, list):
+                             stream_reasoning_details.extend(delta_reasoning_details)
+                         else:
+                             stream_reasoning_details.append(delta_reasoning_details)
+                     if text_delta:
+                         target_collector = (
+                             streamed_tool_text if can_stream_tools else collected_text
+                         )
+                         target_collector.append(text_delta)
+                         if progress_callback:
+                             try:
+                                 await progress_callback(text_delta)
+                             except (RuntimeError, ValueError, TypeError, OSError) as cb_exc:
+                                 logger.warning(
+                                     "[openai_client] Stream callback failed: %s: %s",
+                                     type(cb_exc).__name__, cb_exc,
+                                 )
+
+                     # Tool call deltas for native tool mode
+                     if not can_stream_tools:
+                         continue
+
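+                     # Tool-call arguments arrive as JSON string fragments keyed by call
+                     # index; accumulate them per index until the stream ends.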
+                     for tool_delta in getattr(delta, "tool_calls", []) or []:
+                         idx = getattr(tool_delta, "index", 0) or 0
+                         state = streamed_tool_calls.get(
+                             idx, {"id": None, "name": None, "arguments": ""}
+                         )
+
+                         if getattr(tool_delta, "id", None):
+                             state["id"] = tool_delta.id
+
+                         function_delta = getattr(tool_delta, "function", None)
+                         if function_delta:
+                             fn_name = getattr(function_delta, "name", None)
+                             if fn_name:
+                                 state["name"] = fn_name
+                             args_delta = getattr(function_delta, "arguments", None)
+                             if args_delta:
+                                 state["arguments"] = (state.get("arguments") or "") + args_delta
+                                 if progress_callback:
+                                     try:
+                                         await progress_callback(args_delta)
+                                     except (RuntimeError, ValueError, TypeError, OSError) as cb_exc:
+                                         logger.warning(
+                                             "[openai_client] Stream callback failed: %s: %s",
+                                             type(cb_exc).__name__, cb_exc,
+                                         )
+
+                         if idx not in announced_tool_indexes and state.get("name"):
+                             announced_tool_indexes.add(idx)
+                             if progress_callback:
+                                 try:
+                                     await progress_callback(f"[tool:{state['name']}]")
+                                 except (RuntimeError, ValueError, TypeError, OSError) as cb_exc:
+                                     logger.warning(
+                                         "[openai_client] Stream callback failed: %s: %s",
+                                         type(cb_exc).__name__, cb_exc,
+                                     )
+
+                         streamed_tool_calls[idx] = state
+
+                 return {"usage": streamed_usage}
+
+             async def _non_stream_request() -> Any:
+                 kwargs: Dict[str, Any] = {
+                     "model": model_profile.model,
+                     "messages": cast(Any, openai_messages),
+                     "tools": openai_tools if openai_tools else None,  # type: ignore[arg-type]
+                     "temperature": model_profile.temperature,
+                     "max_tokens": model_profile.max_tokens,
+                     **thinking_top_level,
+                 }
+                 if thinking_extra_body:
+                     kwargs["extra_body"] = thinking_extra_body
+                 return await client.chat.completions.create(  # type: ignore[call-overload]
+                     **kwargs
+                 )
+
+             timeout_for_call = None if can_stream else request_timeout
+             openai_response: Any = await call_with_timeout_and_retries(
+                 _stream_request if can_stream else _non_stream_request,
+                 timeout_for_call,
+                 max_retries,
+             )
+
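+             # A stream that produced neither text nor tool calls counts as empty and
+             # is retried as a single non-streaming request.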
+             if (
+                 can_stream_text
+                 and not collected_text
+                 and not streamed_tool_calls
+                 and not streamed_tool_text
+             ):
+                 logger.debug(
+                     "[openai_client] Streaming returned no content; retrying without stream",
+                     extra={"model": model_profile.model},
+                 )
+                 can_stream = False
+                 can_stream_text = False
+                 can_stream_tools = False
+                 openai_response = await call_with_timeout_and_retries(
+                     _non_stream_request, request_timeout, max_retries
+                 )
+
+             duration_ms = (time.time() - start_time) * 1000
+             usage_tokens = (
+                 streamed_usage
+                 if can_stream
+                 else openai_usage_tokens(getattr(openai_response, "usage", None))
+             )
+             cost_usd = estimate_cost_usd(model_profile, usage_tokens)
+             record_usage(
+                 model_profile.model, duration_ms=duration_ms, cost_usd=cost_usd, **usage_tokens
+             )
+
+             if not can_stream and (
+                 not openai_response or not getattr(openai_response, "choices", None)
+             ):
+                 logger.warning(
+                     "[openai_client] No choices returned from OpenAI response",
+                     extra={"model": model_profile.model},
+                 )
+                 empty_text = "Model returned no content."
+                 return ProviderResponse(
+                     content_blocks=[{"type": "text", "text": empty_text}],
+                     usage_tokens=usage_tokens,
+                     cost_usd=cost_usd,
+                     duration_ms=duration_ms,
+                     metadata=response_metadata,
+                 )
+
+             content_blocks: List[Dict[str, Any]] = []
+             finish_reason: Optional[str] = None
+             if can_stream_text:
+                 content_blocks = [{"type": "text", "text": "".join(collected_text)}]
+                 finish_reason = "stream"
+             elif can_stream_tools:
+                 if streamed_tool_text:
+                     content_blocks.append({"type": "text", "text": "".join(streamed_tool_text)})
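+                 # Reassemble accumulated fragments into tool_use blocks, minting an id
+                 # when the provider did not supply one.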
+                 for idx in sorted(streamed_tool_calls.keys()):
+                     call = streamed_tool_calls[idx]
+                     name = call.get("name")
+                     if not name:
+                         continue
+                     tool_use_id = call.get("id") or str(uuid4())
+                     content_blocks.append(
+                         {
+                             "type": "tool_use",
+                             "tool_use_id": tool_use_id,
+                             "name": name,
+                             "input": _normalize_tool_args(call.get("arguments")),
+                         }
+                     )
+                 finish_reason = "stream"
+             else:
+                 choice = openai_response.choices[0]
+                 content_blocks = content_blocks_from_openai_choice(choice, tool_mode)
+                 finish_reason = cast(Optional[str], getattr(choice, "finish_reason", None))
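+                 # Preserve non-standard reasoning fields (reasoning_content, reasoning,
+                 # reasoning_details) that some OpenAI-compatible vendors attach.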
+                 message_obj = getattr(choice, "message", None) or choice
+                 reasoning_content = getattr(message_obj, "reasoning_content", None)
+                 if reasoning_content:
+                     response_metadata["reasoning_content"] = reasoning_content
+                 reasoning_field = getattr(message_obj, "reasoning", None)
+                 if reasoning_field:
+                     response_metadata["reasoning"] = reasoning_field
+                     if "reasoning_content" not in response_metadata and isinstance(
+                         reasoning_field, str
+                     ):
+                         response_metadata["reasoning_content"] = reasoning_field
+                 reasoning_details = getattr(message_obj, "reasoning_details", None)
+                 if reasoning_details:
+                     response_metadata["reasoning_details"] = reasoning_details
+
+             if can_stream:
+                 if stream_reasoning_text:
+                     joined = "".join(stream_reasoning_text)
+                     response_metadata["reasoning_content"] = joined
+                     response_metadata.setdefault("reasoning", joined)
+                 if stream_reasoning_details:
+                     response_metadata["reasoning_details"] = stream_reasoning_details
+
+             logger.info(
+                 "[openai_client] Response received",
+                 extra={
+                     "model": model_profile.model,
+                     "duration_ms": round(duration_ms, 2),
+                     "tool_mode": tool_mode,
+                     "tool_count": len(openai_tools),
+                     "finish_reason": finish_reason,
+                 },
+             )
+
+             return ProviderResponse(
+                 content_blocks=content_blocks,
+                 usage_tokens=usage_tokens,
+                 cost_usd=cost_usd,
+                 duration_ms=duration_ms,
+                 metadata=response_metadata,
+             )