mycode-sdk 0.4.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,316 @@
1
+ """Google Gemini adapter built on the official google-genai Python SDK."""
2
+
3
+ from __future__ import annotations
4
+
5
+ from collections.abc import AsyncIterator
6
+ from typing import Any
7
+ from urllib.parse import urlparse
8
+
9
+ from google import genai
10
+ from google.genai import types
11
+ from google.genai.errors import APIError
12
+
13
+ from mycode.messages import assistant_message, text_block, thinking_block, tool_use_block
14
+ from mycode.providers.base import (
15
+ DEFAULT_REQUEST_TIMEOUT,
16
+ ProviderAdapter,
17
+ ProviderRequest,
18
+ ProviderStreamEvent,
19
+ get_native_meta,
20
+ load_document_block_payload,
21
+ load_image_block_payload,
22
+ )
23
+
24
+ _DUMMY_THOUGHT_SIGNATURE = "skip_thought_signature_validator"
25
+
26
+
27
+ def _to_json(value: Any) -> Any:
28
+ """Convert SDK objects into JSON-safe plain data."""
29
+
30
+ if value is None:
31
+ return None
32
+ if hasattr(value, "model_dump"):
33
+ try:
34
+ return value.model_dump(mode="json", exclude_none=True)
35
+ except TypeError:
36
+ return _to_json(value.model_dump())
37
+ if isinstance(value, list):
38
+ return [_to_json(item) for item in value]
39
+ if isinstance(value, dict):
40
+ return {key: normalized for key, item in value.items() if (normalized := _to_json(item)) is not None}
41
+ return value
42
+
43
+
44
class GoogleGeminiAdapter(ProviderAdapter):
    """Adapter for the Gemini Developer API.

    Streams one assistant turn through the google-genai SDK, converting the
    project's canonical message blocks into Gemini ``contents`` on the way in
    and Gemini response parts back into canonical blocks on the way out.
    """

    provider_id = "google"
    label = "Google Gemini"
    default_base_url = "https://generativelanguage.googleapis.com"
    env_api_key_names = ("GEMINI_API_KEY", "GOOGLE_API_KEY")
    default_models = ("gemini-3.1-pro-preview", "gemini-3-flash-preview")
    supports_reasoning_effort = True

    async def stream_turn(self, request: ProviderRequest) -> AsyncIterator[ProviderStreamEvent]:
        """Stream delta events for one turn, ending with a ``message_done`` event.

        Raises:
            ValueError: wraps any google-genai ``APIError`` so callers deal
                with a single provider-agnostic error type.
        """
        api_key = self.require_api_key(request.api_key)
        client = genai.Client(api_key=api_key, http_options=self._http_options(request.api_base))

        # Canonical blocks plus response metadata accumulated while the
        # stream is consumed.
        blocks: list[dict[str, Any]] = []
        response_id: str | None = None
        response_model: str | None = None
        finish_reason: str | None = None
        finish_message: str | None = None
        usage: dict[str, Any] | None = None

        try:
            stream = await client.aio.models.generate_content_stream(
                model=request.model,
                contents=self._build_contents(request),
                config=self._build_config(request),
            )
            async for chunk in stream:
                # First chunk that carries an id/model wins; later chunks keep it.
                response_id = response_id or getattr(chunk, "response_id", None)
                response_model = response_model or getattr(chunk, "model_version", None)
                # Keep the latest non-empty usage snapshot.
                usage = _to_json(getattr(chunk, "usage_metadata", None)) or usage

                candidates = getattr(chunk, "candidates", None) or []
                if not candidates:
                    continue
                # Only the first candidate is consumed.
                candidate = candidates[0]

                finish_reason = _to_json(getattr(candidate, "finish_reason", None)) or finish_reason
                finish_message = getattr(candidate, "finish_message", None) or finish_message

                for part in getattr(getattr(candidate, "content", None), "parts", None) or []:
                    for event in self._consume_part(blocks, part):
                        yield event
        except APIError as exc:
            raise ValueError(str(exc)) from exc
        finally:
            # Best-effort transport shutdown: a close failure must not mask
            # the streamed result or an in-flight exception.
            try:
                await client.aio.aclose()
            except Exception:
                pass

        # Emitted after the client is closed; the accumulated blocks form the
        # final canonical assistant message.
        yield ProviderStreamEvent(
            "message_done",
            {
                "message": assistant_message(
                    blocks,
                    provider=self.provider_id,
                    model=response_model or request.model,
                    provider_message_id=response_id,
                    stop_reason=str(finish_reason) if finish_reason else None,
                    usage=usage,
                    native_meta={"finish_message": str(finish_message)} if finish_message else None,
                )
            },
        )

    def _http_options(self, api_base: str | None) -> types.HttpOptions:
        """Build ``HttpOptions`` for the resolved base URL.

        If the base URL path already ends in an API version segment
        (``/v1`` or ``/v1beta``), pass ``api_version=None`` so the SDK does
        not append a second version segment; otherwise default to v1beta.
        """
        base_url = self.resolve_base_url(api_base)
        api_version = "v1beta"
        if base_url and urlparse(base_url).path.rstrip("/").lower().endswith(("/v1", "/v1beta")):
            api_version = None
        # google-genai expects milliseconds here; DEFAULT_REQUEST_TIMEOUT is seconds.
        timeout_ms = int(DEFAULT_REQUEST_TIMEOUT * 1000)
        return types.HttpOptions(base_url=base_url, api_version=api_version, timeout=timeout_ms)

    def _build_contents(self, request: ProviderRequest) -> list[dict[str, Any]]:
        """Convert canonical replay messages into Gemini contents."""

        contents: list[dict[str, Any]] = []
        # Maps tool_use id -> tool name so later function_response parts can
        # echo the exact name Gemini sent with the matching function_call.
        tool_names: dict[str, str] = {}

        for message in self.prepare_messages(request):
            role = str(message.get("role") or "")
            blocks = [block for block in message.get("content") or [] if isinstance(block, dict)]

            if role == "assistant":
                parts: list[dict[str, Any]] = []
                # Tracks whether the first function_call in this model turn
                # still needs the documented dummy thought signature.
                needs_dummy_signature = True

                for block in blocks:
                    if block.get("type") == "tool_use":
                        tool_id = str(block.get("id") or "")
                        tool_name = str(block.get("name") or "")
                        if tool_id and tool_name:
                            tool_names[tool_id] = tool_name

                    # Prefer the provider-native part captured at stream time;
                    # it round-trips thought signatures exactly.
                    native_part = get_native_meta(block).get("part")
                    if isinstance(native_part, dict):
                        parts.append(dict(native_part))
                        if native_part.get("function_call") and native_part.get("thought_signature"):
                            needs_dummy_signature = False
                        continue

                    block_type = block.get("type")
                    if block_type == "thinking":
                        parts.append({"text": str(block.get("text") or ""), "thought": True})
                        continue

                    if block_type == "text":
                        parts.append({"text": str(block.get("text") or "")})
                        continue

                    if block_type != "tool_use":
                        continue

                    part: dict[str, Any] = {
                        "function_call": {
                            "id": block.get("id") or "",
                            "name": block.get("name") or "",
                            "args": block.get("input") if isinstance(block.get("input"), dict) else {},
                        }
                    }
                    # Gemini 3 validates the first function call in each step of
                    # the current turn. Cross-provider replay has no real thought
                    # signature, so we attach the documented dummy signature once.
                    if needs_dummy_signature:
                        part["thought_signature"] = _DUMMY_THOUGHT_SIGNATURE
                        needs_dummy_signature = False
                    parts.append(part)

                if parts:
                    contents.append({"role": "model", "parts": parts})
                continue

            # Everything that is not assistant/user (e.g. unknown roles) is skipped.
            if role != "user":
                continue

            parts = []
            for block in blocks:
                block_type = block.get("type")
                if block_type == "text":
                    parts.append({"text": str(block.get("text") or "")})
                    continue

                if block_type == "image":
                    mime_type, data = load_image_block_payload(block)
                    parts.append({"inline_data": {"mime_type": mime_type, "data": data}})
                    continue
                if block_type == "document":
                    mime_type, data, _name = load_document_block_payload(block)
                    parts.append({"inline_data": {"mime_type": mime_type, "data": data}})
                    continue

                if block_type != "tool_result":
                    continue

                tool_id = str(block.get("tool_use_id") or "")
                response: dict[str, Any] = {"result": str(block.get("model_text") or "")}
                if block.get("is_error"):
                    response["is_error"] = True

                # Gemini requires the exact id and name from the matching
                # function_call in the previous model turn.
                parts.append(
                    {
                        "function_response": {
                            "id": tool_id,
                            "name": tool_names.get(tool_id, ""),
                            "response": response,
                        }
                    }
                )

            if parts:
                contents.append({"role": "user", "parts": parts})

        return contents

    def _build_config(self, request: ProviderRequest) -> types.GenerateContentConfig:
        """Translate the canonical request into a ``GenerateContentConfig``."""
        tools: list[types.Tool | Any] | None = None
        if request.tools:
            tools = [
                types.Tool(
                    function_declarations=[
                        types.FunctionDeclaration(
                            name=str(tool.get("name") or ""),
                            description=str(tool.get("description") or ""),
                            parameters_json_schema=tool.get("input_schema") or {"type": "object", "properties": {}},
                        )
                        for tool in request.tools
                    ]
                )
            ]

        # Always request thought summaries so thinking blocks can be streamed.
        thinking_config = types.ThinkingConfig(include_thoughts=True)
        if request.reasoning_effort and request.model.lower().startswith("gemini-3"):
            # Official OpenAI-compat mapping:
            #   Gemini 3.1 Pro: minimal -> low
            #   Gemini 3 Flash: minimal -> minimal
            effort = request.reasoning_effort
            if effort in {"none", "low"}:
                thinking_config.thinking_level = (
                    types.ThinkingLevel.LOW
                    if request.model.lower().startswith("gemini-3.1-pro")
                    else types.ThinkingLevel.MINIMAL
                )
            elif effort == "medium":
                thinking_config.thinking_level = types.ThinkingLevel.MEDIUM
            else:
                thinking_config.thinking_level = types.ThinkingLevel.HIGH

        return types.GenerateContentConfig(
            system_instruction=request.system or None,
            max_output_tokens=request.max_tokens,
            tools=tools,
            thinking_config=thinking_config,
        )

    def _consume_part(self, blocks: list[dict[str, Any]], part: Any) -> list[ProviderStreamEvent]:
        """Fold one streamed Gemini part into *blocks*; return deltas to emit.

        Mutates *blocks* in place (appending or merging into the last block)
        and returns the stream events the caller should yield for this part.
        """
        native_part = _to_json(part) or {}
        # Normalize: an explicit thought=False is equivalent to no thought key.
        if native_part.get("thought") is False:
            native_part.pop("thought", None)

        function_call = getattr(part, "function_call", None)
        if function_call is not None:
            tool_input = getattr(function_call, "args", None)
            blocks.append(
                tool_use_block(
                    tool_id=str(getattr(function_call, "id", None) or f"tool_call_{len(blocks)}"),
                    name=str(getattr(function_call, "name", None) or ""),
                    input=tool_input if isinstance(tool_input, dict) else {},
                    meta={"native": {"part": native_part}},
                )
            )
            # Function calls produce no text delta; the block surfaces later
            # in the final message.
            return []

        text = getattr(part, "text", None)
        if text is None or text == "":
            if not native_part.get("thought_signature"):
                return []

            # Gemini may put the final thought signature into an empty-text part.
            # Keep it as a separate empty block so replay preserves the original
            # part boundary instead of merging the signature into another block.
            part_meta = {"native": {"part": native_part}}
            is_thought = getattr(part, "thought", False)
            blocks.append(thinking_block("", meta=part_meta) if is_thought else text_block("", meta=part_meta))
            return []

        is_thought = bool(getattr(part, "thought", False))
        event = ProviderStreamEvent("thinking_delta" if is_thought else "text_delta", {"text": str(text)})
        block_type = "thinking" if is_thought else "text"

        # Gemini may stream one logical thought/text across many chunks.
        # Merge only when the block kind matches and we are not combining
        # distinct thought signatures.
        if blocks and blocks[-1].get("type") == block_type:
            last_part = get_native_meta(blocks[-1]).get("part")
            if isinstance(last_part, dict):
                last_signature = last_part.get("thought_signature")
                current_signature = native_part.get("thought_signature")
                if not (last_signature and current_signature and last_signature != current_signature):
                    # Append the text to both the canonical block and the
                    # cached native part so replay stays byte-faithful.
                    blocks[-1]["text"] = f"{blocks[-1].get('text') or ''}{text}"
                    last_part["text"] = f"{last_part.get('text') or ''}{text}"
                    if current_signature and not last_signature:
                        last_part["thought_signature"] = current_signature
                    return [event]

        part_meta = {"native": {"part": native_part}}
        blocks.append(
            thinking_block(str(text), meta=part_meta) if is_thought else text_block(str(text), meta=part_meta)
        )
        return [event]
@@ -0,0 +1,368 @@
1
+ """Chat Completions adapters for OpenAI-compatible providers."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import json
6
+ from collections.abc import AsyncIterator
7
+ from dataclasses import dataclass
8
+ from typing import Any
9
+
10
+ from openai import APIError, AsyncOpenAI
11
+
12
+ from mycode.messages import assistant_message, text_block, thinking_block, tool_use_block
13
+ from mycode.providers.base import (
14
+ DEFAULT_REQUEST_TIMEOUT,
15
+ ProviderAdapter,
16
+ ProviderRequest,
17
+ ProviderStreamEvent,
18
+ dump_model,
19
+ get_native_meta,
20
+ load_document_block_payload,
21
+ load_image_block_payload,
22
+ )
23
+ from mycode.utils import omit_none, parse_tool_arguments
24
+
25
+
26
@dataclass
class _ChatToolCallState:
    """Accumulate one streamed tool call from chat-completions deltas."""

    # Position of the tool call within the assistant message; deltas for the
    # same logical call share this index.
    index: int
    # Provider-assigned call id; may arrive in a later delta than the index.
    tool_id: str | None = None
    # Function name; set once a delta carries it.
    name: str = ""
    # JSON argument text, concatenated fragment-by-fragment across deltas.
    arguments_text: str = ""
+
35
+
36
class OpenAIChatAdapter(ProviderAdapter):
    """Base adapter for Chat Completions style providers.

    Subclasses typically only override the class attributes and
    ``_build_provider_payload_overrides`` to inject provider-specific
    request fields.
    """

    provider_id = "openai_chat"
    label = "OpenAI Chat Completions"
    default_base_url = "https://api.openai.com/v1"
    env_api_key_names = ("OPENAI_API_KEY",)
    auto_discoverable = False

    async def stream_turn(self, request: ProviderRequest) -> AsyncIterator[ProviderStreamEvent]:
        """Stream delta events for one turn, ending with a ``message_done`` event.

        Raises:
            ValueError: wraps any openai ``APIError`` so callers deal with a
                single provider-agnostic error type.
        """
        api_key = self.require_api_key(request.api_key)
        client = AsyncOpenAI(
            api_key=api_key,
            base_url=self.resolve_base_url(request.api_base),
            timeout=DEFAULT_REQUEST_TIMEOUT,
        )
        # NOTE(review): the client is never closed here, unlike the Gemini
        # adapter which closes its client in a finally block — confirm whether
        # leaving the transport open per turn is intentional.

        # Keep the streamed turn state local to this adapter so the wire-format
        # mapping stays readable in one file.
        tool_calls: dict[int, _ChatToolCallState] = {}
        text_parts: list[str] = []
        thinking_parts: list[str] = []
        thinking_native_meta: dict[str, Any] = {}
        response_id: str | None = None
        response_model: str | None = None
        finish_reason: str | None = None
        usage: Any = None

        try:
            stream = await client.chat.completions.create(**self._build_request_payload(request), stream=True)
            async for chunk in stream:
                # First chunk that carries an id/model wins; later chunks keep it.
                response_id = response_id or getattr(chunk, "id", None)
                response_model = response_model or getattr(chunk, "model", None)

                # Usage arrives on a trailing chunk when include_usage is set.
                if getattr(chunk, "usage", None) is not None:
                    usage = chunk.usage

                if not chunk.choices:
                    continue

                choice = chunk.choices[0]
                if choice.finish_reason:
                    finish_reason = choice.finish_reason

                delta = choice.delta
                reasoning_delta, reasoning_meta_update = self._extract_reasoning_delta(delta)
                if reasoning_delta:
                    thinking_parts.append(reasoning_delta)
                    thinking_native_meta.update(reasoning_meta_update)
                    yield ProviderStreamEvent("thinking_delta", {"text": reasoning_delta})

                if delta.content:
                    text_parts.append(delta.content)
                    yield ProviderStreamEvent("text_delta", {"text": delta.content})

                # Tool-call deltas arrive fragmented; fold each fragment into
                # the per-index accumulator state.
                for tool_call in delta.tool_calls or []:
                    index = tool_call.index or 0
                    state = tool_calls.setdefault(index, _ChatToolCallState(index=index))
                    if tool_call.id:
                        state.tool_id = tool_call.id
                    function = tool_call.function
                    if function is None:
                        continue
                    if function.name:
                        state.name = function.name
                    if function.arguments:
                        state.arguments_text += function.arguments
        except APIError as exc:
            raise ValueError(str(exc)) from exc

        # Assemble the final canonical message: thinking first, then text,
        # then tool calls in index order.
        blocks = []
        if thinking_parts:
            blocks.append(
                thinking_block(
                    "".join(thinking_parts),
                    meta={"native": thinking_native_meta} if thinking_native_meta else None,
                )
            )
        if text_parts:
            blocks.append(text_block("".join(text_parts)))

        for index in sorted(tool_calls):
            state = tool_calls[index]
            raw_arguments = state.arguments_text
            parsed_arguments = parse_tool_arguments(raw_arguments)
            if isinstance(parsed_arguments, str):
                # Unparseable arguments: keep the raw text in native meta so
                # nothing is lost, and fall back to an empty input dict.
                tool_input = {}
                meta = {"native": {"raw_arguments": raw_arguments}}
            else:
                tool_input = parsed_arguments
                meta = None

            blocks.append(
                tool_use_block(
                    tool_id=state.tool_id or f"tool_call_{index}",
                    name=state.name,
                    input=tool_input,
                    meta=meta,
                )
            )

        final_message = assistant_message(
            blocks,
            provider=self.provider_id,
            model=response_model or request.model,
            provider_message_id=response_id,
            stop_reason=finish_reason,
            usage=dump_model(usage),
        )
        yield ProviderStreamEvent("message_done", {"message": final_message})

    def _build_request_payload(self, request: ProviderRequest) -> dict[str, Any]:
        """Build the chat.completions.create kwargs for *request*.

        ``None``-valued entries are stripped via ``omit_none`` so optional
        fields are simply absent from the wire payload.
        """
        messages = []
        if request.system:
            messages.append({"role": "system", "content": request.system})
        for message in self.prepare_messages(request):
            messages.extend(self._serialize_message(message))

        payload: dict[str, Any] = {
            "model": request.model,
            "messages": messages,
            "tools": [self._serialize_tool(tool) for tool in request.tools] or None,
            "tool_choice": "auto" if request.tools else None,
            "max_tokens": request.max_tokens,
            "stream_options": {"include_usage": True},
        }
        # Subclass-specific fields override the defaults above.
        payload.update(self._build_provider_payload_overrides(request))
        return omit_none(payload)

    def _build_provider_payload_overrides(self, request: ProviderRequest) -> dict[str, Any]:
        """Hook for subclasses to add provider-specific payload fields."""
        del request
        return {}

    def _serialize_tool(self, tool: dict[str, Any]) -> dict[str, Any]:
        """Convert one canonical tool definition into the wire format."""
        return {
            "type": "function",
            "function": {
                "name": tool.get("name") or "",
                "description": tool.get("description") or "",
                "parameters": tool.get("input_schema") or {"type": "object", "properties": {}},
            },
        }

    def _serialize_message(self, message: dict[str, Any]) -> list[dict[str, Any]]:
        """Convert one canonical message into Chat Completions wire messages.

        A user message may expand into several wire messages: one "user"
        message for text/media content plus one "tool" message per
        tool_result block. Assistant messages map to a single wire message.
        """

        role = str(message.get("role") or "user")
        blocks = [block for block in message.get("content") or [] if isinstance(block, dict)]

        if role == "user":
            payload_messages: list[dict[str, Any]] = []
            has_media = any(block.get("type") in {"image", "document"} for block in blocks)
            if has_media:
                # Media forces the structured content-part list form.
                user_content: str | list[dict[str, Any]] | None = []
                for block in blocks:
                    block_type = block.get("type")
                    if block_type == "text":
                        text = str(block.get("text") or "")
                        if text:
                            user_content.append({"type": "text", "text": text})
                        continue
                    if block_type == "image":
                        mime_type, data = load_image_block_payload(block)
                        user_content.append(
                            {
                                "type": "image_url",
                                "image_url": {"url": f"data:{mime_type};base64,{data}"},
                            }
                        )
                        continue
                    if block_type == "document":
                        mime_type, data, name = load_document_block_payload(block)
                        user_content.append(
                            {
                                "type": "file",
                                "file": {
                                    "filename": name or "document.pdf",
                                    "file_data": f"data:{mime_type};base64,{data}",
                                },
                            }
                        )
                if not user_content:
                    user_content = None
            else:
                # Text-only messages collapse into a single plain string.
                text_parts = [
                    str(block.get("text") or "")
                    for block in blocks
                    if block.get("type") == "text" and block.get("text")
                ]
                text = "\n".join(text_parts)
                user_content = text or None

            if user_content:
                payload_messages.append({"role": "user", "content": user_content})

            # Tool results become separate "tool" role messages keyed by call id.
            for block in blocks:
                if block.get("type") != "tool_result":
                    continue
                payload_messages.append(
                    {
                        "role": "tool",
                        "tool_call_id": block.get("tool_use_id") or "",
                        "content": str(block.get("model_text") or ""),
                    }
                )
            return payload_messages

        if role != "assistant":
            return []

        text_parts = [str(block.get("text") or "") for block in blocks if block.get("type") == "text"]
        thinking_blocks = [block for block in blocks if block.get("type") == "thinking"]
        tool_use_blocks = [block for block in blocks if block.get("type") == "tool_use"]

        payload: dict[str, Any] = {
            "role": "assistant",
            "content": "\n".join(part for part in text_parts if part),
        }

        if tool_use_blocks:
            payload["tool_calls"] = [
                {
                    "id": block.get("id") or "",
                    "type": "function",
                    "function": {
                        "name": block.get("name") or "",
                        "arguments": json.dumps(
                            block.get("input") if isinstance(block.get("input"), dict) else {},
                            ensure_ascii=False,
                        ),
                    },
                }
                for block in tool_use_blocks
            ]

        if thinking_blocks:
            payload.update(self._serialize_reasoning(thinking_blocks))

        return [payload]

    def _serialize_reasoning(self, thinking_blocks: list[dict[str, Any]]) -> dict[str, Any]:
        """Replay canonical thinking through the provider's reasoning field.

        When the source provider did not record a native field name, default to
        `reasoning_content`, which is the common reasoning slot used by the
        OpenAI-compatible thinking providers we support.
        """

        thinking_text = "\n".join(str(block.get("text") or "") for block in thinking_blocks if block.get("text"))
        # The first thinking block's native meta records which field the
        # source provider used.
        native_meta = get_native_meta(thinking_blocks[0])
        reasoning_field = str(native_meta.get("reasoning_field") or "")
        if reasoning_field == "reasoning_details":
            return {"reasoning_details": native_meta.get("reasoning_details") or []}
        return {"reasoning_content": thinking_text} if thinking_text else {}

    def _extract_reasoning_delta(self, delta: Any) -> tuple[str, dict[str, Any]]:
        """Pull a reasoning text fragment (and native-meta update) out of *delta*.

        Returns ("", {}) when the delta carries no reasoning.
        """
        # Third-party providers surface reasoning through non-standard extras.
        # We check both the delta root and model_extra to cover both patterns.
        # Known fields: reasoning_content (Moonshot/MiniMax chat), reasoning_details (some others).
        for source in (delta, getattr(delta, "model_extra", None) or {}):
            if isinstance(source, dict):
                reasoning_content = source.get("reasoning_content")
                reasoning_details = source.get("reasoning_details")
            else:
                reasoning_content = getattr(source, "reasoning_content", None)
                reasoning_details = getattr(source, "reasoning_details", None)

            if isinstance(reasoning_content, str) and reasoning_content:
                return reasoning_content, {"reasoning_field": "reasoning_content"}

            if isinstance(reasoning_details, list) and reasoning_details:
                reasoning_text = "".join(
                    str(item.get("text") or "") for item in reasoning_details if isinstance(item, dict)
                )
                if reasoning_text:
                    return reasoning_text, {
                        "reasoning_field": "reasoning_details",
                        "reasoning_details": reasoning_details,
                    }

        return "", {}
317
+
318
+
319
class DeepSeekAdapter(OpenAIChatAdapter):
    """DeepSeek's OpenAI-compatible chat endpoint.

    deepseek-reasoner always thinks — no parameter needed to enable it.
    deepseek-chat does not think by default; send thinking: {"type": "enabled"}
    to activate it. We rely on the model's default behavior, so no overrides here.
    """

    provider_id = "deepseek"
    label = "DeepSeek"
    default_base_url = "https://api.deepseek.com"
    env_api_key_names = ("DEEPSEEK_API_KEY",)
    # Exposed in model pickers; not an exhaustive list of DeepSeek models.
    default_models = ("deepseek-chat", "deepseek-reasoner")
    auto_discoverable = True
333
+
334
+
335
class ZAIAdapter(OpenAIChatAdapter):
    """Z.AI's OpenAI-compatible chat endpoint.

    GLM models think by default. We still send the explicit thinking parameter
    so that clear_thinking=False preserves reasoning across multi-turn tool loops
    instead of resetting it on each turn.
    """

    provider_id = "zai"
    label = "Z.AI"
    default_base_url = "https://api.z.ai/api/paas/v4/"
    env_api_key_names = ("ZAI_API_KEY",)
    default_models = ("glm-5.1", "glm-5-turbo")
    auto_discoverable = True

    def _build_provider_payload_overrides(self, request: ProviderRequest) -> dict[str, Any]:
        """Always enable thinking and keep it across tool-loop turns.

        The override is unconditional, so the request is deliberately unused;
        discard it explicitly to match the base-class convention (and keep
        unused-argument linting quiet).
        """
        del request
        return {"extra_body": {"thinking": {"type": "enabled", "clear_thinking": False}}}
352
+
353
+
354
class OpenRouterAdapter(OpenAIChatAdapter):
    """OpenRouter's OpenAI-compatible chat endpoint."""

    provider_id = "openrouter"
    label = "OpenRouter"
    default_base_url = "https://openrouter.ai/api/v1"
    env_api_key_names = ("OPENROUTER_API_KEY",)
    default_models = ("openrouter/auto",)
    auto_discoverable = True
    supports_reasoning_effort = True

    def _build_provider_payload_overrides(self, request: ProviderRequest) -> dict[str, Any]:
        """Forward the requested reasoning effort through OpenRouter's extra body.

        Returns an empty override when no effort was requested.
        """
        effort = request.reasoning_effort
        if effort:
            return {"extra_body": {"reasoning": {"effort": effort}}}
        return {}