mycode-sdk 0.4.2__py3-none-any.whl

@@ -0,0 +1,372 @@
+ """OpenAI Responses API adapter."""
+
+ from __future__ import annotations
+
+ import json
+ from collections.abc import AsyncIterator
+ from copy import deepcopy
+ from typing import Any, cast
+
+ from openai import APIError, AsyncOpenAI
+
+ from mycode.messages import ConversationMessage, assistant_message, text_block, thinking_block, tool_use_block
+ from mycode.providers.base import (
+     DEFAULT_REQUEST_TIMEOUT,
+     ProviderAdapter,
+     ProviderRequest,
+     ProviderStreamEvent,
+     dump_model,
+     load_document_block_payload,
+     load_image_block_payload,
+     tool_result_content_blocks,
+ )
+ from mycode.utils import omit_none, parse_tool_arguments
+
+
+ class OpenAIResponsesAdapter(ProviderAdapter):
+     """Adapter for OpenAI's Responses API."""
+
+     provider_id = "openai"
+     label = "OpenAI Responses"
+     default_base_url = "https://api.openai.com/v1"
+     env_api_key_names = ("OPENAI_API_KEY",)
+     default_models = ("gpt-5.4", "gpt-5.4-mini")
+     supports_reasoning_effort = True
+
+     async def stream_turn(self, request: ProviderRequest) -> AsyncIterator[ProviderStreamEvent]:
+         api_key = self.require_api_key(request.api_key)
+         client = AsyncOpenAI(
+             api_key=api_key,
+             base_url=self.resolve_base_url(request.api_base),
+             timeout=DEFAULT_REQUEST_TIMEOUT,
+         )
+
+         payload = self._build_request_payload(request)
+         try:
+             stream = await client.responses.create(**payload, stream=True)
+             final_response = None
+             # Some Responses-compatible endpoints emit correct completed output
+             # items during the stream but leave `response.output` empty on the
+             # final completed object. Persist the completed items from the
+             # stream so the canonical assistant message stays intact.
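+             # Keyed by `output_index` so the items can be re-assembled in
+             # order for the final message below.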
+             streamed_output_items: dict[int, Any] = {}
+             async for event in stream:
+                 event_type = getattr(event, "type", None)
+
+                 if event_type == "response.reasoning_text.delta":
+                     delta = cast(str | None, getattr(event, "delta", None))
+                     if delta:
+                         yield ProviderStreamEvent("thinking_delta", {"text": delta})
+                     continue
+
+                 if event_type == "response.output_text.delta":
+                     delta = cast(str | None, getattr(event, "delta", None))
+                     if delta:
+                         yield ProviderStreamEvent("text_delta", {"text": delta})
+                     continue
+
+                 if event_type == "response.output_item.done":
+                     item = getattr(event, "item", None)
+                     if item is not None:
+                         output_index = int(getattr(event, "output_index", 0) or 0)
+                         streamed_output_items[output_index] = item
+                     continue
+
+                 if event_type == "error":
+                     raise ValueError(str(getattr(event, "message", event)))
+
+                 if event_type == "response.failed":
+                     raise ValueError(str(getattr(event, "response", None) or event))
+
+                 if event_type == "response.completed":
+                     final_response = getattr(event, "response", None)
+         except APIError as exc:
+             raise ValueError(str(exc)) from exc
+
+         if final_response is None:
+             raise ValueError("OpenAI Responses stream ended before response.completed")
+
+         yield ProviderStreamEvent(
+             "message_done",
+             {
+                 "message": self._convert_final_response(
+                     final_response,
+                     output_items=[streamed_output_items[index] for index in sorted(streamed_output_items)] or None,
+                 )
+             },
+         )
+
+     def _build_request_payload(self, request: ProviderRequest) -> dict[str, Any]:
+         prepared_messages = self.prepare_messages(request)
+         input_items: list[dict[str, Any]] = []
+         for message in prepared_messages:
+             role = message.get("role")
+             if role == "user":
+                 input_items.extend(self._serialize_user_message(message))
+                 continue
+
+             if role != "assistant":
+                 continue
+
+             native_output_items = self._native_output_items(message)
+             if native_output_items is not None:
+                 input_items.extend(native_output_items)
+                 continue
+
+             input_items.extend(self._serialize_fallback_assistant_message(message))
+
+         payload: dict[str, Any] = {
+             "model": request.model,
+             "input": input_items,
+             "instructions": request.system or None,
+             "store": False,
+             "include": ["reasoning.encrypted_content"],
+             "prompt_cache_key": request.session_id or None,
+             "max_output_tokens": request.max_tokens,
+             "tools": [self._serialize_tool(tool) for tool in request.tools] or None,
+             "tool_choice": "auto" if request.tools else None,
+         }
+         if request.reasoning_effort:
+             payload["reasoning"] = {"effort": request.reasoning_effort}
+         return omit_none(payload)
+
+     def _serialize_user_message(self, message: ConversationMessage) -> list[dict[str, Any]]:
+         items: list[dict[str, Any]] = []
+         blocks = [block for block in message.get("content") or [] if isinstance(block, dict)]
+         message_content = self._serialize_input_content(
+             [block for block in blocks if block.get("type") in {"text", "image", "document"}]
+         )
+         if message_content:
+             items.append(
+                 {
+                     "type": "message",
+                     "role": "user",
+                     "content": message_content,
+                 }
+             )
+
+         for block in blocks:
+             if block.get("type") != "tool_result":
+                 continue
+             result_blocks = tool_result_content_blocks(block)
+             has_images = any(item.get("type") == "image" for item in result_blocks)
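+             # Image results cannot be flattened into a string, so send the
+             # structured content form of `output`; plain text is enough
+             # otherwise.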
+             if has_images:
+                 output: str | list[dict[str, Any]] = self._serialize_input_content(result_blocks)
+             else:
+                 output = str(block.get("model_text") or "")
+             items.append(
+                 {
+                     "type": "function_call_output",
+                     "call_id": block.get("tool_use_id") or "",
+                     "output": output,
+                 }
+             )
+
+         return items
+
+     def _serialize_input_content(self, blocks: list[dict[str, Any]]) -> list[dict[str, Any]]:
+         content: list[dict[str, Any]] = []
+         for block in blocks:
+             block_type = block.get("type")
+             if block_type == "text":
+                 content.append({"type": "input_text", "text": str(block.get("text") or "")})
+                 continue
+             if block_type == "image":
+                 mime_type, data = load_image_block_payload(block)
+                 content.append({"type": "input_image", "image_url": f"data:{mime_type};base64,{data}"})
+                 continue
+             if block_type == "document":
+                 mime_type, data, name = load_document_block_payload(block)
+                 content.append(
+                     {
+                         "type": "input_file",
+                         "filename": name or "document.pdf",
+                         "file_data": f"data:{mime_type};base64,{data}",
+                     }
+                 )
+         return content
+
+     def _native_output_items(self, message: ConversationMessage) -> list[dict[str, Any]] | None:
+         """Replay stored OpenAI output items when history already came from Responses."""
+
+         raw_meta = message.get("meta")
+         if not isinstance(raw_meta, dict) or raw_meta.get("provider") != self.provider_id:
+             return None
+
+         native_meta = raw_meta.get("native")
+         output_items = native_meta.get("output_items") if isinstance(native_meta, dict) else None
+         if not isinstance(output_items, list) or not output_items:
+             return None
+
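+         # Strip volatile fields before replaying: `status` on every item, and
+         # `id` on everything except reasoning items, whose ids keep them
+         # paired with their `encrypted_content` payloads.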
+         replay_items: list[dict[str, Any]] = []
+         for item in cast(list[dict[str, Any]], deepcopy(output_items)):
+             item_type = str(item.get("type") or "")
+             item.pop("status", None)
+             if item_type != "reasoning":
+                 item.pop("id", None)
+             replay_items.append(item)
+
+         return replay_items
+
+     def _serialize_fallback_assistant_message(self, message: ConversationMessage) -> list[dict[str, Any]]:
+         blocks = [block for block in message.get("content") or [] if isinstance(block, dict)]
+         text_parts = [
+             str(block.get("text") or "") for block in blocks if block.get("type") == "text" and block.get("text")
+         ]
+
+         items: list[dict[str, Any]] = []
+         if text_parts:
+             message_item: dict[str, Any] = {
+                 "type": "message",
+                 "role": "assistant",
+                 "content": [{"type": "output_text", "text": "\n".join(text_parts)}],
+             }
+             items.append(message_item)
+
+         for block in blocks:
+             if block.get("type") != "tool_use":
+                 continue
+             call_item: dict[str, Any] = {
+                 "type": "function_call",
+                 "call_id": block.get("id") or "",
+                 "name": block.get("name") or "",
+                 "arguments": json.dumps(block.get("input") if isinstance(block.get("input"), dict) else {}),
+             }
+             items.append(call_item)
+
+         return items
+
+     def _serialize_tool(self, tool: dict[str, Any]) -> dict[str, Any]:
+         parameters = cast(dict[str, Any], dict(tool.get("input_schema") or {"type": "object", "properties": {}}))
+         properties = parameters.get("properties")
+         required = parameters.get("required")
+
+         # OpenAI strict tools require every top-level property to appear in
+         # `required`. Our built-in tool schemas are flat, so optional fields only
+         # need a shallow nullable conversion here.
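+         # For example, an optional {"type": "string", "enum": ["a", "b"]}
+         # becomes {"type": ["string", "null"], "enum": ["a", "b", None]} and
+         # its name is appended to `required`.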
+         if isinstance(properties, dict):
+             copied_properties: dict[str, Any] = {
+                 key: dict(value) if isinstance(value, dict) else value for key, value in properties.items()
+             }
+             required_names = {str(name) for name in required} if isinstance(required, list) else set()
+
+             for name, property_schema in copied_properties.items():
+                 if name in required_names or not isinstance(property_schema, dict):
+                     continue
+
+                 property_type = property_schema.get("type")
+                 if isinstance(property_type, str):
+                     property_schema["type"] = [property_type, "null"]
+                 elif isinstance(property_type, list):
+                     if "null" not in property_type:
+                         property_schema["type"] = [*property_type, "null"]
+                 else:
+                     copied_properties[name] = {"anyOf": [property_schema, {"type": "null"}]}
+                     continue
+
+                 enum_values = property_schema.get("enum")
+                 if isinstance(enum_values, list) and None not in enum_values:
+                     property_schema["enum"] = [*enum_values, None]
+
+             parameters["properties"] = copied_properties
+             parameters["required"] = list(copied_properties.keys())
+
+         return {
+             "type": "function",
+             "name": tool.get("name") or "",
+             "description": tool.get("description") or "",
+             "parameters": parameters,
+             "strict": True,
+         }
+
+     def _convert_final_response(
+         self,
+         response: Any,
+         *,
+         output_items: list[Any] | None = None,
+     ) -> dict[str, Any]:
+         raw_output = output_items if output_items is not None else (getattr(response, "output", None) or [])
+         dumped_output_items = dump_model(raw_output)
+         blocks: list[dict[str, Any]] = []
+         for item in raw_output:
+             item_type = getattr(item, "type", None)
+
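+             # Reasoning items may carry raw text in `content` or only a
+             # summary; prefer the raw text, falling back to the summary.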
+             if item_type == "reasoning":
+                 text_parts = []
+                 for content in getattr(item, "content", None) or []:
+                     text = getattr(content, "text", None)
+                     if text:
+                         text_parts.append(text)
+
+                 if not text_parts:
+                     for summary in getattr(item, "summary", None) or []:
+                         text = getattr(summary, "text", None)
+                         if text:
+                             text_parts.append(text)
+
+                 summary_dump = dump_model(getattr(item, "summary", None))
+                 item_meta = omit_none(
+                     {
+                         "item_id": getattr(item, "id", None),
+                         "status": getattr(item, "status", None),
+                         "summary": summary_dump or None,
+                     }
+                 )
+                 blocks.append(
+                     thinking_block(
+                         "".join(text_parts),
+                         meta={"native": item_meta} if item_meta else None,
+                     )
+                 )
+                 continue
+
+             if item_type == "message":
+                 for part in getattr(item, "content", []) or []:
+                     if getattr(part, "type", None) != "output_text":
+                         continue
+                     native_meta = {}
+                     annotations = dump_model(getattr(part, "annotations", None))
+                     if annotations:
+                         native_meta["annotations"] = annotations
+                     blocks.append(
+                         text_block(
+                             getattr(part, "text", ""),
+                             meta={"native": native_meta} if native_meta else None,
+                         )
+                     )
+                 continue
+
+             if item_type == "function_call":
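+                 # Arguments that fail to parse into a dict are kept verbatim
+                 # in meta so nothing the model emitted is lost.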
+                 raw_arguments = getattr(item, "arguments", "") or ""
+                 parsed_arguments = parse_tool_arguments(raw_arguments)
+                 if isinstance(parsed_arguments, str):
+                     tool_input = {}
+                     raw_args_entry: dict[str, Any] = {"raw_arguments": raw_arguments}
+                 else:
+                     tool_input = parsed_arguments
+                     raw_args_entry = {}
+                 item_meta = omit_none(
+                     {
+                         "item_id": getattr(item, "id", None),
+                         "status": getattr(item, "status", None),
+                         **raw_args_entry,
+                     }
+                 )
+                 blocks.append(
+                     tool_use_block(
+                         tool_id=getattr(item, "call_id", ""),
+                         name=getattr(item, "name", ""),
+                         input=tool_input,
+                         meta={"native": item_meta} if item_meta else None,
+                     )
+                 )
+
+         return assistant_message(
+             blocks,
+             provider=self.provider_id,
+             model=getattr(response, "model", None),
+             provider_message_id=getattr(response, "id", None),
+             stop_reason=getattr(response, "status", None),
+             usage=dump_model(getattr(response, "usage", None)),
+             native_meta={"output_items": dumped_output_items} if dumped_output_items else None,
+ )
mycode/py.typed ADDED
File without changes