mycode-sdk 0.4.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,86 @@
1
+ """Provider adapter registry and public import surface."""
2
+
3
+ from __future__ import annotations
4
+
5
+ from mycode.providers.anthropic_like import AnthropicAdapter, MiniMaxAdapter, MoonshotAIAdapter
6
+ from mycode.providers.base import ProviderAdapter
7
+ from mycode.providers.gemini import GoogleGeminiAdapter
8
+ from mycode.providers.openai_chat import DeepSeekAdapter, OpenAIChatAdapter, OpenRouterAdapter, ZAIAdapter
9
+ from mycode.providers.openai_responses import OpenAIResponsesAdapter
10
+
11
# Registry of all built-in adapters, keyed by each adapter's provider_id.
# On a duplicate provider_id the later instance would win (dict comprehension
# semantics), so instantiation order matters — do not reorder casually.
_PROVIDERS: dict[str, ProviderAdapter] = {
    adapter.provider_id: adapter
    for adapter in (
        AnthropicAdapter(),
        OpenAIResponsesAdapter(),
        GoogleGeminiAdapter(),
        DeepSeekAdapter(),
        ZAIAdapter(),
        MoonshotAIAdapter(),
        MiniMaxAdapter(),
        OpenRouterAdapter(),
        OpenAIChatAdapter(),
    )
}
25
+
26
+
27
def list_supported_providers() -> list[str]:
    """List every registered built-in provider id, sorted alphabetically."""

    provider_ids = _PROVIDERS.keys()
    return sorted(provider_ids)
31
+
32
+
33
def list_env_discoverable_providers() -> list[str]:
    """List provider ids whose credentials can be discovered from env vars alone."""

    discoverable: list[str] = []
    for provider_id, adapter in _PROVIDERS.items():
        if adapter.auto_discoverable:
            discoverable.append(provider_id)
    return discoverable
37
+
38
+
39
def is_supported_provider(provider_name: str | None) -> bool:
    """Report whether *provider_name* identifies a registered adapter."""

    if not provider_name:
        return False
    return provider_name in _PROVIDERS
43
+
44
+
45
def get_provider_adapter(provider_name: str) -> ProviderAdapter:
    """Return the registered adapter for *provider_name*.

    Raises:
        ValueError: if the id is not registered; the message enumerates all
            supported provider ids to aid debugging.
    """

    try:
        return _PROVIDERS[provider_name]
    except KeyError as exc:
        supported = ", ".join(list_supported_providers())
        raise ValueError(f"unsupported provider {provider_name!r}; supported: {supported}") from exc
51
+
52
+
53
def provider_env_api_key_names(provider_name: str | None) -> tuple[str, ...]:
    """Return the env var names that may carry the provider's API key.

    A missing or unregistered provider id yields an empty tuple rather than
    raising, so callers can probe without pre-validating.
    """

    adapter = _PROVIDERS.get(provider_name) if provider_name else None
    return adapter.env_api_key_names if adapter else ()
56
+
57
+
58
def provider_api_key_from_env(provider_name: str | None) -> str | None:
    """Return the provider's API key read from the environment, if any.

    A missing or unregistered provider id yields None rather than raising.
    """

    adapter = _PROVIDERS.get(provider_name) if provider_name else None
    return adapter.api_key_from_env() if adapter else None
61
+
62
+
63
def provider_default_models(provider_name: str | None) -> tuple[str, ...]:
    """Return the provider's default model ids.

    A missing or unregistered provider id yields an empty tuple rather than
    raising.
    """

    adapter = _PROVIDERS.get(provider_name) if provider_name else None
    return adapter.default_models if adapter else ()
66
+
67
+
68
# Public API surface, kept in plain (ASCII) sorted order so additions have an
# unambiguous insertion point.
__all__ = [
    "AnthropicAdapter",
    "DeepSeekAdapter",
    "GoogleGeminiAdapter",
    "MiniMaxAdapter",
    "MoonshotAIAdapter",
    "OpenAIChatAdapter",
    "OpenAIResponsesAdapter",
    "OpenRouterAdapter",
    "ProviderAdapter",
    "ZAIAdapter",
    "get_provider_adapter",
    "is_supported_provider",
    "list_env_discoverable_providers",
    "list_supported_providers",
    "provider_api_key_from_env",
    "provider_default_models",
    "provider_env_api_key_names",
]
@@ -0,0 +1,387 @@
1
+ """Anthropic Messages adapters built on the official Anthropic Python SDK."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import hashlib
6
+ from collections.abc import AsyncIterator
7
+ from typing import Any, cast
8
+
9
+ from anthropic import APIError, AsyncAnthropic
10
+
11
+ from mycode.messages import (
12
+ ConversationMessage,
13
+ assistant_message,
14
+ text_block,
15
+ thinking_block,
16
+ tool_use_block,
17
+ )
18
+ from mycode.providers.base import (
19
+ DEFAULT_REQUEST_TIMEOUT,
20
+ ProviderAdapter,
21
+ ProviderRequest,
22
+ ProviderStreamEvent,
23
+ dump_model,
24
+ get_native_meta,
25
+ load_document_block_payload,
26
+ load_image_block_payload,
27
+ tool_result_content_blocks,
28
+ )
29
+
30
# Maps reasoning_effort values to extended thinking budget_tokens.
# Effort strings outside this table (other than "none") produce no thinking
# config at all in manual_thinking_config rather than raising.
_THINKING_BUDGETS: dict[str, int] = {"low": 2048, "medium": 8192, "high": 24576, "xhigh": 32768}
32
+
33
+
34
class AnthropicLikeAdapter(ProviderAdapter):
    """Shared Messages adapter for Anthropic-compatible providers.

    Anthropic, Moonshot, and MiniMax all document agent usage around the
    Anthropic Messages protocol. The differences we care about are limited to:

    - default base URL
    - API key env var names
    - optional thinking defaults
    - provider-native metadata carried in content blocks

    MiniMax requires the full assistant content (all blocks) to be sent on
    multi-turn tool-loop requests — not just the text portion.
    """

    def thinking_config(self, request: ProviderRequest) -> dict[str, Any] | None:
        """Return the request's `thinking` payload, or None to omit the field."""

        # Base implementation emits no thinking config; subclasses override.
        del request
        return None

    def output_config(self, request: ProviderRequest) -> dict[str, Any] | None:
        """Return the request's `output_config` payload, or None to omit the field."""

        # Base implementation emits no output config; subclasses override.
        del request
        return None

    def manual_thinking_config(self, effort: str | None) -> dict[str, Any] | None:
        """Translate a reasoning-effort string into an explicit thinking payload.

        "none" disables thinking; efforts present in _THINKING_BUDGETS enable
        it with a fixed token budget; any other value (including None/empty)
        yields None so the field is omitted entirely.
        """

        if not effort:
            return None
        if effort == "none":
            return {"type": "disabled"}
        budget = _THINKING_BUDGETS.get(effort)
        return {"type": "enabled", "budget_tokens": budget} if budget else None

    def _build_request_payload(self, request: ProviderRequest) -> dict[str, Any]:
        """Assemble the Messages API request body for *request*.

        Serializes the prepared conversation, marks cacheable content, and
        conditionally attaches system prompt, tools, thinking, and
        output_config sections.
        """

        # prepare_messages is inherited from ProviderAdapter (not visible here);
        # presumably it canonicalizes the history — confirm in base class.
        messages = [self._serialize_message(message) for message in self.prepare_messages(request)]
        self._apply_cache_control(messages)

        payload: dict[str, Any] = {
            "model": request.model,
            "max_tokens": request.max_tokens,
            "messages": messages,
        }
        if request.system:
            # System prompt is sent as a single cacheable text block.
            payload["system"] = [
                {
                    "type": "text",
                    "text": request.system,
                    "cache_control": {"type": "ephemeral"},
                }
            ]
        if request.tools:
            payload["tools"] = [self._serialize_tool(tool) for tool in request.tools]
            payload["tool_choice"] = {"type": "auto"}
        thinking = self.thinking_config(request)
        if thinking is not None:
            payload["thinking"] = thinking
        output_config = self.output_config(request)
        if output_config is not None:
            payload["output_config"] = output_config
        return payload

    def project_tool_call_id(self, tool_call_id: str, used_tool_call_ids: set[str]) -> str:
        """Return a short ASCII ID without introducing collisions.

        Anthropic-compatible endpoints only accept IDs containing letters,
        numbers, underscores, and dashes. When projection changes the original
        ID, append a short hash so distinct canonical IDs stay distinct.
        """

        # Replace every disallowed character with "_".
        safe_id = "".join(char if char.isalnum() or char in "_-" else "_" for char in tool_call_id)
        # Fast path: already valid, short enough, and not yet taken.
        if safe_id == tool_call_id and len(safe_id) <= 64 and safe_id not in used_tool_call_ids:
            return safe_id

        # 55-char prefix + "_" + 8 hex digest chars stays within 64 characters.
        prefix = (safe_id or "tool")[:55]
        digest = hashlib.sha1(tool_call_id.encode("utf-8")).hexdigest()[:8]
        candidate = f"{prefix}_{digest}"
        if candidate not in used_tool_call_ids:
            return candidate

        # Still colliding: grow a numeric suffix until a free ID is found,
        # trimming the prefix so the total never exceeds 64 characters.
        counter = 2
        while True:
            suffix = f"_{digest}_{counter}"
            candidate = f"{(safe_id or 'tool')[: 64 - len(suffix)]}{suffix}"
            if candidate not in used_tool_call_ids:
                return candidate
            counter += 1

    def _apply_cache_control(self, messages: list[dict[str, Any]]) -> None:
        """Mark the last replayed user content block as ephemeral."""

        # Walk backwards to the most recent user message; only that message
        # (and only its last cacheable block) gets the cache marker.
        for message in reversed(messages):
            if message.get("role") != "user":
                continue

            content = message.get("content")
            if not isinstance(content, list):
                return

            blocks: list[dict[str, Any]] = [cast(dict[str, Any], block) for block in content if isinstance(block, dict)]
            for block in reversed(blocks):
                block_type = str(block.get("type") or "")
                # Only these block types accept cache_control on this endpoint.
                if block_type not in {"text", "image", "document", "tool_result"}:
                    continue

                block["cache_control"] = {"type": "ephemeral"}
                return

            # The latest user message had no cacheable block; do not fall back
            # to earlier messages.
            return

        return

    async def stream_turn(self, request: ProviderRequest) -> AsyncIterator[ProviderStreamEvent]:
        """Stream one assistant turn.

        Yields thinking_delta / text_delta events as they arrive, then a final
        message_done event carrying the converted full message.

        Raises:
            ValueError: wrapping any SDK APIError, so callers handle one type.
        """

        api_key = self.require_api_key(request.api_key)
        client = AsyncAnthropic(
            api_key=api_key,
            base_url=self.resolve_base_url(request.api_base),
            timeout=DEFAULT_REQUEST_TIMEOUT,
        )

        try:
            async with client.messages.stream(**self._build_request_payload(request)) as stream:
                async for event in stream:
                    event_type = getattr(event, "type", None)
                    if event_type == "thinking":
                        thinking = cast(str | None, getattr(event, "thinking", None))
                        if thinking:
                            yield ProviderStreamEvent("thinking_delta", {"text": thinking})
                        continue
                    if event_type == "text":
                        text = cast(str | None, getattr(event, "text", None))
                        if text:
                            yield ProviderStreamEvent("text_delta", {"text": text})

                # Accumulated message assembled by the SDK stream helper.
                final_message = await stream.get_final_message()
        except APIError as exc:
            # Normalize SDK errors to ValueError for uniform caller handling.
            raise ValueError(str(exc)) from exc

        yield ProviderStreamEvent(
            "message_done",
            {
                "message": self._convert_final_message(final_message),
            },
        )

    def _convert_final_message(self, message: Any) -> ConversationMessage:
        """Convert an SDK Message object into a canonical ConversationMessage.

        Provider-native details (thinking signatures, citations, tool callers,
        stop_sequence, service_tier) are preserved under native metadata so
        _serialize_block can replay them on later turns. Unrecognized block
        types are silently dropped.
        """

        blocks = []
        for block in getattr(message, "content", []) or []:
            block_type = getattr(block, "type", None)

            if block_type == "thinking":
                native_meta = {}
                signature = getattr(block, "signature", None)
                if signature:
                    # Kept so _serialize_block can replay it verbatim.
                    native_meta["signature"] = signature
                blocks.append(
                    thinking_block(
                        getattr(block, "thinking", ""),
                        meta={"native": native_meta} if native_meta else None,
                    )
                )
                continue

            if block_type == "text":
                native_meta = {}
                citations = getattr(block, "citations", None)
                if citations:
                    native_meta["citations"] = dump_model(citations)
                blocks.append(
                    text_block(getattr(block, "text", ""), meta={"native": native_meta} if native_meta else None)
                )
                continue

            if block_type == "tool_use":
                native_meta = {}
                caller = getattr(block, "caller", None)
                if caller is not None:
                    native_meta["caller"] = caller
                blocks.append(
                    tool_use_block(
                        tool_id=getattr(block, "id", ""),
                        name=getattr(block, "name", ""),
                        input=getattr(block, "input", None),
                        meta={"native": native_meta} if native_meta else None,
                    )
                )
                continue

        # Message-level provider metadata (not attached to any single block).
        native_meta: dict[str, Any] = {}
        if stop_sequence := getattr(message, "stop_sequence", None):
            native_meta["stop_sequence"] = stop_sequence
        if service_tier := getattr(message, "service_tier", None):
            native_meta["service_tier"] = service_tier
        return assistant_message(
            blocks,
            provider=self.provider_id,
            model=getattr(message, "model", None),
            provider_message_id=getattr(message, "id", None),
            stop_reason=getattr(message, "stop_reason", None),
            usage=dump_model(getattr(message, "usage", None)),
            native_meta=native_meta,
        )

    def _serialize_tool(self, tool: dict[str, Any]) -> dict[str, Any]:
        """Project a canonical tool definition onto the Messages tool schema."""

        return {
            "name": tool.get("name") or "",
            "description": tool.get("description") or "",
            # Fall back to an empty object schema when none was provided.
            "input_schema": tool.get("input_schema") or {"type": "object", "properties": {}},
        }

    def _serialize_message(self, message: ConversationMessage) -> dict[str, Any]:
        """Project a canonical ConversationMessage onto the Messages wire format."""

        return {
            "role": str(message.get("role") or "user"),
            # Non-dict content entries are dropped silently.
            "content": [
                self._serialize_block(block) for block in message.get("content") or [] if isinstance(block, dict)
            ],
        }

    def _serialize_block(self, block: dict[str, Any]) -> dict[str, Any]:
        """Project one canonical content block onto the Messages wire format.

        Blocks of unknown type are passed through as shallow copies.
        """

        block_type = block.get("type")

        if block_type == "text":
            return {"type": "text", "text": str(block.get("text") or "")}

        if block_type == "thinking":
            native_meta = get_native_meta(block)
            payload: dict[str, Any] = {
                "type": "thinking",
                "thinking": str(block.get("text") or ""),
            }
            # Replay the provider signature captured in _convert_final_message.
            if native_meta.get("signature"):
                payload["signature"] = native_meta["signature"]
            return payload

        if block_type == "tool_use":
            native_meta = get_native_meta(block)
            payload = {
                "type": "tool_use",
                "id": block.get("id"),
                "name": block.get("name"),
                # Non-dict inputs are coerced to an empty object.
                "input": block.get("input") if isinstance(block.get("input"), dict) else {},
            }
            if native_meta.get("caller") is not None:
                payload["caller"] = native_meta["caller"]
            return payload

        if block_type == "image":
            mime_type, data = load_image_block_payload(block)
            return {
                "type": "image",
                "source": {"type": "base64", "media_type": mime_type, "data": data},
            }

        if block_type == "document":
            # Document name is discarded; the wire format has no field for it.
            mime_type, data, _name = load_document_block_payload(block)
            return {
                "type": "document",
                "source": {"type": "base64", "media_type": mime_type, "data": data},
            }

        if block_type == "tool_result":
            content_blocks = []
            for item in tool_result_content_blocks(block):
                if item.get("type") == "text":
                    content_blocks.append({"type": "text", "text": str(item.get("text") or "")})
                    continue
                if item.get("type") == "image":
                    mime_type, data = load_image_block_payload(item)
                    content_blocks.append(
                        {"type": "image", "source": {"type": "base64", "media_type": mime_type, "data": data}}
                    )
            return {
                "type": "tool_result",
                "tool_use_id": block.get("tool_use_id"),
                # Fall back to plain-text model output if no structured blocks.
                "content": content_blocks or str(block.get("model_text") or ""),
                "is_error": bool(block.get("is_error")),
            }

        return dict(block)
308
+
309
+
310
class AnthropicAdapter(AnthropicLikeAdapter):
    """First-party Anthropic Messages endpoint."""

    provider_id = "anthropic"
    label = "Anthropic"
    default_base_url = "https://api.anthropic.com"
    env_api_key_names = ("ANTHROPIC_API_KEY", "ANTHROPIC_AUTH_TOKEN")
    default_models = ("claude-sonnet-4-6", "claude-opus-4-7")
    supports_reasoning_effort = True

    def thinking_config(self, request: ProviderRequest) -> dict[str, Any] | None:
        """Pick a thinking payload based on effort and the target model family."""

        effort = request.reasoning_effort
        if not effort:
            return None
        if effort == "none":
            return {"type": "disabled"}

        model_name = request.model.lower()
        adaptive_families = ("claude-opus-4-7", "claude-opus-4-6", "claude-sonnet-4-6")
        if not model_name.startswith(adaptive_families):
            # Older/unknown models fall back to the explicit budget mapping.
            return self.manual_thinking_config(effort)

        config: dict[str, Any] = {"type": "adaptive"}
        if model_name.startswith("claude-opus-4-7"):
            config["display"] = "summarized"
        return config

    def output_config(self, request: ProviderRequest) -> dict[str, Any] | None:
        """Map reasoning effort onto each model family's output effort levels."""

        effort = request.reasoning_effort
        if not effort or effort == "none":
            return None

        model_name = request.model.lower()
        if model_name.startswith("claude-opus-4-7"):
            return {"effort": effort}
        if model_name.startswith("claude-sonnet-4-6"):
            # Sonnet caps out at "high".
            return {"effort": "high" if effort == "xhigh" else effort}
        if model_name.startswith("claude-opus-4-6"):
            # This Opus family calls its top level "max".
            return {"effort": "max" if effort == "xhigh" else effort}
        return None
350
+
351
+
352
class MoonshotAIAdapter(AnthropicLikeAdapter):
    """Moonshot endpoint speaking the Anthropic Messages protocol.

    Tool loops for kimi-k2.5 run through this endpoint. With thinking turned
    on, earlier reasoning blocks have to be resent as part of the history;
    the server does not strip them for us.
    """

    provider_id = "moonshotai"
    label = "Moonshot"
    default_base_url = "https://api.moonshot.ai/anthropic"
    env_api_key_names = ("MOONSHOT_API_KEY",)
    default_models = ("kimi-k2.5",)
    supports_reasoning_effort = True

    def thinking_config(self, request: ProviderRequest) -> dict[str, Any] | None:
        """Use the explicit effort-to-budget mapping; no adaptive mode here."""

        effort = request.reasoning_effort
        return self.manual_thinking_config(effort)
369
+
370
+
371
class MiniMaxAdapter(AnthropicLikeAdapter):
    """MiniMax endpoint speaking the Anthropic Messages protocol.

    Reasoning models on this endpoint emit thinking signatures; those are
    stashed in block.meta.native and replayed through _serialize_block so
    multi-turn tool loops remain valid.
    """

    provider_id = "minimax"
    label = "MiniMax"
    default_base_url = "https://api.minimax.io/anthropic"
    env_api_key_names = ("MINIMAX_API_KEY",)
    default_models = ("MiniMax-M2.7", "MiniMax-M2.7-highspeed")
    supports_reasoning_effort = True

    def thinking_config(self, request: ProviderRequest) -> dict[str, Any] | None:
        """Use the explicit effort-to-budget mapping; no adaptive mode here."""

        effort = request.reasoning_effort
        return self.manual_thinking_config(effort)