coding-proxy 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (77)
  1. coding/__init__.py +0 -0
  2. coding/proxy/__init__.py +3 -0
  3. coding/proxy/__main__.py +5 -0
  4. coding/proxy/auth/__init__.py +13 -0
  5. coding/proxy/auth/providers/__init__.py +6 -0
  6. coding/proxy/auth/providers/base.py +35 -0
  7. coding/proxy/auth/providers/github.py +133 -0
  8. coding/proxy/auth/providers/google.py +237 -0
  9. coding/proxy/auth/runtime.py +122 -0
  10. coding/proxy/auth/store.py +74 -0
  11. coding/proxy/cli/__init__.py +151 -0
  12. coding/proxy/cli/auth_commands.py +224 -0
  13. coding/proxy/compat/__init__.py +30 -0
  14. coding/proxy/compat/canonical.py +193 -0
  15. coding/proxy/compat/session_store.py +137 -0
  16. coding/proxy/config/__init__.py +6 -0
  17. coding/proxy/config/auth_schema.py +24 -0
  18. coding/proxy/config/loader.py +139 -0
  19. coding/proxy/config/resiliency.py +46 -0
  20. coding/proxy/config/routing.py +279 -0
  21. coding/proxy/config/schema.py +280 -0
  22. coding/proxy/config/server.py +23 -0
  23. coding/proxy/config/vendors.py +53 -0
  24. coding/proxy/convert/__init__.py +14 -0
  25. coding/proxy/convert/anthropic_to_gemini.py +352 -0
  26. coding/proxy/convert/anthropic_to_openai.py +352 -0
  27. coding/proxy/convert/gemini_sse_adapter.py +169 -0
  28. coding/proxy/convert/gemini_to_anthropic.py +98 -0
  29. coding/proxy/convert/openai_to_anthropic.py +88 -0
  30. coding/proxy/logging/__init__.py +49 -0
  31. coding/proxy/logging/db.py +308 -0
  32. coding/proxy/logging/stats.py +129 -0
  33. coding/proxy/model/__init__.py +93 -0
  34. coding/proxy/model/auth.py +32 -0
  35. coding/proxy/model/compat.py +153 -0
  36. coding/proxy/model/constants.py +21 -0
  37. coding/proxy/model/pricing.py +70 -0
  38. coding/proxy/model/token.py +64 -0
  39. coding/proxy/model/vendor.py +218 -0
  40. coding/proxy/pricing.py +100 -0
  41. coding/proxy/routing/__init__.py +47 -0
  42. coding/proxy/routing/circuit_breaker.py +152 -0
  43. coding/proxy/routing/error_classifier.py +67 -0
  44. coding/proxy/routing/executor.py +453 -0
  45. coding/proxy/routing/model_mapper.py +90 -0
  46. coding/proxy/routing/quota_guard.py +169 -0
  47. coding/proxy/routing/rate_limit.py +159 -0
  48. coding/proxy/routing/retry.py +82 -0
  49. coding/proxy/routing/router.py +84 -0
  50. coding/proxy/routing/session_manager.py +62 -0
  51. coding/proxy/routing/tier.py +171 -0
  52. coding/proxy/routing/usage_parser.py +193 -0
  53. coding/proxy/routing/usage_recorder.py +131 -0
  54. coding/proxy/server/__init__.py +1 -0
  55. coding/proxy/server/app.py +142 -0
  56. coding/proxy/server/factory.py +175 -0
  57. coding/proxy/server/request_normalizer.py +139 -0
  58. coding/proxy/server/responses.py +74 -0
  59. coding/proxy/server/routes.py +264 -0
  60. coding/proxy/streaming/__init__.py +1 -0
  61. coding/proxy/streaming/anthropic_compat.py +484 -0
  62. coding/proxy/vendors/__init__.py +29 -0
  63. coding/proxy/vendors/anthropic.py +44 -0
  64. coding/proxy/vendors/antigravity.py +328 -0
  65. coding/proxy/vendors/base.py +353 -0
  66. coding/proxy/vendors/copilot.py +702 -0
  67. coding/proxy/vendors/copilot_models.py +438 -0
  68. coding/proxy/vendors/copilot_token_manager.py +167 -0
  69. coding/proxy/vendors/copilot_urls.py +16 -0
  70. coding/proxy/vendors/mixins.py +71 -0
  71. coding/proxy/vendors/token_manager.py +128 -0
  72. coding/proxy/vendors/zhipu.py +243 -0
  73. coding_proxy-0.1.0.dist-info/METADATA +184 -0
  74. coding_proxy-0.1.0.dist-info/RECORD +77 -0
  75. coding_proxy-0.1.0.dist-info/WHEEL +4 -0
  76. coding_proxy-0.1.0.dist-info/entry_points.txt +2 -0
  77. coding_proxy-0.1.0.dist-info/licenses/LICENSE +201 -0
@@ -0,0 +1,352 @@
1
"""Convert Anthropic Messages API requests to Google Gemini request format."""

from __future__ import annotations

import logging
from dataclasses import dataclass, field
from typing import Any

logger = logging.getLogger(__name__)

# Anthropic role → Gemini role (Gemini uses "model" for assistant turns).
_ROLE_MAP = {"assistant": "model", "user": "user"}
# Tool names/types that are collapsed into Gemini's built-in googleSearch tool.
_SEARCH_TOOL_NAMES = {
    "web_search",
    "google_search",
    "web_search_20250305",
    "google_search_retrieval",
    "builtin_web_search",
}
# Anthropic tool_choice value → Gemini functionCallingConfig.mode.
_TOOL_CHOICE_MODE = {
    "auto": "AUTO",
    "any": "ANY",
    "required": "ANY",
    "none": "NONE",
}

# Default safety settings: permissive policy for coding scenarios
# (can be overridden via AntigravityConfig.safety_settings).
_DEFAULT_SAFETY_SETTINGS: list[dict[str, str]] = [
    {"category": "HARM_CATEGORY_HARASSMENT", "threshold": "BLOCK_NONE"},
    {"category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "BLOCK_NONE"},
    {"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT", "threshold": "BLOCK_NONE"},
    {"category": "HARM_CATEGORY_DANGEROUS_CONTENT", "threshold": "BLOCK_ONLY_HIGH"},
]
33
+
34
+
35
@dataclass
class ConversionResult:
    """Conversion result together with adaptation diagnostics."""

    # Gemini-format request body ready to send to the API.
    body: dict[str, Any]
    # Labels recording every lossy or compatibility adaptation applied
    # during conversion (de-duplicated, in first-occurrence order).
    adaptations: list[str] = field(default_factory=list)
41
+
42
+
43
def convert_request(
    anthropic_body: dict[str, Any],
    *,
    model: str | None = None,
    safety_settings: dict[str, str] | None = None,
) -> ConversionResult:
    """Translate an Anthropic Messages API request body into Gemini format.

    Args:
        anthropic_body: Original Anthropic-style request payload.
        model: Target model name, used for model-specific compatibility tweaks.
        safety_settings: Optional category→threshold overrides; when omitted,
            the permissive coding-scenario defaults are used.

    Returns:
        A ConversionResult with the Gemini request body and a de-duplicated
        list of adaptation labels describing any lossy translations.
    """
    notes: list[str] = []
    id_to_tool_name: dict[str, str] = {}
    body: dict[str, Any] = {}

    system_part = _convert_system(anthropic_body.get("system"), notes)
    if system_part is not None:
        body["systemInstruction"] = system_part

    body["contents"] = _convert_messages(
        anthropic_body.get("messages", []), id_to_tool_name, notes
    )
    if not body["contents"]:
        # Gemini rejects requests without at least one content part.
        body["contents"] = [{"role": "user", "parts": [{"text": " "}]}]
        notes.append("empty_contents_padded")

    gen_config = _build_generation_config(anthropic_body, model=model, adaptations=notes)
    if gen_config:
        body["generationConfig"] = gen_config

    tools, tool_config = _build_tools(anthropic_body, notes)
    if tools:
        body["tools"] = tools
    if tool_config:
        body["toolConfig"] = tool_config

    # Safety settings: explicit caller overrides win; otherwise fall back to
    # the permissive coding-scenario defaults.
    if safety_settings is None:
        body["safetySettings"] = _DEFAULT_SAFETY_SETTINGS
    else:
        body["safetySettings"] = [
            {"category": category, "threshold": threshold}
            for category, threshold in safety_settings.items()
        ]

    if "metadata" in anthropic_body:
        metadata = anthropic_body.get("metadata") or {}
        # Gemini has no equivalent field; record that the metadata was dropped.
        if isinstance(metadata, dict) and metadata.get("user_id"):
            notes.append("metadata_user_id_not_forwarded")
        else:
            notes.append("metadata_ignored")

    deduped = _dedupe(notes)
    logger.debug(
        "Anthropic→Gemini 转换完成: adaptations=%s, keys=%s",
        deduped,
        list(body.keys()),
    )
    return ConversionResult(body=body, adaptations=deduped)
98
+
99
+
100
+ def _dedupe(items: list[str]) -> list[str]:
101
+ return list(dict.fromkeys(items))
102
+
103
+
104
+ def _convert_system(
105
+ system: str | list[dict] | None,
106
+ adaptations: list[str] | None = None,
107
+ ) -> dict[str, Any] | None:
108
+ if system is None:
109
+ return None
110
+ if isinstance(system, str):
111
+ return {"parts": [{"text": system}]}
112
+ parts = []
113
+ for block in system:
114
+ if isinstance(block, dict) and block.get("type") == "text":
115
+ if block.get("cache_control"):
116
+ if adaptations is not None:
117
+ adaptations.append("cache_control_stripped_from_system")
118
+ parts.append({"text": block["text"]})
119
+ return {"parts": parts} if parts else None
120
+
121
+
122
+ def _convert_messages(
123
+ messages: list[dict[str, Any]],
124
+ tool_name_by_id: dict[str, str],
125
+ adaptations: list[str],
126
+ ) -> list[dict[str, Any]]:
127
+ contents: list[dict[str, Any]] = []
128
+ for msg in messages:
129
+ role = _ROLE_MAP.get(msg.get("role", "user"), "user")
130
+ parts = _convert_content(msg.get("content", ""), tool_name_by_id, adaptations)
131
+ if parts:
132
+ contents.append({"role": role, "parts": parts})
133
+ return contents
134
+
135
+
136
+ def _convert_content(
137
+ content: str | list[dict[str, Any]],
138
+ tool_name_by_id: dict[str, str],
139
+ adaptations: list[str],
140
+ ) -> list[dict[str, Any]]:
141
+ if isinstance(content, str):
142
+ return [{"text": content}] if content else []
143
+
144
+ parts: list[dict[str, Any]] = []
145
+ for block in content:
146
+ block_type = block.get("type", "")
147
+ if block_type == "text":
148
+ text = block.get("text", "")
149
+ if text:
150
+ if block.get("cache_control"):
151
+ adaptations.append("cache_control_stripped_from_content")
152
+ parts.append({"text": text})
153
+ elif block_type == "thinking":
154
+ text = block.get("thinking", "")
155
+ if text:
156
+ part: dict[str, Any] = {"text": text, "thought": True}
157
+ signature = block.get("signature")
158
+ if signature:
159
+ part["thoughtSignature"] = signature
160
+ else:
161
+ adaptations.append("thinking_signature_missing")
162
+ parts.append(part)
163
+ elif block_type == "redacted_thinking":
164
+ data = block.get("data", "")
165
+ if data:
166
+ parts.append({"text": f"[Redacted Thinking: {data}]", "thought": True})
167
+ adaptations.append("redacted_thinking_downgraded")
168
+ elif block_type == "image":
169
+ source = block.get("source", {})
170
+ if source.get("type") == "base64":
171
+ parts.append({
172
+ "inlineData": {
173
+ "mimeType": source.get("media_type", "image/png"),
174
+ "data": source.get("data", ""),
175
+ }
176
+ })
177
+ elif block_type == "tool_use":
178
+ name = block.get("name", "")
179
+ tool_id = block.get("id", "")
180
+ if tool_id and name:
181
+ tool_name_by_id[tool_id] = name
182
+ part = {
183
+ "functionCall": {
184
+ "name": name,
185
+ "args": block.get("input", {}),
186
+ "id": tool_id or None,
187
+ }
188
+ }
189
+ signature = block.get("signature")
190
+ if signature:
191
+ part["thoughtSignature"] = signature
192
+ parts.append(part)
193
+ elif block_type == "tool_result":
194
+ tool_use_id = block.get("tool_use_id", "")
195
+ tool_content = block.get("content", "")
196
+ text = _stringify_tool_content(tool_content)
197
+ parts.append({
198
+ "functionResponse": {
199
+ "name": tool_name_by_id.get(tool_use_id, tool_use_id),
200
+ "response": {"result": text},
201
+ "id": tool_use_id or None,
202
+ }
203
+ })
204
+ if tool_use_id and tool_use_id not in tool_name_by_id:
205
+ adaptations.append("tool_result_name_fallback_to_tool_use_id")
206
+ else:
207
+ logger.debug("跳过不支持的内容块类型: %s", block_type)
208
+ return parts
209
+
210
+
211
+ def _stringify_tool_content(content: Any) -> str:
212
+ if isinstance(content, str):
213
+ return content
214
+ if isinstance(content, list):
215
+ chunks: list[str] = []
216
+ for block in content:
217
+ if not isinstance(block, dict):
218
+ continue
219
+ if block.get("type") == "text" and isinstance(block.get("text"), str):
220
+ chunks.append(block["text"])
221
+ elif block.get("type") == "image":
222
+ chunks.append("[image]")
223
+ logger.debug(
224
+ "tool_result 中的图片内容降级为 [image] 占位符",
225
+ )
226
+ return "\n".join(chunks)
227
+ return str(content)
228
+
229
+
230
+ def _build_generation_config(
231
+ body: dict[str, Any],
232
+ *,
233
+ model: str | None,
234
+ adaptations: list[str],
235
+ ) -> dict[str, Any]:
236
+ config: dict[str, Any] = {}
237
+
238
+ if "max_tokens" in body:
239
+ config["maxOutputTokens"] = body["max_tokens"]
240
+ if "temperature" in body:
241
+ config["temperature"] = body["temperature"]
242
+ if "top_p" in body:
243
+ config["topP"] = body["top_p"]
244
+ if "top_k" in body:
245
+ config["topK"] = body["top_k"]
246
+ if "stop_sequences" in body:
247
+ config["stopSequences"] = body["stop_sequences"]
248
+
249
+ thinking_cfg = body.get("thinking") or body.get("extended_thinking")
250
+ if thinking_cfg:
251
+ config["thinkingConfig"] = {
252
+ "includeThoughts": True,
253
+ }
254
+ if isinstance(thinking_cfg, dict):
255
+ budget = thinking_cfg.get("budget_tokens")
256
+ if isinstance(budget, int) and budget > 0:
257
+ config["thinkingConfig"]["thinkingBudget"] = budget
258
+ else:
259
+ # Gemini 要求 includeThoughts 时必须指定 thinkingBudget
260
+ config["thinkingConfig"]["thinkingBudget"] = 10000
261
+ adaptations.append("thinking_budget_defaulted_to_10k")
262
+ effort = thinking_cfg.get("effort")
263
+ if isinstance(effort, str) and effort:
264
+ config["thinkingConfig"]["thinkingLevel"] = effort
265
+
266
+ # Anthropic response_format → Gemini responseMimeType
267
+ response_format = body.get("response_format")
268
+ if isinstance(response_format, dict):
269
+ rf_type = str(response_format.get("type", ""))
270
+ if rf_type.startswith("json"):
271
+ config["responseMimeType"] = "application/json"
272
+ adaptations.append("response_format_json_mode")
273
+
274
+ has_tools = bool(body.get("tools"))
275
+ has_tool_use = any(
276
+ isinstance(msg.get("content"), list)
277
+ and any(
278
+ isinstance(block, dict) and block.get("type") == "tool_use"
279
+ for block in msg["content"]
280
+ )
281
+ for msg in body.get("messages", [])
282
+ )
283
+ if config.get("thinkingConfig") and has_tools and has_tool_use and model and not model.startswith("gemini-"):
284
+ del config["thinkingConfig"]
285
+ adaptations.append("thinking_disabled_for_tool_call_compatibility")
286
+
287
+ return config
288
+
289
+
290
+ def _build_tools(
291
+ body: dict[str, Any],
292
+ adaptations: list[str],
293
+ ) -> tuple[list[dict[str, Any]], dict[str, Any] | None]:
294
+ source_tools = body.get("tools") or []
295
+ if not source_tools:
296
+ if body.get("tool_choice"):
297
+ adaptations.append("tool_choice_ignored_without_tools")
298
+ return [], None
299
+
300
+ function_declarations: list[dict[str, Any]] = []
301
+ include_search = False
302
+ for tool in source_tools:
303
+ if not isinstance(tool, dict):
304
+ continue
305
+ tool_name = str(tool.get("name") or tool.get("type") or "")
306
+ if tool_name in _SEARCH_TOOL_NAMES:
307
+ include_search = True
308
+ adaptations.append("search_tool_mapped_to_google_search")
309
+ continue
310
+ declaration: dict[str, Any] = {"name": tool_name}
311
+ description = tool.get("description")
312
+ if isinstance(description, str) and description:
313
+ declaration["description"] = description
314
+ input_schema = tool.get("input_schema")
315
+ if isinstance(input_schema, dict):
316
+ declaration["parameters"] = input_schema
317
+ function_declarations.append(declaration)
318
+
319
+ if len(function_declarations) > 100:
320
+ logger.warning(
321
+ "Large tool set (%d functionDeclarations) may exceed Gemini API limits",
322
+ len(function_declarations),
323
+ )
324
+ adaptations.append(f"large_tool_set_{len(function_declarations)}_declarations")
325
+
326
+ tools: list[dict[str, Any]] = []
327
+ if function_declarations:
328
+ tools.append({"functionDeclarations": function_declarations})
329
+ if include_search:
330
+ tools.append({"googleSearch": {}})
331
+
332
+ tool_config: dict[str, Any] | None = None
333
+ tool_choice = body.get("tool_choice")
334
+ if tool_choice and function_declarations:
335
+ mode = "AUTO"
336
+ allowed_names: list[str] | None = None
337
+ if isinstance(tool_choice, str):
338
+ mode = _TOOL_CHOICE_MODE.get(tool_choice, "AUTO")
339
+ elif isinstance(tool_choice, dict):
340
+ choice_type = str(tool_choice.get("type", "")).lower()
341
+ mode = _TOOL_CHOICE_MODE.get(choice_type, "AUTO")
342
+ if choice_type == "tool":
343
+ name = tool_choice.get("name")
344
+ if isinstance(name, str) and name:
345
+ mode = "ANY"
346
+ allowed_names = [name]
347
+ adaptations.append("tool_choice_tool_mapped_to_allowed_function_names")
348
+ tool_config = {"functionCallingConfig": {"mode": mode}}
349
+ if allowed_names:
350
+ tool_config["functionCallingConfig"]["allowedFunctionNames"] = allowed_names
351
+
352
+ return tools, tool_config