copilotx 2.3.2__tar.gz → 2.3.4__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (31) hide show
  1. {copilotx-2.3.2 → copilotx-2.3.4}/PKG-INFO +2 -2
  2. {copilotx-2.3.2 → copilotx-2.3.4}/README.md +1 -1
  3. {copilotx-2.3.2 → copilotx-2.3.4}/pyproject.toml +1 -1
  4. {copilotx-2.3.2 → copilotx-2.3.4}/src/copilotx/proxy/client.py +15 -8
  5. {copilotx-2.3.2 → copilotx-2.3.4}/src/copilotx/proxy/translator.py +31 -16
  6. {copilotx-2.3.2 → copilotx-2.3.4}/src/copilotx/server/routes_anthropic.py +31 -5
  7. copilotx-2.3.4/uv.lock +752 -0
  8. {copilotx-2.3.2 → copilotx-2.3.4}/.gitignore +0 -0
  9. {copilotx-2.3.2 → copilotx-2.3.4}/LICENSE +0 -0
  10. {copilotx-2.3.2 → copilotx-2.3.4}/deploy/.env.example +0 -0
  11. {copilotx-2.3.2 → copilotx-2.3.4}/deploy/Caddyfile +0 -0
  12. {copilotx-2.3.2 → copilotx-2.3.4}/deploy/copilotx-azureuser.service +0 -0
  13. {copilotx-2.3.2 → copilotx-2.3.4}/deploy/copilotx.service +0 -0
  14. {copilotx-2.3.2 → copilotx-2.3.4}/deploy/nginx-copilotx-http.conf +0 -0
  15. {copilotx-2.3.2 → copilotx-2.3.4}/deploy/nginx-copilotx.conf +0 -0
  16. {copilotx-2.3.2 → copilotx-2.3.4}/src/copilotx/__init__.py +0 -0
  17. {copilotx-2.3.2 → copilotx-2.3.4}/src/copilotx/__main__.py +0 -0
  18. {copilotx-2.3.2 → copilotx-2.3.4}/src/copilotx/auth/__init__.py +0 -0
  19. {copilotx-2.3.2 → copilotx-2.3.4}/src/copilotx/auth/oauth.py +0 -0
  20. {copilotx-2.3.2 → copilotx-2.3.4}/src/copilotx/auth/storage.py +0 -0
  21. {copilotx-2.3.2 → copilotx-2.3.4}/src/copilotx/auth/token.py +0 -0
  22. {copilotx-2.3.2 → copilotx-2.3.4}/src/copilotx/cli.py +0 -0
  23. {copilotx-2.3.2 → copilotx-2.3.4}/src/copilotx/config.py +0 -0
  24. {copilotx-2.3.2 → copilotx-2.3.4}/src/copilotx/proxy/__init__.py +0 -0
  25. {copilotx-2.3.2 → copilotx-2.3.4}/src/copilotx/proxy/responses_stream.py +0 -0
  26. {copilotx-2.3.2 → copilotx-2.3.4}/src/copilotx/proxy/streaming.py +0 -0
  27. {copilotx-2.3.2 → copilotx-2.3.4}/src/copilotx/server/__init__.py +0 -0
  28. {copilotx-2.3.2 → copilotx-2.3.4}/src/copilotx/server/app.py +0 -0
  29. {copilotx-2.3.2 → copilotx-2.3.4}/src/copilotx/server/routes_models.py +0 -0
  30. {copilotx-2.3.2 → copilotx-2.3.4}/src/copilotx/server/routes_openai.py +0 -0
  31. {copilotx-2.3.2 → copilotx-2.3.4}/src/copilotx/server/routes_responses.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: copilotx
3
- Version: 2.3.2
3
+ Version: 2.3.4
4
4
  Summary: Local GitHub Copilot API proxy — use GPT-4o, Claude, Gemini via OpenAI/Anthropic compatible APIs
5
5
  Project-URL: Homepage, https://github.com/Polly2014/CopilotX
6
6
  Project-URL: Repository, https://github.com/Polly2014/CopilotX
@@ -72,7 +72,7 @@ copilotx serve
72
72
 
73
73
  Output:
74
74
  ```
75
- 🚀 CopilotX v2.2.1
75
+ 🚀 CopilotX v2.3.3
76
76
  ✅ Copilot Token valid (28m remaining, auto-refresh)
77
77
  🔒 Local mode (localhost only)
78
78
  🎯 API: api.enterprise.githubcopilot.com (auto-detected)
@@ -47,7 +47,7 @@ copilotx serve
47
47
 
48
48
  Output:
49
49
  ```
50
- 🚀 CopilotX v2.2.1
50
+ 🚀 CopilotX v2.3.3
51
51
  ✅ Copilot Token valid (28m remaining, auto-refresh)
52
52
  🔒 Local mode (localhost only)
53
53
  🎯 API: api.enterprise.githubcopilot.com (auto-detected)
@@ -1,6 +1,6 @@
1
1
  [project]
2
2
  name = "copilotx"
3
- version = "2.3.2"
3
+ version = "2.3.4"
4
4
  description = "Local GitHub Copilot API proxy — use GPT-4o, Claude, Gemini via OpenAI/Anthropic compatible APIs"
5
5
  readme = "README.md"
6
6
  license = { text = "MIT" }
@@ -85,11 +85,14 @@ class CopilotClient:
85
85
 
86
86
  # ── Chat Completions (non-streaming) ────────────────────────────
87
87
 
88
- async def chat_completions(self, payload: dict) -> dict:
88
+ async def chat_completions(self, payload: dict, *, vision: bool = False) -> dict:
89
89
  """POST /chat/completions — non-streaming."""
90
90
  assert self._client is not None
91
91
  url = f"{self._api_base}{COPILOT_CHAT_COMPLETIONS_PATH}"
92
- resp = await self._client.post(url, json=payload, headers=self._headers())
92
+ extra: dict[str, str] = {}
93
+ if vision:
94
+ extra["copilot-vision-request"] = "true"
95
+ resp = await self._client.post(url, json=payload, headers=self._headers(extra))
93
96
  if resp.status_code >= 400:
94
97
  error_body = resp.text
95
98
  logger.error(
@@ -105,14 +108,17 @@ class CopilotClient:
105
108
 
106
109
  # ── Chat Completions (streaming) ────────────────────────────────
107
110
 
108
- async def chat_completions_stream(self, payload: dict) -> AsyncIterator[bytes]:
111
+ async def chat_completions_stream(self, payload: dict, *, vision: bool = False) -> AsyncIterator[bytes]:
109
112
  """POST /chat/completions with stream=true — yields raw SSE lines."""
110
113
  assert self._client is not None
111
114
  payload["stream"] = True
112
115
  url = f"{self._api_base}{COPILOT_CHAT_COMPLETIONS_PATH}"
116
+ extra: dict[str, str] = {}
117
+ if vision:
118
+ extra["copilot-vision-request"] = "true"
113
119
 
114
120
  async with self._client.stream(
115
- "POST", url, json=payload, headers=self._headers(),
121
+ "POST", url, json=payload, headers=self._headers(extra),
116
122
  ) as resp:
117
123
  if resp.status_code >= 400:
118
124
  error_body = await resp.aread()
@@ -126,10 +132,11 @@ class CopilotClient:
126
132
  response=resp,
127
133
  )
128
134
  async for line in resp.aiter_lines():
129
- if line:
130
- yield (line + "\n").encode("utf-8")
131
- # Ensure final newline
132
- yield b"\n"
135
+ # Yield ALL lines including empty ones — empty lines are
136
+ # SSE event delimiters and MUST be preserved for clients
137
+ # (e.g. OpenAI Python SDK) that rely on them to separate
138
+ # JSON chunks.
139
+ yield (line + "\n").encode("utf-8")
133
140
 
134
141
  # ── Responses API (non-streaming) ───────────────────────────────
135
142
 
@@ -209,29 +209,33 @@ def anthropic_to_openai_request(body: dict) -> dict:
209
209
 
210
210
  # --- Handle user messages with tool_result blocks ---
211
211
  elif tool_result_blocks:
212
- # If there's also regular text content, add it first
213
- if text_parts:
214
- if has_non_text or len(text_parts) > 1:
215
- messages.append({"role": role, "content": text_parts})
216
- else:
217
- text = "\n".join(
218
- p["text"] for p in text_parts if p.get("type") == "text"
219
- )
220
- if text:
221
- messages.append({"role": role, "content": text})
222
-
223
- # Convert each tool_result → OpenAI tool message
212
+ # Convert each tool_result → OpenAI tool message FIRST
213
+ # (OpenAI requires tool messages immediately after the
214
+ # assistant message with tool_calls — no user message
215
+ # in between)
224
216
  for tr in tool_result_blocks:
225
217
  tool_content = tr.get("content", "")
226
218
  # Anthropic tool_result content can be string or list of blocks
227
219
  if isinstance(tool_content, list):
228
- parts = []
220
+ text_parts_tr: list[str] = []
221
+ image_count = 0
229
222
  for tc_block in tool_content:
230
223
  if isinstance(tc_block, str):
231
- parts.append(tc_block)
224
+ text_parts_tr.append(tc_block)
232
225
  elif tc_block.get("type") == "text":
233
- parts.append(tc_block["text"])
234
- tool_content = "\n".join(parts)
226
+ text_parts_tr.append(tc_block["text"])
227
+ elif tc_block.get("type") == "image":
228
+ image_count += 1
229
+
230
+ if text_parts_tr:
231
+ tool_content = "\n".join(text_parts_tr)
232
+ if image_count:
233
+ tool_content += f"\n[+{image_count} image(s) omitted from tool result]"
234
+ elif image_count:
235
+ # Tool result only has images, no text
236
+ tool_content = f"[Tool returned {image_count} image(s) — content processed by the model in a previous turn]"
237
+ else:
238
+ tool_content = ""
235
239
  elif not isinstance(tool_content, str):
236
240
  tool_content = json.dumps(tool_content)
237
241
 
@@ -246,6 +250,17 @@ def anthropic_to_openai_request(body: dict) -> dict:
246
250
  tool_msg["content"] = f"[ERROR] {tool_content}"
247
251
  messages.append(tool_msg)
248
252
 
253
+ # Add any accompanying text content AFTER tool messages
254
+ if text_parts:
255
+ if has_non_text or len(text_parts) > 1:
256
+ messages.append({"role": role, "content": text_parts})
257
+ else:
258
+ text = "\n".join(
259
+ p["text"] for p in text_parts if p.get("type") == "text"
260
+ )
261
+ if text:
262
+ messages.append({"role": role, "content": text})
263
+
249
264
  # --- Regular content (no tool blocks) ---
250
265
  else:
251
266
  if has_non_text or len(text_parts) > 1:
@@ -25,6 +25,23 @@ logger = logging.getLogger(__name__)
25
25
  router = APIRouter(tags=["Anthropic"])
26
26
 
27
27
 
28
+ def _has_vision_content(body: dict) -> bool:
29
+ """Check if an Anthropic /v1/messages request contains image content.
30
+
31
+ Checks direct image blocks in message content (user/assistant messages).
32
+ Note: Images in tool_result blocks are replaced with placeholder text
33
+ during translation, so they don't require the vision header.
34
+ """
35
+ for msg in body.get("messages", []):
36
+ content = msg.get("content")
37
+ if not isinstance(content, list):
38
+ continue
39
+ for block in content:
40
+ if isinstance(block, dict) and block.get("type") == "image":
41
+ return True
42
+ return False
43
+
44
+
28
45
  @router.post("/v1/messages")
29
46
  async def messages(request: Request):
30
47
  """Anthropic-compatible messages endpoint.
@@ -35,31 +52,40 @@ async def messages(request: Request):
35
52
  body = await request.json()
36
53
  model = body.get("model", "gpt-4o")
37
54
  is_stream = body.get("stream", False)
55
+ vision = _has_vision_content(body)
38
56
 
39
- # Log the incoming request for debugging
57
+ # Log the incoming request
40
58
  logger.info(
41
- "Anthropic request: model=%s stream=%s max_tokens=%s tools=%d keys=%s",
59
+ "Anthropic request: model=%s stream=%s max_tokens=%s tools=%d vision=%s msgs=%d",
42
60
  model,
43
61
  is_stream,
44
62
  body.get("max_tokens"),
45
63
  len(body.get("tools", [])),
46
- list(body.keys()),
64
+ vision,
65
+ len(body.get("messages", [])),
47
66
  )
48
67
 
49
68
  # Translate Anthropic request → OpenAI request
50
69
  openai_payload = anthropic_to_openai_request(body)
70
+ mapped_model = openai_payload.get("model", "?")
71
+ if mapped_model != model:
72
+ logger.info("Model mapped: %s → %s", model, mapped_model)
51
73
 
52
74
  client = await get_ready_client(request.app.state)
53
75
 
54
76
  try:
55
77
  if is_stream:
56
78
  # Stream: OpenAI SSE → Anthropic SSE
57
- openai_stream = client.chat_completions_stream(openai_payload)
79
+ openai_stream = client.chat_completions_stream(
80
+ openai_payload, vision=vision,
81
+ )
58
82
  anthropic_stream = openai_stream_to_anthropic_stream(openai_stream, model)
59
83
  return sse_response(anthropic_stream)
60
84
  else:
61
85
  # Non-stream: translate response
62
- openai_resp = await client.chat_completions(openai_payload)
86
+ openai_resp = await client.chat_completions(
87
+ openai_payload, vision=vision,
88
+ )
63
89
  anthropic_resp = openai_to_anthropic_response(openai_resp, model)
64
90
  return JSONResponse(content=anthropic_resp)
65
91
  except Exception as e: