local-openai2anthropic 0.2.3__py3-none-any.whl → 0.3.6__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
@@ -0,0 +1,357 @@
+ # SPDX-License-Identifier: Apache-2.0
+ """Server tool handling."""
+
+ import json
+ import logging
+ from http import HTTPStatus
+ from typing import Any
+
+ import httpx
+ from fastapi.responses import JSONResponse
+
+ from local_openai2anthropic.config import Settings
+ from local_openai2anthropic.converter import convert_openai_to_anthropic
+ from local_openai2anthropic.protocol import AnthropicError, AnthropicErrorResponse
+ from local_openai2anthropic.server_tools import ServerToolRegistry
+ from local_openai2anthropic.utils.tokens import (
+     _generate_server_tool_id,
+     _normalize_usage,
+ )
+
+ logger = logging.getLogger(__name__)
+
+
+ class ServerToolHandler:
+     """Handles server tool execution for non-streaming requests."""
+
+     def __init__(
+         self,
+         server_tools: list[type],
+         configs: dict[str, dict[str, Any]],
+         settings: Settings,
+     ):
+         self.server_tools = {t.tool_name: t for t in server_tools}
+         self.configs = configs
+         self.settings = settings
+         self.usage: dict[str, int] = {}
+
+     def is_server_tool_call(self, tool_call: dict[str, Any]) -> bool:
+         """Check if a tool call is for a server tool."""
+         func_name = tool_call.get("function", {}).get("name")
+         return func_name in self.server_tools
+
+     async def execute_tool(
+         self,
+         tool_call: dict[str, Any],
+     ) -> tuple[list[dict[str, Any]], dict[str, Any]]:
+         """
+         Execute a server tool and return content blocks + tool result message.
+         Returns: (content_blocks, tool_result_message)
+         """
+         func_name = tool_call.get("function", {}).get("name")
+         call_id = tool_call.get("id", "")
+         openai_call_id = tool_call.get("openai_id", call_id)
+
+         tool_class = self.server_tools[func_name]
+         config = self.configs.get(tool_class.tool_type, {})
+
+         # Extract call arguments
+         args = tool_class.extract_call_args(tool_call)
+         if args is None:
+             args = {}
+
+         # Execute the tool
+         result = await tool_class.execute(call_id, args, config, self.settings)
+
+         # Update usage
+         for key, value in result.usage_increment.items():
+             self.usage[key] = self.usage.get(key, 0) + value
+
+         # Build content blocks
+         content_blocks = tool_class.build_content_blocks(call_id, args, result)
+
+         # Build tool result message for OpenAI
+         tool_result_msg = tool_class.build_tool_result_message(
+             openai_call_id, args, result
+         )
+
+         return content_blocks, tool_result_msg
+
+
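For orientation, the handler above only needs each tool class to expose `tool_name`/`tool_type` attributes plus the `extract_call_args`, `execute`, `build_content_blocks`, and `build_tool_result_message` hooks it calls. Below is a minimal sketch of a conforming class; the `EchoSearchTool` name and behavior are invented for illustration, and only the interface mirrors the code above:

```python
import json
from typing import Any


class EchoSearchTool:
    """Hypothetical server tool; only the interface mirrors the handler above."""

    tool_name = "echo_search"           # matched against function.name in tool calls
    tool_type = "echo_search_20250101"  # keys into the handler's configs dict

    @classmethod
    def extract_call_args(cls, tool_call: dict[str, Any]) -> dict[str, Any] | None:
        try:
            return json.loads(tool_call["function"]["arguments"])
        except (KeyError, ValueError):
            return None  # the handler falls back to {}


# Dispatch in ServerToolHandler is a plain dict lookup keyed by tool_name:
server_tools = {t.tool_name: t for t in [EchoSearchTool]}
call = {"id": "call_1", "function": {"name": "echo_search", "arguments": "{}"}}
print(call["function"]["name"] in server_tools)  # True -> routed as a server tool
```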
+ async def _handle_with_server_tools(
+     openai_params: dict[str, Any],
+     url: str,
+     headers: dict[str, str],
+     settings: Settings,
+     server_tools: list[type],
+     model: str,
+ ) -> JSONResponse:
+     """Handle request with server tool execution loop."""
+     params = dict(openai_params)
+     configs = params.pop("_server_tools_config", {})
+
+     handler = ServerToolHandler(server_tools, configs, settings)
+     accumulated_content: list[dict[str, Any]] = []
+
+     # Get max_uses from configs (default to settings or 5)
+     max_uses = settings.websearch_max_uses
+     for config in configs.values():
+         if config.get("max_uses"):
+             max_uses = config["max_uses"]
+             break
+
+     total_tool_calls = 0
+
+     while True:
+         async with httpx.AsyncClient(timeout=settings.request_timeout) as client:
+             try:
+                 # Log full request for debugging
+                 logger.debug(
+                     f"Request body: {json.dumps(params, indent=2, default=str)[:3000]}"
+                 )
+
+                 response = await client.post(url, headers=headers, json=params)
+
+                 if response.status_code != 200:
+                     logger.error(
+                         f"OpenAI API error: {response.status_code} - {response.text}"
+                     )
+                     raw_text = response.text
+                     try:
+                         if not raw_text:
+                             raw_text = response.content.decode(
+                                 "utf-8", errors="replace"
+                             )
+                     except Exception:
+                         raw_text = ""
+                     if not raw_text:
+                         raw_text = response.reason_phrase or ""
+                     error_message = (raw_text or "").strip()
+                     error_response = AnthropicErrorResponse(
+                         error=AnthropicError(
+                             type="api_error",
+                             message=error_message
+                             or f"Upstream API error ({response.status_code})",
+                         )
+                     )
+                     return JSONResponse(
+                         status_code=response.status_code,
+                         content=error_response.model_dump(),
+                     )
+
+                 completion_data = response.json()
+                 logger.debug(
+                     f"OpenAI response: {json.dumps(completion_data, indent=2)[:500]}..."
+                 )
+                 from openai.types.chat import ChatCompletion
+
+                 completion = ChatCompletion.model_validate(completion_data)
+
+                 # Check for server tool calls
+                 server_tool_calls = []
+                 other_tool_calls = []
+
+                 tool_calls = completion.choices[0].message.tool_calls
+                 logger.info(
+                     f"Model returned tool_calls: {len(tool_calls) if tool_calls else 0}"
+                 )
+
+                 if tool_calls:
+                     for tc in tool_calls:
+                         func = getattr(tc, "function", None)
+                         func_name = func.name if func else ""
+                         logger.info(f"  Tool call: {func_name}")
+
+                         # Generate Anthropic-style ID for server tools
+                         is_server = handler.is_server_tool_call(
+                             {
+                                 "id": tc.id,
+                                 "function": {"name": func_name, "arguments": ""},
+                             }
+                         )
+
+                         # Use Anthropic-style ID for server tools, original ID otherwise
+                         client_tool_id = (
+                             _generate_server_tool_id() if is_server else tc.id
+                         )
+
+                         tc_dict = {
+                             "id": client_tool_id,
+                             "openai_id": tc.id,
+                             "function": {
+                                 "name": func_name,
+                                 "arguments": func.arguments if func else "{}",
+                             },
+                         }
+                         logger.info(
+                             f"  Is server tool: {is_server}, ID: {client_tool_id}"
+                         )
+                         if is_server:
+                             server_tool_calls.append(tc_dict)
+                         else:
+                             other_tool_calls.append(tc)
+
+                 # No server tool calls - we're done
+                 logger.info(
+                     f"Server tool calls: {len(server_tool_calls)}, Other: {len(other_tool_calls)}"
+                 )
+                 if not server_tool_calls:
+                     message = convert_openai_to_anthropic(completion, model)
+
+                     if accumulated_content:
+                         message_dict = message.model_dump()
+                         message_dict["content"] = (
+                             accumulated_content + message_dict.get("content", [])
+                         )
+
+                         if message_dict.get("usage"):
+                             message_dict["usage"]["server_tool_use"] = handler.usage
+                             message_dict["usage"] = _normalize_usage(
+                                 message_dict.get("usage")
+                             )
+
+                         # Log full response for debugging
+                         logger.info(
+                             f"Response content blocks: {json.dumps(message_dict.get('content', []), ensure_ascii=False)[:1000]}"
+                         )
+                         logger.info(f"Response usage: {message_dict.get('usage')}")
+                         logger.info(f"Server tool use count: {handler.usage}")
+
+                         return JSONResponse(content=message_dict)
+
+                     message_dict = message.model_dump()
+                     message_dict["usage"] = _normalize_usage(message_dict.get("usage"))
+                     return JSONResponse(content=message_dict)
+
+                 # Check max_uses limit
+                 if total_tool_calls >= max_uses:
+                     logger.warning(f"Server tool max_uses ({max_uses}) exceeded")
+                     # Return error for each call
+                     for call in server_tool_calls:
+                         func_name = call.get("function", {}).get("name", "")
+                         tool_class = handler.server_tools.get(func_name)
+                         if tool_class:
+                             from local_openai2anthropic.server_tools import ToolResult
+
+                             error_result = ToolResult(
+                                 success=False,
+                                 content=[],
+                                 error_code="max_uses_exceeded",
+                             )
+                             error_blocks = tool_class.build_content_blocks(
+                                 call["id"],
+                                 {},
+                                 error_result,
+                             )
+                             accumulated_content.extend(error_blocks)
+
+                     # Continue with modified messages
+                     assistant_tool_calls = []
+                     for call in server_tool_calls:
+                         assistant_tool_calls.append(
+                             {
+                                 "id": call.get("openai_id", call.get("id", "")),
+                                 "type": "function",
+                                 "function": {
+                                     "name": call.get("function", {}).get("name", ""),
+                                     "arguments": call.get("function", {}).get(
+                                         "arguments", "{}"
+                                     ),
+                                 },
+                             }
+                         )
+                     messages = params.get("messages", [])
+                     messages = _add_tool_results_to_messages(
+                         messages, assistant_tool_calls, handler, is_error=True
+                     )
+                     params["messages"] = messages
+                     continue
+
+                 # Execute server tools
+                 messages = params.get("messages", [])
+                 assistant_tool_calls = []
+                 tool_results = []
+
+                 for call in server_tool_calls:
+                     total_tool_calls += 1
+                     content_blocks, tool_result = await handler.execute_tool(call)
+                     accumulated_content.extend(content_blocks)
+
+                     # Track for assistant message
+                     assistant_tool_calls.append(
+                         {
+                             "id": call.get("openai_id", call.get("id", "")),
+                             "type": "function",
+                             "function": {
+                                 "name": call["function"]["name"],
+                                 "arguments": call["function"]["arguments"],
+                             },
+                         }
+                     )
+                     tool_results.append(tool_result)
+
+                 # Add to messages for next iteration
+                 messages = _add_tool_results_to_messages(
+                     messages, assistant_tool_calls, handler, tool_results=tool_results
+                 )
+                 params["messages"] = messages
+
+             except httpx.TimeoutException:
+                 error_response = AnthropicErrorResponse(
+                     error=AnthropicError(
+                         type="timeout_error", message="Request timed out"
+                     )
+                 )
+                 return JSONResponse(
+                     status_code=HTTPStatus.GATEWAY_TIMEOUT,
+                     content=error_response.model_dump(),
+                 )
+             except httpx.RequestError as e:
+                 error_response = AnthropicErrorResponse(
+                     error=AnthropicError(type="connection_error", message=str(e))
+                 )
+                 return JSONResponse(
+                     status_code=HTTPStatus.BAD_GATEWAY,
+                     content=error_response.model_dump(),
+                 )
+
+
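Distilled, the function above alternates one backend round-trip with one batch of local tool executions until the model stops requesting server tools or the `max_uses` budget is spent. A minimal synchronous sketch of that shape follows; `tool_loop`, `post`, and `execute_tool` are illustrative stand-ins, and the real code also accumulates content blocks, tracks usage, and converts the final completion to the Anthropic format:

```python
from typing import Any, Callable


def tool_loop(
    post: Callable[[dict[str, Any]], dict[str, Any]],
    execute_tool: Callable[[dict[str, Any]], dict[str, Any]],
    params: dict[str, Any],
    max_uses: int,
) -> dict[str, Any]:
    uses = 0
    while True:
        completion = post(params)  # one round-trip to the OpenAI-compatible backend
        server_calls = [
            c for c in (completion.get("tool_calls") or []) if c.get("is_server")
        ]
        if not server_calls or uses >= max_uses:
            return completion  # done, or budget spent (real code emits error blocks)
        # Append the assistant turn, then one "tool" result message per call,
        # so the next iteration sees the tool output in context.
        params["messages"].append(
            {"role": "assistant", "content": "", "tool_calls": server_calls}
        )
        for c in server_calls:
            uses += 1
            params["messages"].append(execute_tool(c))
```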
+ def _add_tool_results_to_messages(
+     messages: list[dict[str, Any]],
+     tool_calls: list[dict[str, Any]],
+     handler: ServerToolHandler,
+     tool_results: list[dict[str, Any]] | None = None,
+     is_error: bool = False,
+ ) -> list[dict[str, Any]]:
+     """Add assistant tool call and results to messages."""
+     messages = list(messages)
+
+     # Add assistant message with tool calls
+     # SGLang requires content to be a string, not None
+     assistant_msg: dict[str, Any] = {
+         "role": "assistant",
+         "content": "",  # Empty string instead of None for SGLang compatibility
+         "tool_calls": tool_calls,
+     }
+     messages.append(assistant_msg)
+
+     # Add tool results
+     if is_error:
+         for call in tool_calls:
+             tool_call_id = call.get("openai_id", call.get("id", ""))
+             messages.append(
+                 {
+                     "role": "tool",
+                     "tool_call_id": tool_call_id,
+                     "content": json.dumps(
+                         {
+                             "error": "max_uses_exceeded",
+                             "message": "Maximum tool uses exceeded.",
+                         }
+                     ),
+                 }
+             )
+     elif tool_results:
+         messages.extend(tool_results)
+
+     return messages
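For reference, one pass through this helper grows the message list by an assistant turn plus one `tool` message per call; the values below are illustrative:

```python
messages = [
    {"role": "user", "content": "What changed in 0.3.6?"},
    # Appended assistant turn; content is "" (not None) for SGLang compatibility.
    {
        "role": "assistant",
        "content": "",
        "tool_calls": [
            {
                "id": "call_abc123",
                "type": "function",
                "function": {"name": "web_search", "arguments": '{"query": "..."}'},
            }
        ],
    },
    # Appended result, or the max_uses_exceeded error JSON on the error path.
    {"role": "tool", "tool_call_id": "call_abc123", "content": '{"results": []}'},
]
```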
@@ -0,0 +1,18 @@
+ # SPDX-License-Identifier: Apache-2.0
+ """Utility functions for local_openai2anthropic."""
+
+ from .tokens import (
+     _chunk_text,
+     _count_tokens,
+     _estimate_input_tokens,
+     _generate_server_tool_id,
+     _normalize_usage,
+ )
+
+ __all__ = [
+     "_chunk_text",
+     "_count_tokens",
+     "_estimate_input_tokens",
+     "_generate_server_tool_id",
+     "_normalize_usage",
+ ]
@@ -0,0 +1,96 @@
+ # SPDX-License-Identifier: Apache-2.0
+ """Token-related utility functions."""
+
+ import json
+ import secrets
+ import string
+ from typing import Any
+
+
+ def _generate_server_tool_id() -> str:
+     """Generate Anthropic-style server tool use ID (srvtoolu_...)."""
+     # Generate 24 random alphanumeric characters
+     chars = string.ascii_lowercase + string.digits
+     random_part = "".join(secrets.choice(chars) for _ in range(24))
+     return f"srvtoolu_{random_part}"
+
+
+ def _normalize_usage(usage: dict[str, Any] | None) -> dict[str, Any] | None:
+     if not isinstance(usage, dict):
+         return usage
+     allowed_keys = {
+         "input_tokens",
+         "output_tokens",
+         "cache_creation_input_tokens",
+         "cache_read_input_tokens",
+         "server_tool_use",
+     }
+     normalized = {k: v for k, v in usage.items() if k in allowed_keys}
+     return normalized or None
+
+
+ def _count_tokens(text: str) -> int:
+     try:
+         import tiktoken  # type: ignore[import-not-found]
+     except Exception:
+         return 0
+
+     encoding = tiktoken.get_encoding("cl100k_base")
+     return len(encoding.encode(text))
+
+
+ def _chunk_text(text: str, chunk_size: int = 200) -> list[str]:
+     if not text:
+         return []
+     return [text[i : i + chunk_size] for i in range(0, len(text), chunk_size)]
+
+
+ def _estimate_input_tokens(openai_params: dict[str, Any]) -> int:
+     try:
+         import tiktoken  # type: ignore[import-not-found]
+     except Exception:
+         return 0
+
+     encoding = tiktoken.get_encoding("cl100k_base")
+     total_tokens = 0
+
+     system = openai_params.get("system")
+     if isinstance(system, str):
+         total_tokens += len(encoding.encode(system))
+
+     messages = openai_params.get("messages", [])
+     if isinstance(messages, list):
+         for msg in messages:
+             if not isinstance(msg, dict):
+                 continue
+             content = msg.get("content", "")
+             if isinstance(content, str):
+                 total_tokens += len(encoding.encode(content))
+             elif isinstance(content, list):
+                 for block in content:
+                     if not isinstance(block, dict):
+                         total_tokens += len(encoding.encode(str(block)))
+                         continue
+                     block_type = block.get("type")
+                     if block_type == "text":
+                         total_tokens += len(encoding.encode(block.get("text", "")))
+                     elif block_type == "image_url":
+                         total_tokens += 85
+
+             tool_calls = msg.get("tool_calls")
+             if isinstance(tool_calls, list) and tool_calls:
+                 total_tokens += len(encoding.encode(json.dumps(tool_calls)))
+
+     tools = openai_params.get("tools")
+     if isinstance(tools, list) and tools:
+         total_tokens += len(encoding.encode(json.dumps(tools)))
+
+     tool_choice = openai_params.get("tool_choice")
+     if tool_choice is not None:
+         total_tokens += len(encoding.encode(json.dumps(tool_choice)))
+
+     response_format = openai_params.get("response_format")
+     if response_format is not None:
+         total_tokens += len(encoding.encode(json.dumps(response_format)))
+
+     return total_tokens
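A quick illustration of how these helpers behave, assuming they are imported from the module above (the token counters need `tiktoken` installed and degrade to 0 rather than raising without it):

```python
params = {
    "messages": [{"role": "user", "content": "hello"}],
    "tools": [{"type": "function", "function": {"name": "web_search"}}],
}
print(_estimate_input_tokens(params))  # rough prompt-token estimate; 0 without tiktoken
print(_generate_server_tool_id())      # e.g. "srvtoolu_" + 24 random [a-z0-9] chars
print(_normalize_usage({"input_tokens": 12, "total_tokens": 20}))
# -> {'input_tokens': 12}: keys outside the Anthropic usage schema are dropped
print(_chunk_text("abcdef", chunk_size=4))  # -> ['abcd', 'ef']
```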
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: local-openai2anthropic
- Version: 0.2.3
+ Version: 0.3.6
  Summary: A lightweight proxy server that converts Anthropic Messages API to OpenAI API
  Project-URL: Homepage, https://github.com/dongfangzan/local-openai2anthropic
  Project-URL: Repository, https://github.com/dongfangzan/local-openai2anthropic
@@ -24,6 +24,7 @@ Requires-Dist: httpx>=0.25.0
  Requires-Dist: openai>=1.30.0
  Requires-Dist: pydantic-settings>=2.0.0
  Requires-Dist: pydantic>=2.0.0
+ Requires-Dist: tomli>=2.0.0; python_version < '3.11'
  Requires-Dist: uvicorn[standard]>=0.23.0
  Provides-Extra: dev
  Requires-Dist: black>=23.0.0; extra == 'dev'
@@ -55,6 +56,7 @@ This proxy translates Claude SDK calls to OpenAI API format in real-time, enabli
  - **Offline development** without cloud API costs
  - **Privacy-first AI** - data never leaves your machine
  - **Seamless model switching** between cloud and local
+ - **Web Search tool** - built-in Tavily web search for local models
 
  ---
 
@@ -79,7 +81,11 @@ Other OpenAI-compatible backends may work but are not fully tested.
  pip install local-openai2anthropic
  ```
 
- ### 2. Start Your Local LLM Server
+ ### 2. Configure Your LLM Backend (Optional)
+
+ **Option A: Start a local LLM server**
+
+ If you don't have an LLM server running, you can start one locally:
 
  Example with vLLM:
  ```bash
@@ -93,6 +99,16 @@ sglang launch --model-path meta-llama/Llama-2-7b-chat-hf --port 8000
  # SGLang starts at http://localhost:8000/v1
  ```
 
+ **Option B: Use an existing OpenAI-compatible API**
+
+ If you already have a deployed OpenAI-compatible API (local or remote), you can use it directly. Just note the base URL for the next step.
+
+ Examples:
+ - Local vLLM/SGLang: `http://localhost:8000/v1`
+ - Remote API: `https://api.example.com/v1`
+
+ > **Note:** If you're using [Ollama](https://ollama.com), it natively supports the Anthropic API format, so you don't need this proxy. Just point your Claude SDK directly to `http://localhost:11434/v1`.
+
  ### 3. Start the Proxy
 
  **Option A: Run in background (recommended)**
@@ -155,22 +171,31 @@ You can configure [Claude Code](https://github.com/anthropics/claude-code) to us
 
  ### Configuration Steps
 
- 1. **Create or edit Claude Code config file** at `~/.claude/CLAUDE.md`:
-
- ```markdown
- # Claude Code Configuration
-
- ## API Settings
-
- - Claude API Base URL: http://localhost:8080
- - Claude API Key: dummy-key
-
- ## Model Settings
-
- Use model: meta-llama/Llama-2-7b-chat-hf # Your local model name
+ 1. **Edit Claude Code config file** at `~/.claude/settings.json`:
+
+ ```json
+ {
+   "env": {
+     "ANTHROPIC_BASE_URL": "http://localhost:8080",
+     "ANTHROPIC_API_KEY": "dummy-key",
+     "ANTHROPIC_MODEL": "meta-llama/Llama-2-7b-chat-hf",
+     "ANTHROPIC_DEFAULT_SONNET_MODEL": "meta-llama/Llama-2-7b-chat-hf",
+     "ANTHROPIC_DEFAULT_OPUS_MODEL": "meta-llama/Llama-2-7b-chat-hf",
+     "ANTHROPIC_DEFAULT_HAIKU_MODEL": "meta-llama/Llama-2-7b-chat-hf",
+     "ANTHROPIC_REASONING_MODEL": "meta-llama/Llama-2-7b-chat-hf"
+   }
+ }
  ```
 
- 2. **Alternatively, set environment variables** before running Claude Code:
+ | Variable | Description |
+ |----------|-------------|
+ | `ANTHROPIC_MODEL` | General model setting |
+ | `ANTHROPIC_DEFAULT_SONNET_MODEL` | Default model for Sonnet mode (Claude Code default) |
+ | `ANTHROPIC_DEFAULT_OPUS_MODEL` | Default model for Opus mode |
+ | `ANTHROPIC_DEFAULT_HAIKU_MODEL` | Default model for Haiku mode |
+ | `ANTHROPIC_REASONING_MODEL` | Default model for reasoning tasks |
+
+ 2. **Or set environment variables** before running Claude Code:
 
  ```bash
  export ANTHROPIC_BASE_URL=http://localhost:8080
@@ -179,38 +204,36 @@ export ANTHROPIC_API_KEY=dummy-key
  claude
  ```
 
- 3. **Or use the `--api-key` and `--base-url` flags**:
-
- ```bash
- claude --api-key dummy-key --base-url http://localhost:8080
- ```
-
  ### Complete Workflow Example
 
+ Make sure `~/.claude/settings.json` is configured as described above.
+
  Terminal 1 - Start your local LLM:
  ```bash
  vllm serve meta-llama/Llama-2-7b-chat-hf
  ```
 
- Terminal 2 - Start the proxy:
+ Terminal 2 - Start the proxy (background mode):
  ```bash
  export OA2A_OPENAI_BASE_URL=http://localhost:8000/v1
  export OA2A_OPENAI_API_KEY=dummy
  export OA2A_TAVILY_API_KEY="tvly-your-tavily-api-key" # Optional: enable web search
 
- oa2a
+ oa2a start
  ```
 
- Terminal 3 - Launch Claude Code with local LLM:
+ Terminal 3 - Launch Claude Code:
  ```bash
- export ANTHROPIC_BASE_URL=http://localhost:8080
- export ANTHROPIC_API_KEY=dummy-key
-
  claude
  ```
 
  Now Claude Code will use your local LLM instead of the cloud API.
 
+ To stop the proxy:
+ ```bash
+ oa2a stop
+ ```
+
  ---
 
  ## Features
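To sanity-check the workflow described in the README changes above, here is a hypothetical smoke test; it assumes the proxy is listening on `http://localhost:8080` and, like the upstream Anthropic API, serves `POST /v1/messages`:

```python
import httpx

resp = httpx.post(
    "http://localhost:8080/v1/messages",
    headers={"x-api-key": "dummy-key", "anthropic-version": "2023-06-01"},
    json={
        "model": "meta-llama/Llama-2-7b-chat-hf",
        "max_tokens": 64,
        "messages": [{"role": "user", "content": "Say hi"}],
    },
    timeout=60.0,
)
resp.raise_for_status()
print(resp.json().get("content"))  # Anthropic-style content blocks from the local model
```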
@@ -0,0 +1,25 @@
+ local_openai2anthropic/__init__.py,sha256=YEz1wpAzYlPY-zbmlQuLf8gpwLEVJBqff4LdfWcz6NM,1059
+ local_openai2anthropic/__main__.py,sha256=K21u5u7FN8-DbO67TT_XDF0neGqJeFrVNkteRauCRQk,179
+ local_openai2anthropic/config.py,sha256=y40uEMBE57dOGCV3w3v5j82ZPZZJWUnJ4yaFZXJ8pRk,4706
+ local_openai2anthropic/converter.py,sha256=og94I514M9km_Wbk9c1ddU6fyaQNEbpd2zfpfnBQaTQ,16029
+ local_openai2anthropic/daemon.py,sha256=pZnRojGFcuIpR8yLDNjV-b0LJRBVhgRAa-dKeRRse44,10017
+ local_openai2anthropic/daemon_runner.py,sha256=rguOH0PgpbjqNsKYei0uCQX8JQOQ1wmtQH1CtW95Dbw,3274
+ local_openai2anthropic/main.py,sha256=3xrjsKFBYK6B8niAtQz0U_yz-eTpf91HnHeAiR9CLQE,12174
+ local_openai2anthropic/openai_types.py,sha256=jFdCvLwtXYoo5gGRqOhbHQcVaxcsxNnCP_yFPIv7rG4,3823
+ local_openai2anthropic/protocol.py,sha256=VW3B1YrbYg5UAo7PveQv0Ny5vfuNa6yG6IlHtkuyXiI,5178
+ local_openai2anthropic/router.py,sha256=gwSGCYQGd0tAj4B4cl30UDkIJDIfBP4D8T9KEMKnxyk,16196
+ local_openai2anthropic/tavily_client.py,sha256=QsBhnyF8BFWPAxB4XtWCCpHCquNL5SW93-zjTTi4Meg,3774
+ local_openai2anthropic/server_tools/__init__.py,sha256=QlJfjEta-HOCtLe7NaY_fpbEKv-ZpInjAnfmSqE9tbk,615
+ local_openai2anthropic/server_tools/base.py,sha256=pNFsv-jSgxVrkY004AHAcYMNZgVSO8ZOeCzQBUtQ3vU,5633
+ local_openai2anthropic/server_tools/web_search.py,sha256=1C7lX_cm-tMaN3MsCjinEZYPJc_Hj4yAxYay9h8Zbvs,6543
+ local_openai2anthropic/streaming/__init__.py,sha256=RFKYQnc0zlhWK-Dm7GZpmabmszbZhY5NcXaaSsQ7Sys,227
+ local_openai2anthropic/streaming/handler.py,sha256=X8viml6b40p-vr-A4HlEi5iCqmTsIMyQgj3S2RfweVE,22033
+ local_openai2anthropic/tools/__init__.py,sha256=OM_6YAwy3G1kbrF7n5NvmBwWPGO0hwq4xLrYZFMHANA,318
+ local_openai2anthropic/tools/handler.py,sha256=SO8AmEUfNIg16s6jOKBaYdajYc0fiI8ciOoiKXIJe_c,14106
+ local_openai2anthropic/utils/__init__.py,sha256=0Apd3lQCmWpQHol4AfjtQe6A3Cpex9Zn-8dyK_FU8Z0,372
+ local_openai2anthropic/utils/tokens.py,sha256=TV3vGAjoGZeyo1xPvwb5jto43p1U1f4HteCApB86X0g,3187
+ local_openai2anthropic-0.3.6.dist-info/METADATA,sha256=B5M75TvhwturteqT_zxAJ7lXSTAy2QpjTCeDvPRixhQ,11293
+ local_openai2anthropic-0.3.6.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+ local_openai2anthropic-0.3.6.dist-info/entry_points.txt,sha256=hdc9tSJUNxyNLXcTYye5SuD2K0bEQhxBhGnWTFup6ZM,116
+ local_openai2anthropic-0.3.6.dist-info/licenses/LICENSE,sha256=X3_kZy3lJvd_xp8IeyUcIAO2Y367MXZc6aaRx8BYR_s,11369
+ local_openai2anthropic-0.3.6.dist-info/RECORD,,
@@ -1,19 +0,0 @@
- local_openai2anthropic/__init__.py,sha256=bj8tRC4_GyO5x4A5NqRdpxWWrdhAi7pC8xN9-ui0bQo,1059
- local_openai2anthropic/__main__.py,sha256=K21u5u7FN8-DbO67TT_XDF0neGqJeFrVNkteRauCRQk,179
- local_openai2anthropic/config.py,sha256=bnM7p5htd6rHgLn7Z0Ukmm2jVImLuVjIB5Cnfpf2ClY,1918
- local_openai2anthropic/converter.py,sha256=qp0LPJBTP0uAb_5l9VINZ03RAjmumxdquP6JqWXiZkQ,15779
- local_openai2anthropic/daemon.py,sha256=pZnRojGFcuIpR8yLDNjV-b0LJRBVhgRAa-dKeRRse44,10017
- local_openai2anthropic/daemon_runner.py,sha256=rguOH0PgpbjqNsKYei0uCQX8JQOQ1wmtQH1CtW95Dbw,3274
- local_openai2anthropic/main.py,sha256=5tdgPel8RSCn1iK0d7hYAmcTM9vYHlepgQujaEXA2ic,9866
- local_openai2anthropic/openai_types.py,sha256=jFdCvLwtXYoo5gGRqOhbHQcVaxcsxNnCP_yFPIv7rG4,3823
- local_openai2anthropic/protocol.py,sha256=vUEgxtRPFll6jEtLc4DyxTLCBjrWIEScZXhEqe4uibk,5185
- local_openai2anthropic/router.py,sha256=5c9APWIIkM2pi4C6AZ0OWP_yrE6wn5YQmJo1OOHcuVo,36101
- local_openai2anthropic/tavily_client.py,sha256=QsBhnyF8BFWPAxB4XtWCCpHCquNL5SW93-zjTTi4Meg,3774
- local_openai2anthropic/server_tools/__init__.py,sha256=QlJfjEta-HOCtLe7NaY_fpbEKv-ZpInjAnfmSqE9tbk,615
- local_openai2anthropic/server_tools/base.py,sha256=pNFsv-jSgxVrkY004AHAcYMNZgVSO8ZOeCzQBUtQ3vU,5633
- local_openai2anthropic/server_tools/web_search.py,sha256=1C7lX_cm-tMaN3MsCjinEZYPJc_Hj4yAxYay9h8Zbvs,6543
- local_openai2anthropic-0.2.3.dist-info/METADATA,sha256=auO3568iC566_VVykvf8x7oZylGVBhu0qW_zuAgp5WQ,10040
- local_openai2anthropic-0.2.3.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
- local_openai2anthropic-0.2.3.dist-info/entry_points.txt,sha256=hdc9tSJUNxyNLXcTYye5SuD2K0bEQhxBhGnWTFup6ZM,116
- local_openai2anthropic-0.2.3.dist-info/licenses/LICENSE,sha256=X3_kZy3lJvd_xp8IeyUcIAO2Y367MXZc6aaRx8BYR_s,11369
- local_openai2anthropic-0.2.3.dist-info/RECORD,,