local-openai2anthropic 0.1.1__py3-none-any.whl → 0.2.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -21,25 +21,25 @@ def create_app(settings: Settings | None = None) -> FastAPI:
  """Create and configure the FastAPI application."""
  if settings is None:
  settings = get_settings()
-
+
  # Configure logging
  logging.basicConfig(
  level=getattr(logging, settings.log_level.upper()),
  format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
  )
-
+
  # Create FastAPI app
  app = FastAPI(
  title="local-openai2anthropic",
  description="A proxy server that converts Anthropic Messages API to OpenAI API",
- version="0.1.0",
+ version="0.2.0",
  docs_url="/docs",
  redoc_url="/redoc",
  )
-
+
  # Store settings in app state
  app.state.settings = settings
-
+
  # Add CORS middleware
  app.add_middleware(
  CORSMiddleware,
@@ -48,16 +48,17 @@ def create_app(settings: Settings | None = None) -> FastAPI:
  allow_methods=settings.cors_methods,
  allow_headers=settings.cors_headers,
  )
-
+
  # Add API key authentication middleware if configured
  if settings.api_key:
+
  @app.middleware("http")
  async def auth_middleware(request: Request, call_next):
  """Validate API key if configured."""
  # Skip auth for docs and health check
  if request.url.path in ["/docs", "/redoc", "/openapi.json", "/health"]:
  return await call_next(request)
-
+
  auth_header = request.headers.get("Authorization", "")
  if not auth_header.startswith("Bearer "):
  error_response = AnthropicErrorResponse(
@@ -70,7 +71,7 @@ def create_app(settings: Settings | None = None) -> FastAPI:
  status_code=401,
  content=error_response.model_dump(),
  )
-
+
  token = auth_header[7:] # Remove "Bearer " prefix
  if token != settings.api_key:
  error_response = AnthropicErrorResponse(
@@ -83,12 +84,12 @@ def create_app(settings: Settings | None = None) -> FastAPI:
  status_code=401,
  content=error_response.model_dump(),
  )
-
+
  return await call_next(request)
-
+
  # Include routers
  app.include_router(router)
-
+
  # Exception handlers
  @app.exception_handler(HTTPException)
  async def http_exception_handler(request: Request, exc: HTTPException):
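
For context, a minimal client-side sketch (not part of the package) of what the Bearer-token middleware added above expects. It assumes the proxy is running on localhost:8080 with an API key configured in its settings; `/v1/messages` and the request body are purely illustrative.

```python
import httpx

BASE = "http://localhost:8080"

# /health is on the middleware's skip list, so it needs no Authorization header
print(httpx.get(f"{BASE}/health").status_code)

# Any other path must carry the configured key as a Bearer token,
# otherwise the middleware returns the 401 AnthropicErrorResponse built above.
resp = httpx.post(
    f"{BASE}/v1/messages",  # illustrative path; use whatever route the proxy exposes
    headers={"Authorization": "Bearer my-proxy-api-key"},
    json={
        "model": "my-local-model",
        "max_tokens": 16,
        "messages": [{"role": "user", "content": "ping"}],
    },
)
print(resp.status_code)
```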
@@ -99,7 +100,7 @@ def create_app(settings: Settings | None = None) -> FastAPI:
  status_code=exc.status_code,
  content=exc.detail,
  )
-
+
  error_response = AnthropicErrorResponse(
  error=AnthropicError(
  type="api_error",
@@ -110,7 +111,7 @@ def create_app(settings: Settings | None = None) -> FastAPI:
  status_code=exc.status_code,
  content=error_response.model_dump(),
  )
-
+
  @app.exception_handler(Exception)
  async def general_exception_handler(request: Request, exc: Exception):
  """Handle unexpected exceptions."""
@@ -125,27 +126,12 @@ def create_app(settings: Settings | None = None) -> FastAPI:
  status_code=500,
  content=error_response.model_dump(),
  )
-
- return app

+ return app

- def main() -> None:
- """Main entry point."""
- # Parse arguments first (before loading settings)
- parser = argparse.ArgumentParser(
- prog="oa2a",
- description="A proxy server that converts Anthropic Messages API to OpenAI API",
- )
- parser.add_argument(
- "--version",
- action="version",
- version="%(prog)s 0.1.1",
- )
- args = parser.parse_args()
-
- # Load settings
- settings = get_settings()

+ def run_foreground(settings: Settings) -> None:
+ """Run server in foreground mode (blocking)."""
  # Validate required settings
  if not settings.openai_api_key:
  print(
@@ -159,11 +145,12 @@ def main() -> None:

  # Create app
  app = create_app(settings)
-
+
  # Run server
- print(f"Starting local-openai2anthropic server on {settings.host}:{settings.port}")
+ print(f"Starting server on {settings.host}:{settings.port}")
  print(f"Proxying to: {settings.openai_base_url}")
-
+ print("Press Ctrl+C to stop\n")
+
  uvicorn.run(
  app,
  host=settings.host,
@@ -173,5 +160,157 @@ def main() -> None:
  )


+ def main() -> None:
+ """Main entry point with subcommand support."""
+ # Create main parser
+ parser = argparse.ArgumentParser(
+ prog="oa2a",
+ description="A proxy server that converts Anthropic Messages API to OpenAI API",
+ formatter_class=argparse.RawDescriptionHelpFormatter,
+ epilog="""
+ Examples:
+ oa2a start # Start server in background
+ oa2a stop # Stop background server
+ oa2a restart # Restart background server
+ oa2a status # Check server status
+ oa2a logs # View server logs
+ oa2a logs -f # Follow server logs (tail -f)
+ oa2a # Run server in foreground (default behavior)
+ """.strip(),
+ )
+
+ parser.add_argument(
+ "--version",
+ action="version",
+ version="%(prog)s 0.2.0",
+ )
+
+ # Create subparsers for commands
+ subparsers = parser.add_subparsers(dest="command", help="Available commands")
+
+ # start command
+ start_parser = subparsers.add_parser("start", help="Start server in background")
+ start_parser.add_argument(
+ "--host",
+ default=None,
+ help="Server host (default: 0.0.0.0)",
+ )
+ start_parser.add_argument(
+ "--port",
+ type=int,
+ default=None,
+ help="Server port (default: 8080)",
+ )
+ start_parser.add_argument(
+ "--log-level",
+ default="info",
+ choices=["debug", "info", "warning", "error"],
+ help="Logging level (default: info)",
+ )
+
+ # stop command
+ stop_parser = subparsers.add_parser("stop", help="Stop background server")
+ stop_parser.add_argument(
+ "-f", "--force",
+ action="store_true",
+ help="Force kill the server",
+ )
+
+ # restart command
+ restart_parser = subparsers.add_parser("restart", help="Restart background server")
+ restart_parser.add_argument(
+ "--host",
+ default=None,
+ help="Server host (default: 0.0.0.0)",
+ )
+ restart_parser.add_argument(
+ "--port",
+ type=int,
+ default=None,
+ help="Server port (default: 8080)",
+ )
+ restart_parser.add_argument(
+ "--log-level",
+ default="info",
+ choices=["debug", "info", "warning", "error"],
+ help="Logging level (default: info)",
+ )
+
+ # status command
+ status_parser = subparsers.add_parser("status", help="Check server status")
+
+ # logs command
+ logs_parser = subparsers.add_parser("logs", help="View server logs")
+ logs_parser.add_argument(
+ "-f", "--follow",
+ action="store_true",
+ help="Follow log output (like tail -f)",
+ )
+ logs_parser.add_argument(
+ "-n", "--lines",
+ type=int,
+ default=50,
+ help="Number of lines to show (default: 50)",
+ )
+
+ args = parser.parse_args()
+
+ # Import daemon module only when needed
+ from local_openai2anthropic import daemon
+
+ # Handle subcommands
+ if args.command == "start":
+ # Get settings for defaults
+ settings = get_settings()
+ host = args.host or settings.host
+ port = args.port or settings.port
+
+ success = daemon.start_daemon(
+ host=host,
+ port=port,
+ log_level=args.log_level,
+ )
+ sys.exit(0 if success else 1)
+
+ elif args.command == "stop":
+ success = daemon.stop_daemon(force=args.force)
+ sys.exit(0 if success else 1)
+
+ elif args.command == "restart":
+ # Get settings for defaults
+ settings = get_settings()
+ host = args.host or settings.host
+ port = args.port or settings.port
+
+ success = daemon.restart_daemon(
+ host=host,
+ port=port,
+ log_level=args.log_level,
+ )
+ sys.exit(0 if success else 1)
+
+ elif args.command == "status":
+ running, pid, config = daemon.get_status()
+ if running and config:
+ host = config.get("host", "0.0.0.0")
+ port = config.get("port", 8080)
+ print(f"Server is running (PID: {pid})")
+ print(f"Listening on: {host}:{port}")
+ elif running:
+ print(f"Server is running (PID: {pid})")
+ else:
+ print("Server is not running")
+ sys.exit(0)
+
+ elif args.command == "logs":
+ success = daemon.show_logs(follow=args.follow, lines=args.lines)
+ sys.exit(0 if success else 1)
+
+ else:
+ # No command - run in foreground (original behavior)
+ settings = get_settings()
+ run_foreground(settings)
+
+
  if __name__ == "__main__":
  main()
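
The subcommands above delegate to `local_openai2anthropic.daemon`. As a rough sketch (not shipped with the package), the same helpers can be driven from Python, using only the call shapes visible in this diff:

```python
from local_openai2anthropic import daemon

# get_status() is unpacked as (running, pid, config) in the "status" branch above.
running, pid, config = daemon.get_status()

if running:
    print(f"Already running (PID: {pid}), config: {config}")
else:
    # Mirrors "oa2a start"; start_daemon(host=..., port=..., log_level=...) as used above.
    ok = daemon.start_daemon(host="0.0.0.0", port=8080, log_level="info")
    print("Started" if ok else "Failed to start")
```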
@@ -148,6 +148,23 @@ async def _stream_response(
  finish_reason = choice["finish_reason"]
  continue

+ # Handle reasoning content (thinking)
+ if delta.get("reasoning_content"):
+ reasoning = delta["reasoning_content"]
+ # Start thinking content block if not already started
+ if not content_block_started or content_block_index == 0:
+ # We need a separate index for thinking block
+ # For simplicity, we treat thinking as a separate block before text
+ if content_block_started:
+ # Close previous block
+ yield f"event: content_block_stop\ndata: {json.dumps({'type': 'content_block_stop', 'index': content_block_index})}\n\n"
+ content_block_index += 1
+ yield f"event: content_block_start\ndata: {json.dumps({'type': 'content_block_start', 'index': content_block_index, 'content_block': {'type': 'thinking', 'thinking': ''}})}\n\n"
+ content_block_started = True
+
+ yield f"event: content_block_delta\ndata: {json.dumps({'type': 'content_block_delta', 'index': content_block_index, 'delta': {'type': 'thinking_delta', 'thinking': reasoning}})}\n\n"
+ continue
+
  # Handle content
  if delta.get("content"):
  if not content_block_started:
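
The new branch above maps the backend's `reasoning_content` deltas onto Anthropic-style `thinking` content blocks in the SSE stream. A minimal consumer sketch (an illustration, not shipped with the package): it assumes the proxy is on localhost:8080, a reasoning-capable local model, and an anthropic SDK version recent enough to parse `thinking_delta` events.

```python
import anthropic

client = anthropic.Anthropic(base_url="http://localhost:8080", api_key="dummy-key")

with client.messages.stream(
    model="my-local-reasoning-model",  # placeholder model name
    max_tokens=1024,
    messages=[{"role": "user", "content": "Think it through: what is 17 * 24?"}],
) as stream:
    for event in stream:
        if event.type == "content_block_delta" and event.delta.type == "thinking_delta":
            # Reasoning tokens forwarded by the proxy as thinking deltas
            print(event.delta.thinking, end="", flush=True)
        elif event.type == "content_block_delta" and event.delta.type == "text_delta":
            # Regular answer text
            print(event.delta.text, end="", flush=True)
```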
@@ -562,6 +579,7 @@ async def create_message(
  try:
  body_bytes = await request.body()
  body_json = json.loads(body_bytes.decode("utf-8"))
+ logger.info(f"Received body: {body_json}")
  anthropic_params = body_json
  except json.JSONDecodeError as e:
  logger.error(f"Invalid JSON in request body: {e}")
@@ -0,0 +1,351 @@
+ Metadata-Version: 2.4
+ Name: local-openai2anthropic
+ Version: 0.2.2
+ Summary: A lightweight proxy server that converts Anthropic Messages API to OpenAI API
+ Project-URL: Homepage, https://github.com/dongfangzan/local-openai2anthropic
+ Project-URL: Repository, https://github.com/dongfangzan/local-openai2anthropic
+ Project-URL: Issues, https://github.com/dongfangzan/local-openai2anthropic/issues
+ Author-email: dongfangzan <zsybook0124@163.com>
+ Maintainer-email: dongfangzan <zsybook0124@163.com>
+ License: Apache-2.0
+ License-File: LICENSE
+ Keywords: anthropic,api,claude,messages,openai,proxy
+ Classifier: Development Status :: 4 - Beta
+ Classifier: Intended Audience :: Developers
+ Classifier: License :: OSI Approved :: Apache Software License
+ Classifier: Programming Language :: Python :: 3
+ Classifier: Programming Language :: Python :: 3.12
+ Classifier: Programming Language :: Python :: 3.13
+ Classifier: Topic :: Software Development :: Libraries :: Python Modules
+ Requires-Python: >=3.12
+ Requires-Dist: anthropic>=0.30.0
+ Requires-Dist: fastapi>=0.100.0
+ Requires-Dist: httpx>=0.25.0
+ Requires-Dist: openai>=1.30.0
+ Requires-Dist: pydantic-settings>=2.0.0
+ Requires-Dist: pydantic>=2.0.0
+ Requires-Dist: uvicorn[standard]>=0.23.0
+ Provides-Extra: dev
+ Requires-Dist: black>=23.0.0; extra == 'dev'
+ Requires-Dist: mypy>=1.0.0; extra == 'dev'
+ Requires-Dist: pytest-asyncio>=0.21.0; extra == 'dev'
+ Requires-Dist: pytest>=7.0.0; extra == 'dev'
+ Requires-Dist: ruff>=0.1.0; extra == 'dev'
+ Description-Content-Type: text/markdown
+
+ # local-openai2anthropic
+
+ [![Python 3.12+](https://img.shields.io/badge/python-3.12+-blue.svg)](https://www.python.org/downloads/)
+ [![License: Apache 2.0](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0)
+ [![PyPI](https://img.shields.io/pypi/v/local-openai2anthropic.svg)](https://pypi.org/project/local-openai2anthropic/)
+
+ **English | [中文](README_zh.md)**
+
+ A lightweight proxy that lets applications built with the [Claude SDK](https://github.com/anthropics/anthropic-sdk-python) talk to locally hosted, OpenAI-compatible LLMs.
+
+ ---
+
+ ## What Problem This Solves
+
+ Many local LLM tools (vLLM, SGLang, etc.) provide an OpenAI-compatible API. But if you've built your app using Anthropic's Claude SDK, you can't use them directly.
+
+ This proxy translates Claude SDK calls to the OpenAI API format in real time, enabling:
+
+ - **Local LLM inference** with Claude-based apps
+ - **Offline development** without cloud API costs
+ - **Privacy-first AI** - data never leaves your machine
+ - **Seamless model switching** between cloud and local
+
+ ---
+
+ ## Supported Local Backends
+
+ Currently tested and supported:
+
+ | Backend | Description | Status |
+ |---------|-------------|--------|
+ | [vLLM](https://github.com/vllm-project/vllm) | High-throughput LLM inference | ✅ Fully supported |
+ | [SGLang](https://github.com/sgl-project/sglang) | Fast structured language model serving | ✅ Fully supported |
+
+ Other OpenAI-compatible backends may work but are not fully tested.
+
+ ---
+
+ ## Quick Start
+
+ ### 1. Install
+
+ ```bash
+ pip install local-openai2anthropic
+ ```
+
+ ### 2. Start Your Local LLM Server
+
+ Example with vLLM:
+ ```bash
+ vllm serve meta-llama/Llama-2-7b-chat-hf
+ # vLLM starts an OpenAI-compatible API at http://localhost:8000/v1
+ ```
+
+ Or with SGLang:
+ ```bash
+ python -m sglang.launch_server --model-path meta-llama/Llama-2-7b-chat-hf --port 8000
+ # SGLang starts at http://localhost:8000/v1
+ ```
+
+ ### 3. Start the Proxy
+
+ **Option A: Run in background (recommended)**
+
+ ```bash
+ export OA2A_OPENAI_BASE_URL=http://localhost:8000/v1 # Your local LLM endpoint
+ export OA2A_OPENAI_API_KEY=dummy # Any value, not used by local backends
+
+ oa2a start # Start server in background
+ # Server starts at http://localhost:8080
+
+ # View logs
+ oa2a logs # Show last 50 lines of logs
+ oa2a logs -f # Follow logs in real-time (Ctrl+C to exit)
+
+ # Check status
+ oa2a status # Check if server is running
+
+ # Stop server
+ oa2a stop # Stop background server
+
+ # Restart server
+ oa2a restart # Restart with same settings
+ ```
+
+ **Option B: Run in foreground**
+
+ ```bash
+ export OA2A_OPENAI_BASE_URL=http://localhost:8000/v1
+ export OA2A_OPENAI_API_KEY=dummy
+
+ oa2a # Run server in foreground (blocking)
+ # Press Ctrl+C to stop
+ ```
+
+ ### 4. Use in Your App
+
+ ```python
+ import anthropic
+
+ client = anthropic.Anthropic(
+     base_url="http://localhost:8080",  # Point to proxy
+     api_key="dummy-key",  # Not used
+ )
+
+ message = client.messages.create(
+     model="meta-llama/Llama-2-7b-chat-hf",  # Your local model name
+     max_tokens=1024,
+     messages=[{"role": "user", "content": "Hello!"}],
+ )
+
+ print(message.content[0].text)
+ ```
+
+ ---
+
+ ## Using with Claude Code
+
+ You can configure [Claude Code](https://github.com/anthropics/claude-code) to use your local LLM through this proxy.
+
+ ### Configuration Steps
+
+ 1. **Create or edit the Claude Code config file** at `~/.claude/CLAUDE.md`:
+
+ ```markdown
+ # Claude Code Configuration
+
+ ## API Settings
+
+ - Claude API Base URL: http://localhost:8080
+ - Claude API Key: dummy-key
+
+ ## Model Settings
+
+ Use model: meta-llama/Llama-2-7b-chat-hf # Your local model name
+ ```
+
+ 2. **Alternatively, set environment variables** before running Claude Code:
+
+ ```bash
+ export ANTHROPIC_BASE_URL=http://localhost:8080
+ export ANTHROPIC_API_KEY=dummy-key
+
+ claude
+ ```
+
+ 3. **Or use the `--api-key` and `--base-url` flags**:
+
+ ```bash
+ claude --api-key dummy-key --base-url http://localhost:8080
+ ```
+
+ ### Complete Workflow Example
+
+ Terminal 1 - Start your local LLM:
+ ```bash
+ vllm serve meta-llama/Llama-2-7b-chat-hf
+ ```
+
+ Terminal 2 - Start the proxy:
+ ```bash
+ export OA2A_OPENAI_BASE_URL=http://localhost:8000/v1
+ export OA2A_OPENAI_API_KEY=dummy
+ export OA2A_TAVILY_API_KEY="tvly-your-tavily-api-key" # Optional: enable web search
+
+ oa2a
+ ```
+
+ Terminal 3 - Launch Claude Code with the local LLM:
+ ```bash
+ export ANTHROPIC_BASE_URL=http://localhost:8080
+ export ANTHROPIC_API_KEY=dummy-key
+
+ claude
+ ```
+
+ Now Claude Code will use your local LLM instead of the cloud API.
+
+ ---
+
+ ## Features
+
+ - ✅ **Streaming responses** - Real-time token streaming via SSE
+ - ✅ **Tool calling** - Local LLM function calling support
+ - ✅ **Vision models** - Multi-modal input for vision-capable models
+ - ✅ **Web Search** - Give your local LLM internet access (see below)
+ - ✅ **Thinking mode** - Supports reasoning/thinking model outputs
+
+ ---
+
+ ## Web Search Capability 🔍
+
+ **Bridge the gap: Give your local LLM the web search power that Claude Code users enjoy!**
+
+ When using locally-hosted models with Claude Code, you lose access to the built-in web search tool. This proxy fills that gap by providing a server-side web search implementation powered by [Tavily](https://tavily.com).
+
+ ### The Problem
+
+ | Scenario | Web Search Available? |
+ |----------|----------------------|
+ | Using Claude (cloud) in Claude Code | ✅ Built-in |
+ | Using local vLLM/SGLang in Claude Code | ❌ Not available |
+ | **Using this proxy + local LLM** | ✅ **Enabled via Tavily** |
+
+ ### How It Works
+
+ ```
+ Claude Code → Anthropic SDK → This Proxy → Local LLM
+                                   ↓
+                        Tavily API (Web Search)
+ ```
+
+ The proxy intercepts `web_search_20250305` tool calls and handles them directly, regardless of whether your local model supports web search natively.
+
+ ### Setup Tavily Search
+
+ 1. **Get a free API key** at [tavily.com](https://tavily.com) - a generous free tier is available
+
+ 2. **Configure the proxy:**
+ ```bash
+ export OA2A_OPENAI_BASE_URL=http://localhost:8000/v1
+ export OA2A_OPENAI_API_KEY=dummy
+ export OA2A_TAVILY_API_KEY="tvly-your-tavily-api-key" # Enable web search
+
+ oa2a
+ ```
+
+ 3. **Use in your app:**
+ ```python
+ import anthropic
+
+ client = anthropic.Anthropic(
+     base_url="http://localhost:8080",
+     api_key="dummy-key",
+ )
+
+ message = client.messages.create(
+     model="meta-llama/Llama-2-7b-chat-hf",
+     max_tokens=1024,
+     tools=[
+         {
+             "name": "web_search_20250305",
+             "description": "Search the web for current information",
+             "input_schema": {
+                 "type": "object",
+                 "properties": {
+                     "query": {"type": "string", "description": "Search query"},
+                 },
+                 "required": ["query"],
+             },
+         }
+     ],
+     messages=[{"role": "user", "content": "What happened in AI today?"}],
+ )
+
+ if message.stop_reason == "tool_use":
+     tool_use = message.content[-1]
+     print(f"Searching: {tool_use.input}")
+     # The proxy automatically calls Tavily and returns results
+ ```
+
+ ### Tavily Configuration Options
+
+ | Variable | Default | Description |
+ |----------|---------|-------------|
+ | `OA2A_TAVILY_API_KEY` | - | Your Tavily API key ([get one free at tavily.com](https://tavily.com)) |
+ | `OA2A_TAVILY_MAX_RESULTS` | 5 | Number of search results to return |
+ | `OA2A_TAVILY_TIMEOUT` | 30 | Search timeout in seconds |
+ | `OA2A_WEBSEARCH_MAX_USES` | 5 | Max search calls per request |
+
+ ---
+
+ ## Configuration
+
+ | Variable | Required | Default | Description |
+ |----------|----------|---------|-------------|
+ | `OA2A_OPENAI_BASE_URL` | ✅ | - | Your local LLM's OpenAI-compatible endpoint |
+ | `OA2A_OPENAI_API_KEY` | ✅ | - | Any value (local backends usually ignore this) |
+ | `OA2A_PORT` | ❌ | 8080 | Proxy server port |
+ | `OA2A_HOST` | ❌ | 0.0.0.0 | Proxy server host |
+ | `OA2A_TAVILY_API_KEY` | ❌ | - | Enable web search ([tavily.com](https://tavily.com)) |
+
+ ---
+
+ ## Architecture
+
+ ```
+ Your App (Claude SDK)
+           ↓
+ ┌─────────────────────────┐
+ │ local-openai2anthropic  │  ← This proxy
+ │ (Port 8080)             │
+ └─────────────────────────┘
+           ↓
+ Your Local LLM Server
+ (vLLM / SGLang)
+ (OpenAI-compatible API)
+ ```
+
+ ---
+
+ ## Development
+
+ ```bash
+ git clone https://github.com/dongfangzan/local-openai2anthropic.git
+ cd local-openai2anthropic
+ pip install -e ".[dev]"
+
+ pytest
+ ```
+
+ ## License
+
+ Apache License 2.0
@@ -0,0 +1,18 @@
+ local_openai2anthropic/__init__.py,sha256=jgIoIwQXIXS83WbRUx2CF1x0A8DloLduoUIUGXwWhSU,1059
+ local_openai2anthropic/__main__.py,sha256=K21u5u7FN8-DbO67TT_XDF0neGqJeFrVNkteRauCRQk,179
+ local_openai2anthropic/config.py,sha256=jkPqZZ_uaEjG9uOTEyLnrJS74VVONJdKvgab2XzMTDs,1917
+ local_openai2anthropic/converter.py,sha256=u5YaeuOZZfcx4vAxAyXS52TKDory9w_nVwkOhEOBme4,22757
+ local_openai2anthropic/daemon.py,sha256=pZnRojGFcuIpR8yLDNjV-b0LJRBVhgRAa-dKeRRse44,10017
+ local_openai2anthropic/daemon_runner.py,sha256=rguOH0PgpbjqNsKYei0uCQX8JQOQ1wmtQH1CtW95Dbw,3274
+ local_openai2anthropic/main.py,sha256=5tdgPel8RSCn1iK0d7hYAmcTM9vYHlepgQujaEXA2ic,9866
+ local_openai2anthropic/protocol.py,sha256=vUEgxtRPFll6jEtLc4DyxTLCBjrWIEScZXhEqe4uibk,5185
+ local_openai2anthropic/router.py,sha256=xgZiH7Nnb74OJ2_PSIfO9oOgVtUG4akEciUAuuJozJ4,32673
+ local_openai2anthropic/tavily_client.py,sha256=QsBhnyF8BFWPAxB4XtWCCpHCquNL5SW93-zjTTi4Meg,3774
+ local_openai2anthropic/server_tools/__init__.py,sha256=QlJfjEta-HOCtLe7NaY_fpbEKv-ZpInjAnfmSqE9tbk,615
+ local_openai2anthropic/server_tools/base.py,sha256=pNFsv-jSgxVrkY004AHAcYMNZgVSO8ZOeCzQBUtQ3vU,5633
+ local_openai2anthropic/server_tools/web_search.py,sha256=1C7lX_cm-tMaN3MsCjinEZYPJc_Hj4yAxYay9h8Zbvs,6543
+ local_openai2anthropic-0.2.2.dist-info/METADATA,sha256=1LDwStgAm9IYC4MV52b9VxgjNQCY8eR2gUd2Ini7ET8,10040
+ local_openai2anthropic-0.2.2.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+ local_openai2anthropic-0.2.2.dist-info/entry_points.txt,sha256=hdc9tSJUNxyNLXcTYye5SuD2K0bEQhxBhGnWTFup6ZM,116
+ local_openai2anthropic-0.2.2.dist-info/licenses/LICENSE,sha256=X3_kZy3lJvd_xp8IeyUcIAO2Y367MXZc6aaRx8BYR_s,11369
+ local_openai2anthropic-0.2.2.dist-info/RECORD,,