local-openai2anthropic 0.2.5.tar.gz → 0.2.7.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (33)
  1. {local_openai2anthropic-0.2.5 → local_openai2anthropic-0.2.7}/PKG-INFO +49 -28
  2. {local_openai2anthropic-0.2.5 → local_openai2anthropic-0.2.7}/README.md +48 -27
  3. {local_openai2anthropic-0.2.5 → local_openai2anthropic-0.2.7}/README_zh.md +47 -26
  4. {local_openai2anthropic-0.2.5 → local_openai2anthropic-0.2.7}/pyproject.toml +1 -1
  5. {local_openai2anthropic-0.2.5 → local_openai2anthropic-0.2.7}/src/local_openai2anthropic/config.py +7 -6
  6. {local_openai2anthropic-0.2.5 → local_openai2anthropic-0.2.7}/src/local_openai2anthropic/main.py +2 -2
  7. {local_openai2anthropic-0.2.5 → local_openai2anthropic-0.2.7}/.github/workflows/publish.yml +0 -0
  8. {local_openai2anthropic-0.2.5 → local_openai2anthropic-0.2.7}/.gitignore +0 -0
  9. {local_openai2anthropic-0.2.5 → local_openai2anthropic-0.2.7}/LICENSE +0 -0
  10. {local_openai2anthropic-0.2.5 → local_openai2anthropic-0.2.7}/examples/basic_chat.py +0 -0
  11. {local_openai2anthropic-0.2.5 → local_openai2anthropic-0.2.7}/examples/streaming.py +0 -0
  12. {local_openai2anthropic-0.2.5 → local_openai2anthropic-0.2.7}/examples/thinking_mode.py +0 -0
  13. {local_openai2anthropic-0.2.5 → local_openai2anthropic-0.2.7}/examples/tool_calling.py +0 -0
  14. {local_openai2anthropic-0.2.5 → local_openai2anthropic-0.2.7}/examples/vision.py +0 -0
  15. {local_openai2anthropic-0.2.5 → local_openai2anthropic-0.2.7}/examples/web_search.py +0 -0
  16. {local_openai2anthropic-0.2.5 → local_openai2anthropic-0.2.7}/src/local_openai2anthropic/__init__.py +0 -0
  17. {local_openai2anthropic-0.2.5 → local_openai2anthropic-0.2.7}/src/local_openai2anthropic/__main__.py +0 -0
  18. {local_openai2anthropic-0.2.5 → local_openai2anthropic-0.2.7}/src/local_openai2anthropic/converter.py +0 -0
  19. {local_openai2anthropic-0.2.5 → local_openai2anthropic-0.2.7}/src/local_openai2anthropic/daemon.py +0 -0
  20. {local_openai2anthropic-0.2.5 → local_openai2anthropic-0.2.7}/src/local_openai2anthropic/daemon_runner.py +0 -0
  21. {local_openai2anthropic-0.2.5 → local_openai2anthropic-0.2.7}/src/local_openai2anthropic/openai_types.py +0 -0
  22. {local_openai2anthropic-0.2.5 → local_openai2anthropic-0.2.7}/src/local_openai2anthropic/protocol.py +0 -0
  23. {local_openai2anthropic-0.2.5 → local_openai2anthropic-0.2.7}/src/local_openai2anthropic/router.py +0 -0
  24. {local_openai2anthropic-0.2.5 → local_openai2anthropic-0.2.7}/src/local_openai2anthropic/server_tools/__init__.py +0 -0
  25. {local_openai2anthropic-0.2.5 → local_openai2anthropic-0.2.7}/src/local_openai2anthropic/server_tools/base.py +0 -0
  26. {local_openai2anthropic-0.2.5 → local_openai2anthropic-0.2.7}/src/local_openai2anthropic/server_tools/web_search.py +0 -0
  27. {local_openai2anthropic-0.2.5 → local_openai2anthropic-0.2.7}/src/local_openai2anthropic/tavily_client.py +0 -0
  28. {local_openai2anthropic-0.2.5 → local_openai2anthropic-0.2.7}/tests/__init__.py +0 -0
  29. {local_openai2anthropic-0.2.5 → local_openai2anthropic-0.2.7}/tests/test_converter.py +0 -0
  30. {local_openai2anthropic-0.2.5 → local_openai2anthropic-0.2.7}/tests/test_integration.py +0 -0
  31. {local_openai2anthropic-0.2.5 → local_openai2anthropic-0.2.7}/tests/test_router.py +0 -0
  32. {local_openai2anthropic-0.2.5 → local_openai2anthropic-0.2.7}/tests/test_upstream.sh +0 -0
  33. {local_openai2anthropic-0.2.5 → local_openai2anthropic-0.2.7}/uv.lock +0 -0
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: local-openai2anthropic
-Version: 0.2.5
+Version: 0.2.7
 Summary: A lightweight proxy server that converts Anthropic Messages API to OpenAI API
 Project-URL: Homepage, https://github.com/dongfangzan/local-openai2anthropic
 Project-URL: Repository, https://github.com/dongfangzan/local-openai2anthropic
@@ -80,7 +80,11 @@ Other OpenAI-compatible backends may work but are not fully tested.
 pip install local-openai2anthropic
 ```
 
-### 2. Start Your Local LLM Server
+### 2. Configure Your LLM Backend (Optional)
+
+**Option A: Start a local LLM server**
+
+If you don't have an LLM server running, you can start one locally:
 
 Example with vLLM:
 ```bash
@@ -94,6 +98,16 @@ sglang launch --model-path meta-llama/Llama-2-7b-chat-hf --port 8000
 # SGLang starts at http://localhost:8000/v1
 ```
 
+**Option B: Use an existing OpenAI-compatible API**
+
+If you already have a deployed OpenAI-compatible API (local or remote), you can use it directly. Just note the base URL for the next step.
+
+Examples:
+- Local vLLM/SGLang: `http://localhost:8000/v1`
+- Remote API: `https://api.example.com/v1`
+
+> **Note:** If you're using [Ollama](https://ollama.com), it natively supports the Anthropic API format, so you don't need this proxy. Just point your Claude SDK directly to `http://localhost:11434/v1`.
+
 ### 3. Start the Proxy
 
 **Option A: Run in background (recommended)**
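
To make Option B concrete: once the proxy is up, any Anthropic-style client can reach your OpenAI-compatible backend through it. A minimal sketch with the official `anthropic` Python SDK; the port, key, and model name are the README's example values, not fixed requirements:

```python
# Minimal smoke test against the proxy. Assumes `pip install anthropic` and the
# README's example setup (proxy on :8080, a vLLM backend serving the Llama model).
from anthropic import Anthropic

client = Anthropic(
    base_url="http://localhost:8080",  # the oa2a proxy, not api.anthropic.com
    api_key="dummy-key",               # any value works while the proxy's optional api_key is unset
)

message = client.messages.create(
    model="meta-llama/Llama-2-7b-chat-hf",  # forwarded to the OpenAI-compatible backend
    max_tokens=128,
    messages=[{"role": "user", "content": "Say hello in one sentence."}],
)
print(message.content[0].text)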
@@ -156,22 +170,31 @@ You can configure [Claude Code](https://github.com/anthropics/claude-code) to us
 
 ### Configuration Steps
 
-1. **Create or edit Claude Code config file** at `~/.claude/CLAUDE.md`:
-
-```markdown
-# Claude Code Configuration
-
-## API Settings
-
-- Claude API Base URL: http://localhost:8080
-- Claude API Key: dummy-key
-
-## Model Settings
-
-Use model: meta-llama/Llama-2-7b-chat-hf # Your local model name
+1. **Edit Claude Code config file** at `~/.claude/settings.json`:
+
+```json
+{
+  "env": {
+    "ANTHROPIC_BASE_URL": "http://localhost:8080",
+    "ANTHROPIC_API_KEY": "dummy-key",
+    "ANTHROPIC_MODEL": "meta-llama/Llama-2-7b-chat-hf",
+    "ANTHROPIC_DEFAULT_SONNET_MODEL": "meta-llama/Llama-2-7b-chat-hf",
+    "ANTHROPIC_DEFAULT_OPUS_MODEL": "meta-llama/Llama-2-7b-chat-hf",
+    "ANTHROPIC_DEFAULT_HAIKU_MODEL": "meta-llama/Llama-2-7b-chat-hf",
+    "ANTHROPIC_REASONING_MODEL": "meta-llama/Llama-2-7b-chat-hf"
+  }
+}
 ```
 
-2. **Alternatively, set environment variables** before running Claude Code:
+| Variable | Description |
+|----------|-------------|
+| `ANTHROPIC_MODEL` | General model setting |
+| `ANTHROPIC_DEFAULT_SONNET_MODEL` | Default model for Sonnet mode (Claude Code default) |
+| `ANTHROPIC_DEFAULT_OPUS_MODEL` | Default model for Opus mode |
+| `ANTHROPIC_DEFAULT_HAIKU_MODEL` | Default model for Haiku mode |
+| `ANTHROPIC_REASONING_MODEL` | Default model for reasoning tasks |
+
+2. **Or set environment variables** before running Claude Code:
 
 ```bash
 export ANTHROPIC_BASE_URL=http://localhost:8080
@@ -180,38 +203,36 @@ export ANTHROPIC_API_KEY=dummy-key
 claude
 ```
 
-3. **Or use the `--api-key` and `--base-url` flags**:
-
-```bash
-claude --api-key dummy-key --base-url http://localhost:8080
-```
-
 ### Complete Workflow Example
 
+Make sure `~/.claude/settings.json` is configured as described above.
+
 Terminal 1 - Start your local LLM:
 ```bash
 vllm serve meta-llama/Llama-2-7b-chat-hf
 ```
 
-Terminal 2 - Start the proxy:
+Terminal 2 - Start the proxy (background mode):
 ```bash
 export OA2A_OPENAI_BASE_URL=http://localhost:8000/v1
 export OA2A_OPENAI_API_KEY=dummy
 export OA2A_TAVILY_API_KEY="tvly-your-tavily-api-key" # Optional: enable web search
 
-oa2a
+oa2a start
 ```
 
-Terminal 3 - Launch Claude Code with local LLM:
+Terminal 3 - Launch Claude Code:
 ```bash
-export ANTHROPIC_BASE_URL=http://localhost:8080
-export ANTHROPIC_API_KEY=dummy-key
-
 claude
 ```
 
 Now Claude Code will use your local LLM instead of the cloud API.
 
+To stop the proxy:
+```bash
+oa2a stop
+```
+
 ---
 
 ## Features
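
The variable table is the heart of the Claude Code integration: because the sample settings.json points every variable at the same local model, whichever mode Claude Code selects resolves to your backend. A hypothetical sketch of the precedence the table implies (the function and exact fallback order are illustrative assumptions, not Claude Code internals):

```python
# Hypothetical sketch of the precedence the table above implies: the
# mode-specific variable wins, with ANTHROPIC_MODEL as the general fallback.
# This is NOT Claude Code's actual source; it only illustrates why pointing
# all the variables at one local model makes every mode land on that model.
import os
from typing import Optional

def resolve_model(mode: str) -> Optional[str]:
    """Resolve the model for a hypothetical mode: 'sonnet', 'opus', or 'haiku'."""
    specific = os.environ.get(f"ANTHROPIC_DEFAULT_{mode.upper()}_MODEL")
    return specific or os.environ.get("ANTHROPIC_MODEL")

# With the sample settings.json, resolve_model("sonnet"), resolve_model("opus"),
# and resolve_model("haiku") all return "meta-llama/Llama-2-7b-chat-hf".
```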
@@ -45,7 +45,11 @@ Other OpenAI-compatible backends may work but are not fully tested.
 pip install local-openai2anthropic
 ```
 
-### 2. Start Your Local LLM Server
+### 2. Configure Your LLM Backend (Optional)
+
+**Option A: Start a local LLM server**
+
+If you don't have an LLM server running, you can start one locally:
 
 Example with vLLM:
 ```bash
@@ -59,6 +63,16 @@ sglang launch --model-path meta-llama/Llama-2-7b-chat-hf --port 8000
 # SGLang starts at http://localhost:8000/v1
 ```
 
+**Option B: Use an existing OpenAI-compatible API**
+
+If you already have a deployed OpenAI-compatible API (local or remote), you can use it directly. Just note the base URL for the next step.
+
+Examples:
+- Local vLLM/SGLang: `http://localhost:8000/v1`
+- Remote API: `https://api.example.com/v1`
+
+> **Note:** If you're using [Ollama](https://ollama.com), it natively supports the Anthropic API format, so you don't need this proxy. Just point your Claude SDK directly to `http://localhost:11434/v1`.
+
 ### 3. Start the Proxy
 
 **Option A: Run in background (recommended)**
@@ -121,22 +135,31 @@ You can configure [Claude Code](https://github.com/anthropics/claude-code) to us
 
 ### Configuration Steps
 
-1. **Create or edit Claude Code config file** at `~/.claude/CLAUDE.md`:
-
-```markdown
-# Claude Code Configuration
-
-## API Settings
-
-- Claude API Base URL: http://localhost:8080
-- Claude API Key: dummy-key
-
-## Model Settings
-
-Use model: meta-llama/Llama-2-7b-chat-hf # Your local model name
+1. **Edit Claude Code config file** at `~/.claude/settings.json`:
+
+```json
+{
+  "env": {
+    "ANTHROPIC_BASE_URL": "http://localhost:8080",
+    "ANTHROPIC_API_KEY": "dummy-key",
+    "ANTHROPIC_MODEL": "meta-llama/Llama-2-7b-chat-hf",
+    "ANTHROPIC_DEFAULT_SONNET_MODEL": "meta-llama/Llama-2-7b-chat-hf",
+    "ANTHROPIC_DEFAULT_OPUS_MODEL": "meta-llama/Llama-2-7b-chat-hf",
+    "ANTHROPIC_DEFAULT_HAIKU_MODEL": "meta-llama/Llama-2-7b-chat-hf",
+    "ANTHROPIC_REASONING_MODEL": "meta-llama/Llama-2-7b-chat-hf"
+  }
+}
 ```
 
-2. **Alternatively, set environment variables** before running Claude Code:
+| Variable | Description |
+|----------|-------------|
+| `ANTHROPIC_MODEL` | General model setting |
+| `ANTHROPIC_DEFAULT_SONNET_MODEL` | Default model for Sonnet mode (Claude Code default) |
+| `ANTHROPIC_DEFAULT_OPUS_MODEL` | Default model for Opus mode |
+| `ANTHROPIC_DEFAULT_HAIKU_MODEL` | Default model for Haiku mode |
+| `ANTHROPIC_REASONING_MODEL` | Default model for reasoning tasks |
+
+2. **Or set environment variables** before running Claude Code:
 
 ```bash
 export ANTHROPIC_BASE_URL=http://localhost:8080
@@ -145,38 +168,36 @@ export ANTHROPIC_API_KEY=dummy-key
 claude
 ```
 
-3. **Or use the `--api-key` and `--base-url` flags**:
-
-```bash
-claude --api-key dummy-key --base-url http://localhost:8080
-```
-
 ### Complete Workflow Example
 
+Make sure `~/.claude/settings.json` is configured as described above.
+
 Terminal 1 - Start your local LLM:
 ```bash
 vllm serve meta-llama/Llama-2-7b-chat-hf
 ```
 
-Terminal 2 - Start the proxy:
+Terminal 2 - Start the proxy (background mode):
 ```bash
 export OA2A_OPENAI_BASE_URL=http://localhost:8000/v1
 export OA2A_OPENAI_API_KEY=dummy
 export OA2A_TAVILY_API_KEY="tvly-your-tavily-api-key" # Optional: enable web search
 
-oa2a
+oa2a start
 ```
 
-Terminal 3 - Launch Claude Code with local LLM:
+Terminal 3 - Launch Claude Code:
 ```bash
-export ANTHROPIC_BASE_URL=http://localhost:8080
-export ANTHROPIC_API_KEY=dummy-key
-
 claude
 ```
 
 Now Claude Code will use your local LLM instead of the cloud API.
 
+To stop the proxy:
+```bash
+oa2a stop
+```
+
 ---
 
 ## Features
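
Because `oa2a start` detaches into the background, it can be useful to confirm the daemon is actually listening before launching Claude Code. A small stdlib-only sketch: it polls the FastAPI docs route, which main.py (see the hunk further down) mounts at `/docs`; port 8080 is config.py's default, so adjust both if you changed them.

```python
# Optional readiness check after `oa2a start`: poll the proxy's /docs route
# (grounded in main.py's docs_url="/docs") until it answers or we time out.
import time
import urllib.request
from urllib.error import URLError

def wait_for_proxy(url: str = "http://localhost:8080/docs", timeout: float = 10.0) -> bool:
    """Return True once the proxy responds, False if `timeout` seconds elapse."""
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        try:
            with urllib.request.urlopen(url, timeout=2):
                return True
        except URLError:
            time.sleep(0.5)  # daemon not up yet; retry shortly
    return False

if __name__ == "__main__":
    print("proxy up:", wait_for_proxy())
```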
@@ -45,7 +45,11 @@
 pip install local-openai2anthropic
 ```
 
-### 2. Start the Local Model Server
+### 2. Configure Your LLM Backend (Optional)
+
+**Option A: Start a local model server**
+
+If you don't have an LLM server running yet, you can start one locally:
 
 Example with vLLM:
 ```bash
@@ -59,6 +63,16 @@ sglang launch --model-path meta-llama/Llama-2-7b-chat-hf --port 8000
 # SGLang starts at http://localhost:8000/v1
 ```
 
+**Option B: Use an existing OpenAI-compatible API**
+
+If you have already deployed an OpenAI-compatible API (local or remote), you can use it directly. Note its base URL for the next step.
+
+Examples:
+- Local vLLM/SGLang: `http://localhost:8000/v1`
+- Remote API: `https://api.example.com/v1`
+
+> **Note:** If you're using [Ollama](https://ollama.com), it natively supports the Anthropic API format, so this proxy isn't needed. Just point your Claude SDK directly at `http://localhost:11434/v1`.
+
 ### 3. Start the Proxy
 
 **Option A: Run in background (recommended)**
@@ -121,21 +135,30 @@ print(message.content[0].text)
 
 ### Configuration Steps
 
-1. **Create or edit the Claude Code config file** `~/.claude/CLAUDE.md`:
-
-```markdown
-# Claude Code Configuration
-
-## API Settings
-
-- Claude API Base URL: http://localhost:8080
-- Claude API Key: dummy-key
-
-## Model Settings
-
-Use model: meta-llama/Llama-2-7b-chat-hf # Your local model name
+1. **Edit the Claude Code config file** `~/.claude/settings.json`:
+
+```json
+{
+  "env": {
+    "ANTHROPIC_BASE_URL": "http://localhost:8080",
+    "ANTHROPIC_API_KEY": "dummy-key",
+    "ANTHROPIC_MODEL": "meta-llama/Llama-2-7b-chat-hf",
+    "ANTHROPIC_DEFAULT_SONNET_MODEL": "meta-llama/Llama-2-7b-chat-hf",
+    "ANTHROPIC_DEFAULT_OPUS_MODEL": "meta-llama/Llama-2-7b-chat-hf",
+    "ANTHROPIC_DEFAULT_HAIKU_MODEL": "meta-llama/Llama-2-7b-chat-hf",
+    "ANTHROPIC_REASONING_MODEL": "meta-llama/Llama-2-7b-chat-hf"
+  }
+}
 ```
 
+| Variable | Description |
+|----------|-------------|
+| `ANTHROPIC_MODEL` | General model setting |
+| `ANTHROPIC_DEFAULT_SONNET_MODEL` | Default model for Sonnet mode (used by Claude Code by default) |
+| `ANTHROPIC_DEFAULT_OPUS_MODEL` | Default model for Opus mode |
+| `ANTHROPIC_DEFAULT_HAIKU_MODEL` | Default model for Haiku mode |
+| `ANTHROPIC_REASONING_MODEL` | Default model for reasoning tasks |
+
 2. **Or set environment variables before running Claude Code**:
 
 ```bash
@@ -145,38 +168,36 @@ export ANTHROPIC_API_KEY=dummy-key
 claude
 ```
 
-3. **You can also use the `--api-key` and `--base-url` flags**:
-
-```bash
-claude --api-key dummy-key --base-url http://localhost:8080
-```
-
 ### Complete Workflow Example
 
+Make sure `~/.claude/settings.json` has been configured as described above.
+
 Terminal 1 - Start the local model:
 ```bash
 vllm serve meta-llama/Llama-2-7b-chat-hf
 ```
 
-Terminal 2 - Start the proxy:
+Terminal 2 - Start the proxy (run in background):
 ```bash
 export OA2A_OPENAI_BASE_URL=http://localhost:8000/v1
 export OA2A_OPENAI_API_KEY=dummy
 export OA2A_TAVILY_API_KEY="tvly-your-tavily-api-key" # Optional: enable web search
 
-oa2a
+oa2a start
 ```
 
-Terminal 3 - Launch Claude Code with the local model:
+Terminal 3 - Launch Claude Code:
 ```bash
-export ANTHROPIC_BASE_URL=http://localhost:8080
-export ANTHROPIC_API_KEY=dummy-key
-
 claude
 ```
 
 Now Claude Code will use your local LLM instead of the cloud API.
 
+To stop the proxy:
+```bash
+oa2a stop
+```
+
 ---
 
 ## Features
@@ -1,6 +1,6 @@
 [project]
 name = "local-openai2anthropic"
-version = "0.2.5"
+version = "0.2.7"
 description = "A lightweight proxy server that converts Anthropic Messages API to OpenAI API"
 readme = "README.md"
 license = { text = "Apache-2.0" }
@@ -11,34 +11,35 @@ from pydantic_settings import BaseSettings, SettingsConfigDict
 
 class Settings(BaseSettings):
     """Application settings loaded from environment variables."""
-    
+
     model_config = SettingsConfigDict(
         env_prefix="OA2A_", # OpenAI-to-Anthropic prefix
         env_file=".env",
         env_file_encoding="utf-8",
         case_sensitive=False,
+        extra="ignore",
     )
-    
+
     # OpenAI API Configuration
     openai_api_key: Optional[str] = None
     openai_base_url: str = "https://api.openai.com/v1"
     openai_org_id: Optional[str] = None
     openai_project_id: Optional[str] = None
-    
+
     # Server Configuration
     host: str = "0.0.0.0"
     port: int = 8080
     request_timeout: float = 300.0 # 5 minutes
-    
+
     # API Key for authenticating requests to this server (optional)
     api_key: Optional[str] = None
-    
+
     # CORS settings
     cors_origins: list[str] = ["*"]
     cors_credentials: bool = True
     cors_methods: list[str] = ["*"]
     cors_headers: list[str] = ["*"]
-    
+
     # Logging
     log_level: str = "DEBUG"
 
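
Aside from stripping trailing whitespace, the one behavioral change in config.py is `extra="ignore"`. `BaseSettings` in pydantic-settings 2.x defaults to `extra="forbid"`, so an undeclared key reaching the model (for example, a stray entry in the `.env` file this class loads) raises "Extra inputs are not permitted". A standalone sketch of the difference, assuming pydantic-settings >= 2 is installed:

```python
# Demonstrates why extra="ignore" was added: under the default extra="forbid",
# unknown inputs raise a ValidationError; with extra="ignore" they are dropped.
from pydantic import ValidationError
from pydantic_settings import BaseSettings, SettingsConfigDict

class Strict(BaseSettings):
    model_config = SettingsConfigDict(env_prefix="OA2A_")  # inherits extra="forbid"
    port: int = 8080

class Lenient(BaseSettings):
    model_config = SettingsConfigDict(env_prefix="OA2A_", extra="ignore")
    port: int = 8080

try:
    Strict(unknown_option=True)     # simulates an undeclared key reaching the model
except ValidationError as exc:
    print(exc.errors()[0]["type"])  # "extra_forbidden"

print(Lenient(unknown_option=True).port)  # 8080 -- unknown key silently dropped
```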
@@ -32,7 +32,7 @@ def create_app(settings: Settings | None = None) -> FastAPI:
     app = FastAPI(
         title="local-openai2anthropic",
         description="A proxy server that converts Anthropic Messages API to OpenAI API",
-        version="0.2.0",
+        version="0.2.5",
         docs_url="/docs",
         redoc_url="/redoc",
     )
@@ -182,7 +182,7 @@ Examples:
     parser.add_argument(
         "--version",
         action="version",
-        version="%(prog)s 0.2.0",
+        version="%(prog)s 0.2.5",
     )
 
     # Create subparsers for commands
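
Both main.py hunks bump hard-coded version literals, yet both now read 0.2.5 while the release itself is 0.2.7, so the FastAPI metadata and the `--version` flag still lag the package by two patch releases. A common guard against this kind of drift (a suggestion, not what the package currently does) is to derive the string from installed package metadata, so it can only come from pyproject.toml:

```python
# Sketch: resolve the version from installed metadata instead of a literal,
# so CLI and FastAPI version strings cannot drift from pyproject.toml.
from importlib.metadata import PackageNotFoundError, version

try:
    __version__ = version("local-openai2anthropic")
except PackageNotFoundError:
    # Running from a source checkout without an installed distribution.
    __version__ = "0.0.0.dev0"
```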