local-openai2anthropic 0.2.4__tar.gz → 0.2.7__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (33)
  1. {local_openai2anthropic-0.2.4 → local_openai2anthropic-0.2.7}/PKG-INFO +50 -28
  2. {local_openai2anthropic-0.2.4 → local_openai2anthropic-0.2.7}/README.md +49 -27
  3. {local_openai2anthropic-0.2.4 → local_openai2anthropic-0.2.7}/README_zh.md +48 -26
  4. {local_openai2anthropic-0.2.4 → local_openai2anthropic-0.2.7}/pyproject.toml +1 -1
  5. {local_openai2anthropic-0.2.4 → local_openai2anthropic-0.2.7}/src/local_openai2anthropic/__init__.py +1 -1
  6. {local_openai2anthropic-0.2.4 → local_openai2anthropic-0.2.7}/src/local_openai2anthropic/config.py +7 -6
  7. {local_openai2anthropic-0.2.4 → local_openai2anthropic-0.2.7}/src/local_openai2anthropic/converter.py +2 -2
  8. {local_openai2anthropic-0.2.4 → local_openai2anthropic-0.2.7}/src/local_openai2anthropic/main.py +2 -2
  9. {local_openai2anthropic-0.2.4 → local_openai2anthropic-0.2.7}/src/local_openai2anthropic/router.py +2 -2
  10. {local_openai2anthropic-0.2.4 → local_openai2anthropic-0.2.7}/.github/workflows/publish.yml +0 -0
  11. {local_openai2anthropic-0.2.4 → local_openai2anthropic-0.2.7}/.gitignore +0 -0
  12. {local_openai2anthropic-0.2.4 → local_openai2anthropic-0.2.7}/LICENSE +0 -0
  13. {local_openai2anthropic-0.2.4 → local_openai2anthropic-0.2.7}/examples/basic_chat.py +0 -0
  14. {local_openai2anthropic-0.2.4 → local_openai2anthropic-0.2.7}/examples/streaming.py +0 -0
  15. {local_openai2anthropic-0.2.4 → local_openai2anthropic-0.2.7}/examples/thinking_mode.py +0 -0
  16. {local_openai2anthropic-0.2.4 → local_openai2anthropic-0.2.7}/examples/tool_calling.py +0 -0
  17. {local_openai2anthropic-0.2.4 → local_openai2anthropic-0.2.7}/examples/vision.py +0 -0
  18. {local_openai2anthropic-0.2.4 → local_openai2anthropic-0.2.7}/examples/web_search.py +0 -0
  19. {local_openai2anthropic-0.2.4 → local_openai2anthropic-0.2.7}/src/local_openai2anthropic/__main__.py +0 -0
  20. {local_openai2anthropic-0.2.4 → local_openai2anthropic-0.2.7}/src/local_openai2anthropic/daemon.py +0 -0
  21. {local_openai2anthropic-0.2.4 → local_openai2anthropic-0.2.7}/src/local_openai2anthropic/daemon_runner.py +0 -0
  22. {local_openai2anthropic-0.2.4 → local_openai2anthropic-0.2.7}/src/local_openai2anthropic/openai_types.py +0 -0
  23. {local_openai2anthropic-0.2.4 → local_openai2anthropic-0.2.7}/src/local_openai2anthropic/protocol.py +0 -0
  24. {local_openai2anthropic-0.2.4 → local_openai2anthropic-0.2.7}/src/local_openai2anthropic/server_tools/__init__.py +0 -0
  25. {local_openai2anthropic-0.2.4 → local_openai2anthropic-0.2.7}/src/local_openai2anthropic/server_tools/base.py +0 -0
  26. {local_openai2anthropic-0.2.4 → local_openai2anthropic-0.2.7}/src/local_openai2anthropic/server_tools/web_search.py +0 -0
  27. {local_openai2anthropic-0.2.4 → local_openai2anthropic-0.2.7}/src/local_openai2anthropic/tavily_client.py +0 -0
  28. {local_openai2anthropic-0.2.4 → local_openai2anthropic-0.2.7}/tests/__init__.py +0 -0
  29. {local_openai2anthropic-0.2.4 → local_openai2anthropic-0.2.7}/tests/test_converter.py +0 -0
  30. {local_openai2anthropic-0.2.4 → local_openai2anthropic-0.2.7}/tests/test_integration.py +0 -0
  31. {local_openai2anthropic-0.2.4 → local_openai2anthropic-0.2.7}/tests/test_router.py +0 -0
  32. {local_openai2anthropic-0.2.4 → local_openai2anthropic-0.2.7}/tests/test_upstream.sh +0 -0
  33. {local_openai2anthropic-0.2.4 → local_openai2anthropic-0.2.7}/uv.lock +0 -0
PKG-INFO

@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: local-openai2anthropic
- Version: 0.2.4
+ Version: 0.2.7
  Summary: A lightweight proxy server that converts Anthropic Messages API to OpenAI API
  Project-URL: Homepage, https://github.com/dongfangzan/local-openai2anthropic
  Project-URL: Repository, https://github.com/dongfangzan/local-openai2anthropic
@@ -55,6 +55,7 @@ This proxy translates Claude SDK calls to OpenAI API format in real-time, enabli
  - **Offline development** without cloud API costs
  - **Privacy-first AI** - data never leaves your machine
  - **Seamless model switching** between cloud and local
+ - **Web Search tool** - built-in Tavily web search for local models

  ---

@@ -79,7 +80,11 @@ Other OpenAI-compatible backends may work but are not fully tested.
  pip install local-openai2anthropic
  ```

- ### 2. Start Your Local LLM Server
+ ### 2. Configure Your LLM Backend (Optional)
+
+ **Option A: Start a local LLM server**
+
+ If you don't have an LLM server running, you can start one locally:

  Example with vLLM:
  ```bash
@@ -93,6 +98,16 @@ sglang launch --model-path meta-llama/Llama-2-7b-chat-hf --port 8000
  # SGLang starts at http://localhost:8000/v1
  ```

+ **Option B: Use an existing OpenAI-compatible API**
+
+ If you already have a deployed OpenAI-compatible API (local or remote), you can use it directly. Just note the base URL for the next step.
+
+ Examples:
+ - Local vLLM/SGLang: `http://localhost:8000/v1`
+ - Remote API: `https://api.example.com/v1`
+
+ > **Note:** If you're using [Ollama](https://ollama.com), it natively supports the Anthropic API format, so you don't need this proxy. Just point your Claude SDK directly to `http://localhost:11434/v1`.
+
  ### 3. Start the Proxy

  **Option A: Run in background (recommended)**
@@ -155,22 +170,31 @@ You can configure [Claude Code](https://github.com/anthropics/claude-code) to us

  ### Configuration Steps

- 1. **Create or edit Claude Code config file** at `~/.claude/CLAUDE.md`:
-
- ```markdown
- # Claude Code Configuration
-
- ## API Settings
-
- - Claude API Base URL: http://localhost:8080
- - Claude API Key: dummy-key
-
- ## Model Settings
-
- Use model: meta-llama/Llama-2-7b-chat-hf # Your local model name
+ 1. **Edit Claude Code config file** at `~/.claude/settings.json`:
+
+ ```json
+ {
+   "env": {
+     "ANTHROPIC_BASE_URL": "http://localhost:8080",
+     "ANTHROPIC_API_KEY": "dummy-key",
+     "ANTHROPIC_MODEL": "meta-llama/Llama-2-7b-chat-hf",
+     "ANTHROPIC_DEFAULT_SONNET_MODEL": "meta-llama/Llama-2-7b-chat-hf",
+     "ANTHROPIC_DEFAULT_OPUS_MODEL": "meta-llama/Llama-2-7b-chat-hf",
+     "ANTHROPIC_DEFAULT_HAIKU_MODEL": "meta-llama/Llama-2-7b-chat-hf",
+     "ANTHROPIC_REASONING_MODEL": "meta-llama/Llama-2-7b-chat-hf"
+   }
+ }
  ```

- 2. **Alternatively, set environment variables** before running Claude Code:
+ | Variable | Description |
+ |----------|-------------|
+ | `ANTHROPIC_MODEL` | General model setting |
+ | `ANTHROPIC_DEFAULT_SONNET_MODEL` | Default model for Sonnet mode (Claude Code default) |
+ | `ANTHROPIC_DEFAULT_OPUS_MODEL` | Default model for Opus mode |
+ | `ANTHROPIC_DEFAULT_HAIKU_MODEL` | Default model for Haiku mode |
+ | `ANTHROPIC_REASONING_MODEL` | Default model for reasoning tasks |
+
+ 2. **Or set environment variables** before running Claude Code:

  ```bash
  export ANTHROPIC_BASE_URL=http://localhost:8080
@@ -179,38 +203,36 @@ export ANTHROPIC_API_KEY=dummy-key
  claude
  ```

- 3. **Or use the `--api-key` and `--base-url` flags**:
-
- ```bash
- claude --api-key dummy-key --base-url http://localhost:8080
- ```
-
  ### Complete Workflow Example

+ Make sure `~/.claude/settings.json` is configured as described above.
+
  Terminal 1 - Start your local LLM:
  ```bash
  vllm serve meta-llama/Llama-2-7b-chat-hf
  ```

- Terminal 2 - Start the proxy:
+ Terminal 2 - Start the proxy (background mode):
  ```bash
  export OA2A_OPENAI_BASE_URL=http://localhost:8000/v1
  export OA2A_OPENAI_API_KEY=dummy
  export OA2A_TAVILY_API_KEY="tvly-your-tavily-api-key" # Optional: enable web search

- oa2a
+ oa2a start
  ```

- Terminal 3 - Launch Claude Code with local LLM:
+ Terminal 3 - Launch Claude Code:
  ```bash
- export ANTHROPIC_BASE_URL=http://localhost:8080
- export ANTHROPIC_API_KEY=dummy-key
-
  claude
  ```

  Now Claude Code will use your local LLM instead of the cloud API.

+ To stop the proxy:
+ ```bash
+ oa2a stop
+ ```
+
  ---

  ## Features
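The quick-start changes above (repeated in README.md and README_zh.md below) all funnel Anthropic-style clients through the proxy on port 8080. As a minimal sketch, not part of this diff, of what such a call looks like with the official `anthropic` Python SDK and the model name used throughout the examples:

```python
# Sketch only: point the anthropic SDK at the local proxy instead of the cloud API.
from anthropic import Anthropic

client = Anthropic(
    base_url="http://localhost:8080",  # the proxy started with `oa2a start`
    api_key="dummy-key",               # placeholder; the real upstream key is configured on the proxy
)

message = client.messages.create(
    model="meta-llama/Llama-2-7b-chat-hf",  # your local model name
    max_tokens=256,
    messages=[{"role": "user", "content": "Hello from the local proxy!"}],
)
print(message.content[0].text)
```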
README.md

@@ -20,6 +20,7 @@ This proxy translates Claude SDK calls to OpenAI API format in real-time, enabli
  - **Offline development** without cloud API costs
  - **Privacy-first AI** - data never leaves your machine
  - **Seamless model switching** between cloud and local
+ - **Web Search tool** - built-in Tavily web search for local models

  ---

@@ -44,7 +45,11 @@ Other OpenAI-compatible backends may work but are not fully tested.
  pip install local-openai2anthropic
  ```

- ### 2. Start Your Local LLM Server
+ ### 2. Configure Your LLM Backend (Optional)
+
+ **Option A: Start a local LLM server**
+
+ If you don't have an LLM server running, you can start one locally:

  Example with vLLM:
  ```bash
@@ -58,6 +63,16 @@ sglang launch --model-path meta-llama/Llama-2-7b-chat-hf --port 8000
  # SGLang starts at http://localhost:8000/v1
  ```

+ **Option B: Use an existing OpenAI-compatible API**
+
+ If you already have a deployed OpenAI-compatible API (local or remote), you can use it directly. Just note the base URL for the next step.
+
+ Examples:
+ - Local vLLM/SGLang: `http://localhost:8000/v1`
+ - Remote API: `https://api.example.com/v1`
+
+ > **Note:** If you're using [Ollama](https://ollama.com), it natively supports the Anthropic API format, so you don't need this proxy. Just point your Claude SDK directly to `http://localhost:11434/v1`.
+
  ### 3. Start the Proxy

  **Option A: Run in background (recommended)**
@@ -120,22 +135,31 @@ You can configure [Claude Code](https://github.com/anthropics/claude-code) to us

  ### Configuration Steps

- 1. **Create or edit Claude Code config file** at `~/.claude/CLAUDE.md`:
-
- ```markdown
- # Claude Code Configuration
-
- ## API Settings
-
- - Claude API Base URL: http://localhost:8080
- - Claude API Key: dummy-key
-
- ## Model Settings
-
- Use model: meta-llama/Llama-2-7b-chat-hf # Your local model name
+ 1. **Edit Claude Code config file** at `~/.claude/settings.json`:
+
+ ```json
+ {
+   "env": {
+     "ANTHROPIC_BASE_URL": "http://localhost:8080",
+     "ANTHROPIC_API_KEY": "dummy-key",
+     "ANTHROPIC_MODEL": "meta-llama/Llama-2-7b-chat-hf",
+     "ANTHROPIC_DEFAULT_SONNET_MODEL": "meta-llama/Llama-2-7b-chat-hf",
+     "ANTHROPIC_DEFAULT_OPUS_MODEL": "meta-llama/Llama-2-7b-chat-hf",
+     "ANTHROPIC_DEFAULT_HAIKU_MODEL": "meta-llama/Llama-2-7b-chat-hf",
+     "ANTHROPIC_REASONING_MODEL": "meta-llama/Llama-2-7b-chat-hf"
+   }
+ }
  ```

- 2. **Alternatively, set environment variables** before running Claude Code:
+ | Variable | Description |
+ |----------|-------------|
+ | `ANTHROPIC_MODEL` | General model setting |
+ | `ANTHROPIC_DEFAULT_SONNET_MODEL` | Default model for Sonnet mode (Claude Code default) |
+ | `ANTHROPIC_DEFAULT_OPUS_MODEL` | Default model for Opus mode |
+ | `ANTHROPIC_DEFAULT_HAIKU_MODEL` | Default model for Haiku mode |
+ | `ANTHROPIC_REASONING_MODEL` | Default model for reasoning tasks |
+
+ 2. **Or set environment variables** before running Claude Code:

  ```bash
  export ANTHROPIC_BASE_URL=http://localhost:8080
@@ -144,38 +168,36 @@ export ANTHROPIC_API_KEY=dummy-key
  claude
  ```

- 3. **Or use the `--api-key` and `--base-url` flags**:
-
- ```bash
- claude --api-key dummy-key --base-url http://localhost:8080
- ```
-
  ### Complete Workflow Example

+ Make sure `~/.claude/settings.json` is configured as described above.
+
  Terminal 1 - Start your local LLM:
  ```bash
  vllm serve meta-llama/Llama-2-7b-chat-hf
  ```

- Terminal 2 - Start the proxy:
+ Terminal 2 - Start the proxy (background mode):
  ```bash
  export OA2A_OPENAI_BASE_URL=http://localhost:8000/v1
  export OA2A_OPENAI_API_KEY=dummy
  export OA2A_TAVILY_API_KEY="tvly-your-tavily-api-key" # Optional: enable web search

- oa2a
+ oa2a start
  ```

- Terminal 3 - Launch Claude Code with local LLM:
+ Terminal 3 - Launch Claude Code:
  ```bash
- export ANTHROPIC_BASE_URL=http://localhost:8080
- export ANTHROPIC_API_KEY=dummy-key
-
  claude
  ```

  Now Claude Code will use your local LLM instead of the cloud API.

+ To stop the proxy:
+ ```bash
+ oa2a stop
+ ```
+
  ---

  ## Features
README_zh.md

@@ -20,6 +20,7 @@
  - **离线开发** - 无需支付云 API 费用
  - **隐私优先** - 数据不出本机
  - **灵活切换** - 云端和本地模型无缝切换
+ - **网络搜索** - 内置 Tavily 网页搜索工具,为本地模型提供联网能力

  ---

@@ -44,7 +45,11 @@
  pip install local-openai2anthropic
  ```

- ### 2. 启动本地模型服务
+ ### 2. 配置你的 LLM 后端(可选)
+
+ **选项 A:启动本地模型服务**
+
+ 如果你还没有运行 LLM 服务,可以在本地启动一个:

  使用 vLLM 示例:
  ```bash
@@ -58,6 +63,16 @@ sglang launch --model-path meta-llama/Llama-2-7b-chat-hf --port 8000
  # SGLang 在 http://localhost:8000/v1 启动
  ```

+ **选项 B:使用已有的 OpenAI 兼容 API**
+
+ 如果你已经部署了 OpenAI 兼容的 API(本地或远程),可以直接使用。记下 base URL 用于下一步。
+
+ 示例:
+ - 本地 vLLM/SGLang:`http://localhost:8000/v1`
+ - 远程 API:`https://api.example.com/v1`
+
+ > **注意:** 如果你使用 [Ollama](https://ollama.com),它原生支持 Anthropic API 格式,无需使用本代理工具。直接将 Claude SDK 指向 `http://localhost:11434/v1` 即可。
+
  ### 3. 启动代理

  **方式 A: 后台运行(推荐)**
@@ -120,21 +135,30 @@ print(message.content[0].text)

  ### 配置步骤

- 1. **创建或编辑 Claude Code 配置文件** `~/.claude/CLAUDE.md`:
-
- ```markdown
- # Claude Code 配置
-
- ## API 设置
-
- - Claude API Base URL: http://localhost:8080
- - Claude API Key: dummy-key
-
- ## 模型设置
-
- Use model: meta-llama/Llama-2-7b-chat-hf # 你的本地模型名称
+ 1. **编辑 Claude Code 配置文件** `~/.claude/settings.json`:
+
+ ```json
+ {
+   "env": {
+     "ANTHROPIC_BASE_URL": "http://localhost:8080",
+     "ANTHROPIC_API_KEY": "dummy-key",
+     "ANTHROPIC_MODEL": "meta-llama/Llama-2-7b-chat-hf",
+     "ANTHROPIC_DEFAULT_SONNET_MODEL": "meta-llama/Llama-2-7b-chat-hf",
+     "ANTHROPIC_DEFAULT_OPUS_MODEL": "meta-llama/Llama-2-7b-chat-hf",
+     "ANTHROPIC_DEFAULT_HAIKU_MODEL": "meta-llama/Llama-2-7b-chat-hf",
+     "ANTHROPIC_REASONING_MODEL": "meta-llama/Llama-2-7b-chat-hf"
+   }
+ }
  ```

+ | 变量 | 说明 |
+ |------|------|
+ | `ANTHROPIC_MODEL` | 通用模型配置 |
+ | `ANTHROPIC_DEFAULT_SONNET_MODEL` | Sonnet 模式默认模型(Claude Code 默认使用) |
+ | `ANTHROPIC_DEFAULT_OPUS_MODEL` | Opus 模式默认模型 |
+ | `ANTHROPIC_DEFAULT_HAIKU_MODEL` | Haiku 模式默认模型 |
+ | `ANTHROPIC_REASONING_MODEL` | 推理任务默认模型 |
+
  2. **或者在运行 Claude Code 前设置环境变量**:

  ```bash
@@ -144,38 +168,36 @@ export ANTHROPIC_API_KEY=dummy-key
  claude
  ```

- 3. **也可以使用 `--api-key` 和 `--base-url` 参数**:
-
- ```bash
- claude --api-key dummy-key --base-url http://localhost:8080
- ```
-
  ### 完整工作流示例

+ 确保 `~/.claude/settings.json` 已按上述步骤配置好。
+
  终端 1 - 启动本地模型:
  ```bash
  vllm serve meta-llama/Llama-2-7b-chat-hf
  ```

- 终端 2 - 启动代理:
+ 终端 2 - 启动代理(后台运行):
  ```bash
  export OA2A_OPENAI_BASE_URL=http://localhost:8000/v1
  export OA2A_OPENAI_API_KEY=dummy
  export OA2A_TAVILY_API_KEY="tvly-your-tavily-api-key" # 可选:启用网页搜索

- oa2a
+ oa2a start
  ```

- 终端 3 - 启动 Claude Code 并使用本地模型:
+ 终端 3 - 启动 Claude Code
  ```bash
- export ANTHROPIC_BASE_URL=http://localhost:8080
- export ANTHROPIC_API_KEY=dummy-key
-
  claude
  ```

  现在 Claude Code 将使用你的本地大模型,而不是云端 API。

+ 如需停止代理:
+ ```bash
+ oa2a stop
+ ```
+
  ---

  ## 功能特性
pyproject.toml

@@ -1,6 +1,6 @@
  [project]
  name = "local-openai2anthropic"
- version = "0.2.4"
+ version = "0.2.7"
  description = "A lightweight proxy server that converts Anthropic Messages API to OpenAI API"
  readme = "README.md"
  license = { text = "Apache-2.0" }
src/local_openai2anthropic/__init__.py

@@ -3,7 +3,7 @@
  local-openai2anthropic: A proxy server that converts Anthropic Messages API to OpenAI API.
  """

- __version__ = "0.2.4"
+ __version__ = "0.2.5"

  from local_openai2anthropic.protocol import (
      AnthropicError,
src/local_openai2anthropic/config.py

@@ -11,34 +11,35 @@ from pydantic_settings import BaseSettings, SettingsConfigDict

  class Settings(BaseSettings):
      """Application settings loaded from environment variables."""
-
+
      model_config = SettingsConfigDict(
          env_prefix="OA2A_", # OpenAI-to-Anthropic prefix
          env_file=".env",
          env_file_encoding="utf-8",
          case_sensitive=False,
+         extra="ignore",
      )
-
+
      # OpenAI API Configuration
      openai_api_key: Optional[str] = None
      openai_base_url: str = "https://api.openai.com/v1"
      openai_org_id: Optional[str] = None
      openai_project_id: Optional[str] = None
-
+
      # Server Configuration
      host: str = "0.0.0.0"
      port: int = 8080
      request_timeout: float = 300.0 # 5 minutes
-
+
      # API Key for authenticating requests to this server (optional)
      api_key: Optional[str] = None
-
+
      # CORS settings
      cors_origins: list[str] = ["*"]
      cors_credentials: bool = True
      cors_methods: list[str] = ["*"]
      cors_headers: list[str] = ["*"]
-
+
      # Logging
      log_level: str = "DEBUG"

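The config.py hunk above adds `extra="ignore"` to the pydantic-settings configuration. A small illustrative sketch of what that buys, assuming pydantic-settings v2 semantics (the class below is a simplified stand-in, not the package's full `Settings`):

```python
# Sketch: OA2A_-prefixed environment variables map onto fields, and
# extra="ignore" makes unrelated OA2A_* variables a no-op instead of a
# validation error (pydantic-settings v2 rejects unknown prefixed inputs by default).
import os
from typing import Optional
from pydantic_settings import BaseSettings, SettingsConfigDict

class DemoSettings(BaseSettings):
    model_config = SettingsConfigDict(env_prefix="OA2A_", extra="ignore")
    openai_base_url: str = "https://api.openai.com/v1"
    openai_api_key: Optional[str] = None
    port: int = 8080

os.environ["OA2A_OPENAI_BASE_URL"] = "http://localhost:8000/v1"
os.environ["OA2A_SOME_UNKNOWN_FLAG"] = "1"  # ignored rather than rejected

settings = DemoSettings()
print(settings.openai_base_url, settings.port)  # http://localhost:8000/v1 8080
```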
src/local_openai2anthropic/converter.py

@@ -49,11 +49,11 @@ def convert_anthropic_to_openai(
      system = anthropic_params.get("system")
      stop_sequences = anthropic_params.get("stop_sequences")
      stream = anthropic_params.get("stream", False)
-     temperature = anthropic_params.get("temperature")
+     temperature = anthropic_params.get("temperature", 0.6)
      tool_choice = anthropic_params.get("tool_choice")
      tools = anthropic_params.get("tools")
      top_k = anthropic_params.get("top_k")
-     top_p = anthropic_params.get("top_p")
+     top_p = anthropic_params.get("top_p", 0.95)
      thinking = anthropic_params.get("thinking")
      # metadata is accepted but not forwarded to OpenAI

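The converter.py hunk gives `temperature` and `top_p` defaults when the incoming Anthropic request omits them. A tiny illustrative sketch of the new lookup, with the values taken from the diff:

```python
# Sketch: omitted sampling parameters are now filled in instead of being forwarded as None.
anthropic_params = {"model": "meta-llama/Llama-2-7b-chat-hf", "max_tokens": 128}

temperature = anthropic_params.get("temperature", 0.6)  # -> 0.6 when omitted
top_p = anthropic_params.get("top_p", 0.95)             # -> 0.95 when omitted
print(temperature, top_p)  # 0.6 0.95
```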
src/local_openai2anthropic/main.py

@@ -32,7 +32,7 @@ def create_app(settings: Settings | None = None) -> FastAPI:
      app = FastAPI(
          title="local-openai2anthropic",
          description="A proxy server that converts Anthropic Messages API to OpenAI API",
-         version="0.2.0",
+         version="0.2.5",
          docs_url="/docs",
          redoc_url="/redoc",
      )
@@ -182,7 +182,7 @@ Examples:
      parser.add_argument(
          "--version",
          action="version",
-         version="%(prog)s 0.2.0",
+         version="%(prog)s 0.2.5",
      )

      # Create subparsers for commands
src/local_openai2anthropic/router.py

@@ -406,7 +406,7 @@ async def _handle_with_server_tools(
      async with httpx.AsyncClient(timeout=settings.request_timeout) as client:
          try:
              # Log full request for debugging
-             logger.info(f"Request body: {json.dumps(params, indent=2, default=str)[:3000]}")
+             logger.debug(f"Request body: {json.dumps(params, indent=2, default=str)[:3000]}")

              response = await client.post(url, headers=headers, json=params)

@@ -421,7 +421,7 @@ async def _handle_with_server_tools(
              )

              completion_data = response.json()
-             logger.info(f"OpenAI response: {json.dumps(completion_data, indent=2)[:500]}...")
+             logger.debug(f"OpenAI response: {json.dumps(completion_data, indent=2)[:500]}...")
              from openai.types.chat import ChatCompletion
              completion = ChatCompletion.model_validate(completion_data)
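The two router.py hunks demote the full request/response bodies from `info` to `debug`. An illustrative sketch of the effect using the standard `logging` module (the logger name here is assumed, not taken from the package):

```python
# Sketch: DEBUG-level messages only appear when the effective level is DEBUG.
# config.py still defaults log_level to "DEBUG", so set OA2A_LOG_LEVEL=INFO to
# hide the request/response bodies.
import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("local_openai2anthropic.router")  # assumed name

logger.debug("Request body: {...}")  # suppressed at INFO
logger.info("request forwarded")     # still visible
```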