local-openai2anthropic 0.2.4__py3-none-any.whl → 0.2.7__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the packages exactly as they appear in the public registry.
--- a/local_openai2anthropic/__init__.py
+++ b/local_openai2anthropic/__init__.py
@@ -3,7 +3,7 @@
 local-openai2anthropic: A proxy server that converts Anthropic Messages API to OpenAI API.
 """
 
-__version__ = "0.2.4"
+__version__ = "0.2.5"
 
 from local_openai2anthropic.protocol import (
     AnthropicError,
--- a/local_openai2anthropic/config.py
+++ b/local_openai2anthropic/config.py
@@ -11,34 +11,35 @@ from pydantic_settings import BaseSettings, SettingsConfigDict
 
 class Settings(BaseSettings):
     """Application settings loaded from environment variables."""
-
+
     model_config = SettingsConfigDict(
         env_prefix="OA2A_",  # OpenAI-to-Anthropic prefix
         env_file=".env",
         env_file_encoding="utf-8",
         case_sensitive=False,
+        extra="ignore",
     )
-
+
     # OpenAI API Configuration
     openai_api_key: Optional[str] = None
     openai_base_url: str = "https://api.openai.com/v1"
     openai_org_id: Optional[str] = None
     openai_project_id: Optional[str] = None
-
+
     # Server Configuration
     host: str = "0.0.0.0"
     port: int = 8080
     request_timeout: float = 300.0  # 5 minutes
-
+
     # API Key for authenticating requests to this server (optional)
     api_key: Optional[str] = None
-
+
     # CORS settings
     cors_origins: list[str] = ["*"]
     cors_credentials: bool = True
     cors_methods: list[str] = ["*"]
     cors_headers: list[str] = ["*"]
-
+
     # Logging
     log_level: str = "DEBUG"
 
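The functional change in this hunk is `extra="ignore"`; the paired blank `-`/`+` lines look like trailing-whitespace cleanup. A minimal sketch of what that option does, assuming pydantic-settings v2 (the two fields and the env value below are illustrative, not the full class):

```python
# Sketch, not package code: OA2A_-prefixed environment variables map onto
# Settings fields; extra="ignore" tells pydantic to drop unrecognized inputs
# (e.g. stray keys in a shared .env file) instead of rejecting them as it
# would under extra="forbid".
import os
from typing import Optional

from pydantic_settings import BaseSettings, SettingsConfigDict


class Settings(BaseSettings):
    model_config = SettingsConfigDict(env_prefix="OA2A_", extra="ignore")

    openai_api_key: Optional[str] = None
    openai_base_url: str = "https://api.openai.com/v1"


os.environ["OA2A_OPENAI_BASE_URL"] = "http://localhost:8000/v1"
print(Settings().openai_base_url)  # -> http://localhost:8000/v1
```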
--- a/local_openai2anthropic/converter.py
+++ b/local_openai2anthropic/converter.py
@@ -49,11 +49,11 @@ def convert_anthropic_to_openai(
     system = anthropic_params.get("system")
     stop_sequences = anthropic_params.get("stop_sequences")
     stream = anthropic_params.get("stream", False)
-    temperature = anthropic_params.get("temperature")
+    temperature = anthropic_params.get("temperature", 0.6)
     tool_choice = anthropic_params.get("tool_choice")
     tools = anthropic_params.get("tools")
     top_k = anthropic_params.get("top_k")
-    top_p = anthropic_params.get("top_p")
+    top_p = anthropic_params.get("top_p", 0.95)
     thinking = anthropic_params.get("thinking")
     # metadata is accepted but not forwarded to OpenAI
 
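This is a behavioral change: requests that omit `temperature` or `top_p` are now forwarded with 0.6 and 0.95 instead of leaving them unset. A toy illustration of the `dict.get` defaulting (hypothetical request dicts, not package code):

```python
# dict.get(key, default) only applies the default when the key is absent,
# so values the client sends explicitly still win.
def pick_sampling(anthropic_params: dict) -> tuple:
    temperature = anthropic_params.get("temperature", 0.6)
    top_p = anthropic_params.get("top_p", 0.95)
    return temperature, top_p

print(pick_sampling({}))                    # (0.6, 0.95) -- new defaults
print(pick_sampling({"temperature": 0.1}))  # (0.1, 0.95) -- explicit value kept
# Caveat: an explicit null ("temperature": None) still passes through as None.
```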
--- a/local_openai2anthropic/main.py
+++ b/local_openai2anthropic/main.py
@@ -32,7 +32,7 @@ def create_app(settings: Settings | None = None) -> FastAPI:
     app = FastAPI(
         title="local-openai2anthropic",
         description="A proxy server that converts Anthropic Messages API to OpenAI API",
-        version="0.2.0",
+        version="0.2.5",
         docs_url="/docs",
         redoc_url="/redoc",
     )
@@ -182,7 +182,7 @@ Examples:
     parser.add_argument(
         "--version",
         action="version",
-        version="%(prog)s 0.2.0",
+        version="%(prog)s 0.2.5",
     )
 
     # Create subparsers for commands
--- a/local_openai2anthropic/router.py
+++ b/local_openai2anthropic/router.py
@@ -406,7 +406,7 @@ async def _handle_with_server_tools(
     async with httpx.AsyncClient(timeout=settings.request_timeout) as client:
         try:
             # Log full request for debugging
-            logger.info(f"Request body: {json.dumps(params, indent=2, default=str)[:3000]}")
+            logger.debug(f"Request body: {json.dumps(params, indent=2, default=str)[:3000]}")
 
             response = await client.post(url, headers=headers, json=params)
 
@@ -421,7 +421,7 @@ async def _handle_with_server_tools(
             )
 
             completion_data = response.json()
-            logger.info(f"OpenAI response: {json.dumps(completion_data, indent=2)[:500]}...")
+            logger.debug(f"OpenAI response: {json.dumps(completion_data, indent=2)[:500]}...")
             from openai.types.chat import ChatCompletion
             completion = ChatCompletion.model_validate(completion_data)
 
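Both logging hunks demote full request/response bodies from INFO to DEBUG; they still show under the package's default `log_level: "DEBUG"` from the config above, but vanish once a deployment raises the level to INFO. A quick sketch of the effect with the standard `logging` module (the logger name is assumed):

```python
# Sketch: the same f-string log calls, now at DEBUG, are suppressed once the
# process logs at INFO, keeping large (and potentially sensitive) bodies out
# of routine logs.
import json
import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("local_openai2anthropic.router")  # name assumed

params = {"model": "my-model", "messages": [{"role": "user", "content": "hi"}]}
logger.debug(f"Request body: {json.dumps(params, indent=2, default=str)[:3000]}")  # hidden at INFO
logger.info("forwarding request upstream")  # still visible
```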
--- a/local_openai2anthropic-0.2.4.dist-info/METADATA
+++ b/local_openai2anthropic-0.2.7.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: local-openai2anthropic
-Version: 0.2.4
+Version: 0.2.7
 Summary: A lightweight proxy server that converts Anthropic Messages API to OpenAI API
 Project-URL: Homepage, https://github.com/dongfangzan/local-openai2anthropic
 Project-URL: Repository, https://github.com/dongfangzan/local-openai2anthropic
@@ -55,6 +55,7 @@ This proxy translates Claude SDK calls to OpenAI API format in real-time, enabli
 - **Offline development** without cloud API costs
 - **Privacy-first AI** - data never leaves your machine
 - **Seamless model switching** between cloud and local
+- **Web Search tool** - built-in Tavily web search for local models
 
 ---
 
@@ -79,7 +80,11 @@ Other OpenAI-compatible backends may work but are not fully tested.
 pip install local-openai2anthropic
 ```
 
-### 2. Start Your Local LLM Server
+### 2. Configure Your LLM Backend (Optional)
+
+**Option A: Start a local LLM server**
+
+If you don't have an LLM server running, you can start one locally:
 
 Example with vLLM:
 ```bash
@@ -93,6 +98,16 @@ sglang launch --model-path meta-llama/Llama-2-7b-chat-hf --port 8000
 # SGLang starts at http://localhost:8000/v1
 ```
 
+**Option B: Use an existing OpenAI-compatible API**
+
+If you already have a deployed OpenAI-compatible API (local or remote), you can use it directly. Just note the base URL for the next step.
+
+Examples:
+- Local vLLM/SGLang: `http://localhost:8000/v1`
+- Remote API: `https://api.example.com/v1`
+
+> **Note:** If you're using [Ollama](https://ollama.com), it natively supports the Anthropic API format, so you don't need this proxy. Just point your Claude SDK directly to `http://localhost:11434/v1`.
+
 ### 3. Start the Proxy
 
 **Option A: Run in background (recommended)**
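Once the proxy is running, any Anthropic Messages client can point at it. A smoke-test sketch using the `anthropic` Python SDK, assuming the proxy on `localhost:8080` and a backend serving the Llama model from the examples; `dummy-key` mirrors the README placeholder:

```python
# Smoke test sketch (requires `pip install anthropic` and a running proxy).
from anthropic import Anthropic

client = Anthropic(base_url="http://localhost:8080", api_key="dummy-key")
msg = client.messages.create(
    model="meta-llama/Llama-2-7b-chat-hf",  # whatever your backend serves
    max_tokens=128,
    messages=[{"role": "user", "content": "Say hello."}],
)
print(msg.content[0].text)
```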
@@ -155,22 +170,31 @@ You can configure [Claude Code](https://github.com/anthropics/claude-code) to us
 
 ### Configuration Steps
 
-1. **Create or edit Claude Code config file** at `~/.claude/CLAUDE.md`:
-
-```markdown
-# Claude Code Configuration
-
-## API Settings
-
-- Claude API Base URL: http://localhost:8080
-- Claude API Key: dummy-key
-
-## Model Settings
-
-Use model: meta-llama/Llama-2-7b-chat-hf  # Your local model name
+1. **Edit Claude Code config file** at `~/.claude/settings.json`:
+
+```json
+{
+  "env": {
+    "ANTHROPIC_BASE_URL": "http://localhost:8080",
+    "ANTHROPIC_API_KEY": "dummy-key",
+    "ANTHROPIC_MODEL": "meta-llama/Llama-2-7b-chat-hf",
+    "ANTHROPIC_DEFAULT_SONNET_MODEL": "meta-llama/Llama-2-7b-chat-hf",
+    "ANTHROPIC_DEFAULT_OPUS_MODEL": "meta-llama/Llama-2-7b-chat-hf",
+    "ANTHROPIC_DEFAULT_HAIKU_MODEL": "meta-llama/Llama-2-7b-chat-hf",
+    "ANTHROPIC_REASONING_MODEL": "meta-llama/Llama-2-7b-chat-hf"
+  }
+}
 ```
 
-2. **Alternatively, set environment variables** before running Claude Code:
+| Variable | Description |
+|----------|-------------|
+| `ANTHROPIC_MODEL` | General model setting |
+| `ANTHROPIC_DEFAULT_SONNET_MODEL` | Default model for Sonnet mode (Claude Code default) |
+| `ANTHROPIC_DEFAULT_OPUS_MODEL` | Default model for Opus mode |
+| `ANTHROPIC_DEFAULT_HAIKU_MODEL` | Default model for Haiku mode |
+| `ANTHROPIC_REASONING_MODEL` | Default model for reasoning tasks |
+
+2. **Or set environment variables** before running Claude Code:
 
 ```bash
 export ANTHROPIC_BASE_URL=http://localhost:8080
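For scripted setups, the same `settings.json` can be generated from Python; a small sketch (hypothetical convenience script, not part of the package; note it overwrites an existing file):

```python
# Writes the ~/.claude/settings.json shown in the hunk above.
import json
from pathlib import Path

model = "meta-llama/Llama-2-7b-chat-hf"  # your local model name
env = {
    "ANTHROPIC_BASE_URL": "http://localhost:8080",
    "ANTHROPIC_API_KEY": "dummy-key",
    "ANTHROPIC_MODEL": model,
    "ANTHROPIC_DEFAULT_SONNET_MODEL": model,
    "ANTHROPIC_DEFAULT_OPUS_MODEL": model,
    "ANTHROPIC_DEFAULT_HAIKU_MODEL": model,
    "ANTHROPIC_REASONING_MODEL": model,
}

path = Path.home() / ".claude" / "settings.json"
path.parent.mkdir(parents=True, exist_ok=True)
path.write_text(json.dumps({"env": env}, indent=2))  # overwrites existing file
print(f"wrote {path}")
```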
@@ -179,38 +203,36 @@ export ANTHROPIC_API_KEY=dummy-key
 claude
 ```
 
-3. **Or use the `--api-key` and `--base-url` flags**:
-
-```bash
-claude --api-key dummy-key --base-url http://localhost:8080
-```
-
 ### Complete Workflow Example
 
+Make sure `~/.claude/settings.json` is configured as described above.
+
 Terminal 1 - Start your local LLM:
 ```bash
 vllm serve meta-llama/Llama-2-7b-chat-hf
 ```
 
-Terminal 2 - Start the proxy:
+Terminal 2 - Start the proxy (background mode):
 ```bash
 export OA2A_OPENAI_BASE_URL=http://localhost:8000/v1
 export OA2A_OPENAI_API_KEY=dummy
 export OA2A_TAVILY_API_KEY="tvly-your-tavily-api-key"  # Optional: enable web search
 
-oa2a
+oa2a start
 ```
 
-Terminal 3 - Launch Claude Code with local LLM:
+Terminal 3 - Launch Claude Code:
 ```bash
-export ANTHROPIC_BASE_URL=http://localhost:8080
-export ANTHROPIC_API_KEY=dummy-key
-
 claude
 ```
 
 Now Claude Code will use your local LLM instead of the cloud API.
 
+To stop the proxy:
+```bash
+oa2a stop
+```
+
 ---
 
 ## Features
@@ -1,19 +1,19 @@
1
- local_openai2anthropic/__init__.py,sha256=xPWXxEgbns2l2aiZzW0BDbNBkjcfXK-Ee-2ukgjQPKc,1059
1
+ local_openai2anthropic/__init__.py,sha256=IEn8YcQGsaEaCr04s3hS2AcgsIt5NU5Qa2C8Uwz7RdY,1059
2
2
  local_openai2anthropic/__main__.py,sha256=K21u5u7FN8-DbO67TT_XDF0neGqJeFrVNkteRauCRQk,179
3
- local_openai2anthropic/config.py,sha256=bnM7p5htd6rHgLn7Z0Ukmm2jVImLuVjIB5Cnfpf2ClY,1918
4
- local_openai2anthropic/converter.py,sha256=qp0LPJBTP0uAb_5l9VINZ03RAjmumxdquP6JqWXiZkQ,15779
3
+ local_openai2anthropic/config.py,sha256=3M5ZAz3uYNMGxaottEBseEOZF-GnVaGuioH9Hpmgnd8,1918
4
+ local_openai2anthropic/converter.py,sha256=d-qYwtv6FIbpKSRsZN4jhnKM4D4k52la-_bpEYPTAS0,15790
5
5
  local_openai2anthropic/daemon.py,sha256=pZnRojGFcuIpR8yLDNjV-b0LJRBVhgRAa-dKeRRse44,10017
6
6
  local_openai2anthropic/daemon_runner.py,sha256=rguOH0PgpbjqNsKYei0uCQX8JQOQ1wmtQH1CtW95Dbw,3274
7
- local_openai2anthropic/main.py,sha256=5tdgPel8RSCn1iK0d7hYAmcTM9vYHlepgQujaEXA2ic,9866
7
+ local_openai2anthropic/main.py,sha256=FK5JBBpzB_T44y3N16lPl1hK4ht4LEQqRKzVmkIjIoo,9866
8
8
  local_openai2anthropic/openai_types.py,sha256=jFdCvLwtXYoo5gGRqOhbHQcVaxcsxNnCP_yFPIv7rG4,3823
9
9
  local_openai2anthropic/protocol.py,sha256=vUEgxtRPFll6jEtLc4DyxTLCBjrWIEScZXhEqe4uibk,5185
10
- local_openai2anthropic/router.py,sha256=KDIsckdQLx78z5rmVX8Zhr5zWO9m_qB-BjQbTwWjj0s,40224
10
+ local_openai2anthropic/router.py,sha256=imzvgduneiniwHroTgeT9d8q4iF5GAuptaVP38sakUg,40226
11
11
  local_openai2anthropic/tavily_client.py,sha256=QsBhnyF8BFWPAxB4XtWCCpHCquNL5SW93-zjTTi4Meg,3774
12
12
  local_openai2anthropic/server_tools/__init__.py,sha256=QlJfjEta-HOCtLe7NaY_fpbEKv-ZpInjAnfmSqE9tbk,615
13
13
  local_openai2anthropic/server_tools/base.py,sha256=pNFsv-jSgxVrkY004AHAcYMNZgVSO8ZOeCzQBUtQ3vU,5633
14
14
  local_openai2anthropic/server_tools/web_search.py,sha256=1C7lX_cm-tMaN3MsCjinEZYPJc_Hj4yAxYay9h8Zbvs,6543
15
- local_openai2anthropic-0.2.4.dist-info/METADATA,sha256=nWz75h6XmZzWk3BdkMhTZNT0xlUmUSNmx2jgyFONS10,10040
16
- local_openai2anthropic-0.2.4.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
17
- local_openai2anthropic-0.2.4.dist-info/entry_points.txt,sha256=hdc9tSJUNxyNLXcTYye5SuD2K0bEQhxBhGnWTFup6ZM,116
18
- local_openai2anthropic-0.2.4.dist-info/licenses/LICENSE,sha256=X3_kZy3lJvd_xp8IeyUcIAO2Y367MXZc6aaRx8BYR_s,11369
19
- local_openai2anthropic-0.2.4.dist-info/RECORD,,
15
+ local_openai2anthropic-0.2.7.dist-info/METADATA,sha256=eA34CtgLACHsE4gf4Scuj7yU5IBg_Ys26x8nMnCd_eM,11240
16
+ local_openai2anthropic-0.2.7.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
17
+ local_openai2anthropic-0.2.7.dist-info/entry_points.txt,sha256=hdc9tSJUNxyNLXcTYye5SuD2K0bEQhxBhGnWTFup6ZM,116
18
+ local_openai2anthropic-0.2.7.dist-info/licenses/LICENSE,sha256=X3_kZy3lJvd_xp8IeyUcIAO2Y367MXZc6aaRx8BYR_s,11369
19
+ local_openai2anthropic-0.2.7.dist-info/RECORD,,