local-openai2anthropic 0.3.7__py3-none-any.whl → 0.3.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -3,7 +3,7 @@
  local-openai2anthropic: A proxy server that converts Anthropic Messages API to OpenAI API.
  """
 
- __version__ = "0.3.7"
+ __version__ = "0.3.8"
 
  from local_openai2anthropic.protocol import (
      AnthropicError,
@@ -101,7 +101,7 @@ def create_app(settings: Settings | None = None) -> FastAPI:
      app = FastAPI(
          title="local-openai2anthropic",
          description="A proxy server that converts Anthropic Messages API to OpenAI API",
-         version="0.3.7",
+         version="0.3.8",
          docs_url="/docs",
          redoc_url="/redoc",
      )
@@ -253,7 +253,7 @@ Examples:
      parser.add_argument(
          "--version",
          action="version",
-         version="%(prog)s 0.3.7",
+         version="%(prog)s 0.3.8",
      )
 
      # Create subparsers for commands
@@ -41,7 +41,7 @@ from anthropic.types.message_create_params import MessageCreateParams
 
  class UsageWithCache(BaseModel):
      """Extended usage with cache token support."""
-
+
      input_tokens: int
      output_tokens: int
      cache_creation_input_tokens: Optional[int] = None
@@ -50,14 +50,14 @@ class UsageWithCache(BaseModel):
 
  class AnthropicError(BaseModel):
      """Error structure for Anthropic API."""
-
+
      type: str
      message: str
 
 
  class AnthropicErrorResponse(BaseModel):
      """Error response structure for Anthropic API."""
-
+
      type: str = "error"
      error: AnthropicError
 
@@ -70,6 +70,7 @@ class PingEvent(BaseModel):
 
  # Web Search Tool Types
 
+
  class ApproximateLocation(BaseModel):
      """Approximate user location for web search."""
 
@@ -122,7 +123,13 @@ class WebSearchToolResultError(BaseModel):
      """Error content for web search tool result."""
 
      type: Literal["web_search_tool_result_error"] = "web_search_tool_result_error"
-     error_code: Literal["max_uses_exceeded", "too_many_requests", "unavailable"]
+     error_code: Literal[
+         "invalid_input",
+         "max_uses_exceeded",
+         "query_too_long",
+         "too_many_requests",
+         "unavailable",
+     ]
 
 
  class WebSearchToolResult(BaseModel):
@@ -131,6 +138,7 @@ class WebSearchToolResult(BaseModel):
      type: Literal["web_search_tool_result"] = "web_search_tool_result"
      tool_use_id: str
      results: list[WebSearchResult] | WebSearchToolResultError  # 'results' for client
+     content: list[WebSearchResult] | WebSearchToolResultError | None = None  # 'content' for compatibility
 
 
  class WebSearchCitation(BaseModel):
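
The expanded error codes and the new optional `content` field can be exercised roughly as in the sketch below, assuming `WebSearchToolResult` and `WebSearchToolResultError` are importable from `local_openai2anthropic.protocol` like the other Web Search Tool Types (the `tool_use_id` value is a placeholder):

```python
from local_openai2anthropic.protocol import (
    WebSearchToolResult,
    WebSearchToolResultError,
)

# "invalid_input" and "query_too_long" are accepted error codes as of 0.3.8.
err = WebSearchToolResultError(error_code="query_too_long")

# The same payload can now travel under both 'results' (counted by Claude Code)
# and the optional 'content' field (read by Anthropic-style clients).
block = WebSearchToolResult(
    tool_use_id="toolu_placeholder",  # placeholder id
    results=err,
    content=err,
)
```
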
@@ -168,12 +176,10 @@ __all__ = [
      "TextBlockParam",
      "ToolUseBlockParam",
      "ToolResultBlockParam",
-
      # Message types
      "Message",
      "MessageParam",
      "MessageCreateParams",
-
      # Streaming events
      "MessageStreamEvent",
      "MessageStartEvent",
@@ -183,23 +189,18 @@ __all__ = [
      "ContentBlockDeltaEvent",
      "ContentBlockStopEvent",
      "PingEvent",
-
      # Delta types
      "TextDelta",
      "BetaThinkingDelta",
-
      # Usage
      "UsageWithCache",
      "UsageWithServerToolUse",
      "MessageDeltaUsage",
-
      # Config
      "BetaThinkingConfigParam",
-
      # Error
      "AnthropicError",
      "AnthropicErrorResponse",
-
      # Web Search Tool Types
      "ApproximateLocation",
      "WebSearchToolDefinition",
@@ -148,30 +148,39 @@ class WebSearchServerTool(ServerTool):
          blocks: list[dict[str, Any]] = []
 
          # 1. server_tool_use block - signals a server-side tool was invoked
-         blocks.append({
-             "type": "server_tool_use",
-             "id": call_id,
-             "name": cls.tool_name,
-             "input": call_args,
-         })
+         blocks.append(
+             {
+                 "type": "server_tool_use",
+                 "id": call_id,
+                 "name": cls.tool_name,
+                 "input": call_args,
+             }
+         )
 
          # 2. web_search_tool_result block - contains the search results
-         # Note: Claude Code client expects 'results' field (not 'content') for counting
+         # Provide both 'results' and 'content' for client compatibility.
          if result.success:
-             blocks.append({
-                 "type": "web_search_tool_result",
-                 "tool_use_id": call_id,
-                 "results": result.content,
-             })
+             blocks.append(
+                 {
+                     "type": "web_search_tool_result",
+                     "tool_use_id": call_id,
+                     "results": result.content,
+                     "content": result.content,
+                 }
+             )
          else:
-             blocks.append({
-                 "type": "web_search_tool_result",
-                 "tool_use_id": call_id,
-                 "results": {
-                     "type": "web_search_tool_result_error",
-                     "error_code": result.error_code,
-                 },
-             })
+             error_payload = {
+                 "type": "web_search_tool_result_error",
+                 "error_code": result.error_code or "unavailable",
+             }
+             blocks.append(
+                 {
+                     "type": "web_search_tool_result",
+                     "tool_use_id": call_id,
+                     "results": error_payload,
+                     "content": error_payload,
+                 }
+             )
 
          return blocks
 
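Because each result block now duplicates its payload under `results` and `content`, a consumer can read whichever key it expects. A purely illustrative helper (not part of the package) sketching that:

```python
from typing import Any


def extract_search_payload(block: dict[str, Any]) -> Any:
    """Return the payload of a web_search_tool_result block.

    Claude Code counts the 'results' field, while Anthropic-style clients
    read 'content'; as of 0.3.8 both keys carry the same list or error dict.
    """
    if block.get("type") != "web_search_tool_result":
        return None
    return block.get("results", block.get("content"))
```
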
@@ -190,7 +199,7 @@ class WebSearchServerTool(ServerTool):
              {
                  "url": item.get("url"),
                  "title": item.get("title"),
-                 "snippet": item.get("snippet"),
+                 "content": item.get("encrypted_content"),
                  "page_age": item.get("page_age"),
              }
              for item in result.content
@@ -57,12 +57,16 @@ class TavilyClient:
 
          Returns:
              Tuple of (list of WebSearchResult, error_code or None).
-             Error codes: "max_uses_exceeded", "too_many_requests", "unavailable"
+             Error codes: "invalid_input", "query_too_long", "too_many_requests", "unavailable"
          """
          if not self._enabled:
              logger.warning("Tavily search called but API key not configured")
              return [], "unavailable"
 
+         if not query or not query.strip():
+             logger.warning("Tavily search called with empty query")
+             return [], "invalid_input"
+
          url = f"{self.base_url}/search"
          headers = {
              "Content-Type": "application/json",
@@ -80,6 +84,14 @@ class TavilyClient:
              async with httpx.AsyncClient(timeout=self.timeout) as client:
                  response = await client.post(url, headers=headers, json=payload)
 
+                 if response.status_code == 400:
+                     logger.warning("Tavily invalid request")
+                     return [], "invalid_input"
+
+                 if response.status_code == 413:
+                     logger.warning("Tavily query too long")
+                     return [], "query_too_long"
+
                  if response.status_code == 429:
                      logger.warning("Tavily rate limit exceeded")
                      return [], "too_many_requests"
@@ -88,6 +100,11 @@ class TavilyClient:
                      logger.error(f"Tavily server error: {response.status_code}")
                      return [], "unavailable"
 
+                 # Only raise for status codes we haven't handled (e.g., 401, 403)
+                 if response.status_code >= 400:
+                     logger.error(f"Tavily client error: {response.status_code}")
+                     return [], "unavailable"
+
                  response.raise_for_status()
                  data = response.json()
 
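Taken together with the existing 429 and 5xx branches, the Tavily response handling now maps onto the protocol's error codes roughly as sketched below; `classify_tavily_status` is an illustrative helper, not a function in the package:

```python
def classify_tavily_status(status_code: int) -> str | None:
    """Summarize the 0.3.8 status handling; None means success."""
    if status_code == 400:
        return "invalid_input"
    if status_code == 413:
        return "query_too_long"
    if status_code == 429:
        return "too_many_requests"
    if status_code >= 400:  # remaining 4xx and all 5xx
        return "unavailable"
    return None
```
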
@@ -102,7 +119,9 @@ class TavilyClient:
                      )
                      results.append(result)
 
-                 logger.debug(f"Tavily search returned {len(results)} results for query: {query[:50]}...")
+                 logger.debug(
+                     f"Tavily search returned {len(results)} results for query: {query[:50]}..."
+                 )
                  return results, None
 
          except httpx.TimeoutException:
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: local-openai2anthropic
- Version: 0.3.7
+ Version: 0.3.8
  Summary: A lightweight proxy server that converts Anthropic Messages API to OpenAI API
  Project-URL: Homepage, https://github.com/dongfangzan/local-openai2anthropic
  Project-URL: Repository, https://github.com/dongfangzan/local-openai2anthropic
@@ -109,37 +109,47 @@ Examples:
 
  > **Note:** If you're using [Ollama](https://ollama.com), it natively supports the Anthropic API format, so you don't need this proxy. Just point your Claude SDK directly to `http://localhost:11434/v1`.
 
- ### 3. Start the Proxy
+ ### 3. Start the Proxy (Recommended)
 
- **Option A: Run in background (recommended)**
+ Run the following command to start the proxy in background mode:
 
  ```bash
- export OA2A_OPENAI_BASE_URL=http://localhost:8000/v1   # Your local LLM endpoint
- export OA2A_OPENAI_API_KEY=dummy                       # Any value, not used by local backends
+ oa2a start
+ ```
+
+ **First-time setup**: If `~/.oa2a/config.toml` doesn't exist, an interactive setup wizard will guide you through:
+ - Enter your OpenAI API Key (for the local LLM backend)
+ - Enter the base URL of your local LLM (e.g., `http://localhost:8000/v1`)
+ - Configure server host and port (optional)
+ - Set server API key for authentication (optional)
 
- oa2a start          # Start server in background
- # Server starts at http://localhost:8080
+ After configuration, the server starts at `http://localhost:8080`.
 
- # View logs
+ **Daemon management commands:**
+
+ ```bash
  oa2a logs           # Show last 50 lines of logs
  oa2a logs -f        # Follow logs in real-time (Ctrl+C to exit)
-
- # Check status
  oa2a status         # Check if server is running
-
- # Stop server
  oa2a stop           # Stop background server
-
- # Restart server
  oa2a restart        # Restart with same settings
  ```
 
+ **Manual Configuration**
+
+ You can also manually create/edit the config file at `~/.oa2a/config.toml`:
+
+ ```toml
+ # OA2A Configuration File
+ openai_api_key = "dummy"
+ openai_base_url = "http://localhost:8000/v1"
+ host = "0.0.0.0"
+ port = 8080
+ ```
+
 
  **Option B: Run in foreground**
  ```bash
- export OA2A_OPENAI_BASE_URL=http://localhost:8000/v1
- export OA2A_OPENAI_API_KEY=dummy
-
  oa2a                # Run server in foreground (blocking)
  # Press Ctrl+C to stop
  ```
@@ -195,15 +205,6 @@ You can configure [Claude Code](https://github.com/anthropics/claude-code) to us
  | `ANTHROPIC_DEFAULT_HAIKU_MODEL` | Default model for Haiku mode |
  | `ANTHROPIC_REASONING_MODEL` | Default model for reasoning tasks |
 
- 2. **Or set environment variables** before running Claude Code:
-
-    ```bash
-    export ANTHROPIC_BASE_URL=http://localhost:8080
-    export ANTHROPIC_API_KEY=dummy-key
-
-    claude
-    ```
-
  ### Complete Workflow Example
 
  Make sure `~/.claude/settings.json` is configured as described above.
@@ -215,10 +216,7 @@ vllm serve meta-llama/Llama-2-7b-chat-hf
 
  Terminal 2 - Start the proxy (background mode):
  ```bash
- export OA2A_OPENAI_BASE_URL=http://localhost:8000/v1
- export OA2A_OPENAI_API_KEY=dummy
- export OA2A_TAVILY_API_KEY="tvly-your-tavily-api-key"  # Optional: enable web search
-
+ # First run: interactive setup wizard will guide you
  oa2a start
  ```
 
@@ -241,102 +239,43 @@ oa2a stop
  - ✅ **Streaming responses** - Real-time token streaming via SSE
  - ✅ **Tool calling** - Local LLM function calling support
  - ✅ **Vision models** - Multi-modal input for vision-capable models
- - ✅ **Web Search** - Give your local LLM internet access (see below)
+ - ✅ **Web Search** - Built-in Tavily web search for local models
  - ✅ **Thinking mode** - Supports reasoning/thinking model outputs
 
  ---
 
- ## Web Search Capability 🔍
-
- **Bridge the gap: Give your local LLM the web search power that Claude Code users enjoy!**
-
- When using locally-hosted models with Claude Code, you lose access to the built-in web search tool. This proxy fills that gap by providing a server-side web search implementation powered by [Tavily](https://tavily.com).
+ ## Web Search 🔍
 
- ### The Problem
+ Enable web search for your local LLM using [Tavily](https://tavily.com).
 
- | Scenario | Web Search Available? |
- |----------|----------------------|
- | Using Claude (cloud) in Claude Code | ✅ Built-in |
- | Using local vLLM/SGLang in Claude Code | ❌ Not available |
- | **Using this proxy + local LLM** | ✅ **Enabled via Tavily** |
+ **Setup:**
 
- ### How It Works
+ 1. Get a free API key at [tavily.com](https://tavily.com)
 
+ 2. Add to your config (`~/.oa2a/config.toml`):
+ ```toml
+ tavily_api_key = "tvly-your-api-key"
  ```
- Claude Code → Anthropic SDK → This Proxy → Local LLM
-
- Tavily API (Web Search)
- ```
-
- The proxy intercepts `web_search_20250305` tool calls and handles them directly, regardless of whether your local model supports web search natively.
-
- ### Setup Tavily Search
 
- 1. **Get a free API key** at [tavily.com](https://tavily.com) - generous free tier available
+ 3. Use `web_search_20250305` tool in your app - the proxy handles search automatically.
 
- 2. **Configure the proxy:**
-    ```bash
-    export OA2A_OPENAI_BASE_URL=http://localhost:8000/v1
-    export OA2A_OPENAI_API_KEY=dummy
-    export OA2A_TAVILY_API_KEY="tvly-your-tavily-api-key"  # Enable web search
-
-    oa2a
-    ```
-
- 3. **Use in your app:**
-    ```python
-    import anthropic
-
-    client = anthropic.Anthropic(
-        base_url="http://localhost:8080",
-        api_key="dummy-key",
-    )
-
-    message = client.messages.create(
-        model="meta-llama/Llama-2-7b-chat-hf",
-        max_tokens=1024,
-        tools=[
-            {
-                "name": "web_search_20250305",
-                "description": "Search the web for current information",
-                "input_schema": {
-                    "type": "object",
-                    "properties": {
-                        "query": {"type": "string", "description": "Search query"},
-                    },
-                    "required": ["query"],
-                },
-            }
-        ],
-        messages=[{"role": "user", "content": "What happened in AI today?"}],
-    )
-
-    if message.stop_reason == "tool_use":
-        tool_use = message.content[-1]
-        print(f"Searching: {tool_use.input}")
-        # The proxy automatically calls Tavily and returns results
-    ```
-
- ### Tavily Configuration Options
-
- | Variable | Default | Description |
- |----------|---------|-------------|
- | `OA2A_TAVILY_API_KEY` | - | Your Tavily API key ([get free at tavily.com](https://tavily.com)) |
- | `OA2A_TAVILY_MAX_RESULTS` | 5 | Number of search results to return |
- | `OA2A_TAVILY_TIMEOUT` | 30 | Search timeout in seconds |
- | `OA2A_WEBSEARCH_MAX_USES` | 5 | Max search calls per request |
+ **Options:** `tavily_max_results` (default: 5), `tavily_timeout` (default: 30), `websearch_max_uses` (default: 5)
 
  ---
 
  ## Configuration
 
- | Variable | Required | Default | Description |
+ Config file: `~/.oa2a/config.toml` (auto-created on first run)
+
+ | Option | Required | Default | Description |
  |----------|----------|---------|-------------|
- | `OA2A_OPENAI_BASE_URL` | ✅ | - | Your local LLM's OpenAI-compatible endpoint |
- | `OA2A_OPENAI_API_KEY` | ✅ | - | Any value (local backends usually ignore this) |
- | `OA2A_PORT` | ❌ | 8080 | Proxy server port |
- | `OA2A_HOST` | ❌ | 0.0.0.0 | Proxy server host |
- | `OA2A_TAVILY_API_KEY` | ❌ | - | Enable web search ([tavily.com](https://tavily.com)) |
+ | `openai_base_url` | ✅ | - | Local LLM endpoint (e.g., `http://localhost:8000/v1`) |
+ | `openai_api_key` | ✅ | - | API key for local LLM |
+ | `port` | ❌ | 8080 | Proxy port |
+ | `host` | ❌ | 0.0.0.0 | Proxy host |
+ | `api_key` | ❌ | - | Auth key for this proxy |
+ | `tavily_api_key` | ❌ | - | Enable web search |
+ | `log_level` | ❌ | INFO | DEBUG, INFO, WARNING, ERROR |
 
  ---
 
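With configuration now living in `~/.oa2a/config.toml`, a minimal end-to-end check against the proxy looks like the sketch below, adapted from the example removed above (default port 8080 assumed; the model name is a placeholder for whatever your backend serves):

```python
import anthropic

client = anthropic.Anthropic(
    base_url="http://localhost:8080",  # proxy default from the table above
    api_key="dummy-key",               # placeholder, as in the removed example
)

message = client.messages.create(
    model="meta-llama/Llama-2-7b-chat-hf",  # placeholder model name
    max_tokens=256,
    messages=[{"role": "user", "content": "Say hello in one sentence."}],
)
print(message.content)
```
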
@@ -1,25 +1,25 @@
- local_openai2anthropic/__init__.py,sha256=ykkqdWKgqvSffE2-GmDcZQFHJRAe0y4d4aeD3fUq_Os,1059
+ local_openai2anthropic/__init__.py,sha256=gFCH8riHg-nyPD7S0l1TAAKYwgzp4BLzPdUEn4Ce5dM,1059
  local_openai2anthropic/__main__.py,sha256=K21u5u7FN8-DbO67TT_XDF0neGqJeFrVNkteRauCRQk,179
  local_openai2anthropic/config.py,sha256=Cjg6J7H7ydKtVSd5m0RlTj-YF6yht3TpF4LcyodqQP4,9621
  local_openai2anthropic/converter.py,sha256=og94I514M9km_Wbk9c1ddU6fyaQNEbpd2zfpfnBQaTQ,16029
  local_openai2anthropic/daemon.py,sha256=pZnRojGFcuIpR8yLDNjV-b0LJRBVhgRAa-dKeRRse44,10017
  local_openai2anthropic/daemon_runner.py,sha256=rguOH0PgpbjqNsKYei0uCQX8JQOQ1wmtQH1CtW95Dbw,3274
- local_openai2anthropic/main.py,sha256=FPCEATNPXvGpkszftdXJh0o0F5sUAOXo2zDagmsWGKI,12174
+ local_openai2anthropic/main.py,sha256=AaG_MKznk_XA0PYFgTVby7ykOFhdOJGq7fB9YPx8cBM,12174
  local_openai2anthropic/openai_types.py,sha256=jFdCvLwtXYoo5gGRqOhbHQcVaxcsxNnCP_yFPIv7rG4,3823
- local_openai2anthropic/protocol.py,sha256=VW3B1YrbYg5UAo7PveQv0Ny5vfuNa6yG6IlHtkuyXiI,5178
+ local_openai2anthropic/protocol.py,sha256=B6nL9z86vIEiuYPvndYc7DA_zrMh7GLzhNmO67M9f3g,5349
  local_openai2anthropic/router.py,sha256=gwSGCYQGd0tAj4B4cl30UDkIJDIfBP4D8T9KEMKnxyk,16196
- local_openai2anthropic/tavily_client.py,sha256=QsBhnyF8BFWPAxB4XtWCCpHCquNL5SW93-zjTTi4Meg,3774
+ local_openai2anthropic/tavily_client.py,sha256=b_gyW0rLPIMNJYAFfeZkStbPYisuQzADCUZXzXRdyKo,4548
  local_openai2anthropic/server_tools/__init__.py,sha256=QlJfjEta-HOCtLe7NaY_fpbEKv-ZpInjAnfmSqE9tbk,615
  local_openai2anthropic/server_tools/base.py,sha256=pNFsv-jSgxVrkY004AHAcYMNZgVSO8ZOeCzQBUtQ3vU,5633
- local_openai2anthropic/server_tools/web_search.py,sha256=1C7lX_cm-tMaN3MsCjinEZYPJc_Hj4yAxYay9h8Zbvs,6543
+ local_openai2anthropic/server_tools/web_search.py,sha256=98EYdpS9kAAoSHl9vdUABPCYqYFmo4YSPSBZSGyUI40,6812
  local_openai2anthropic/streaming/__init__.py,sha256=RFKYQnc0zlhWK-Dm7GZpmabmszbZhY5NcXaaSsQ7Sys,227
  local_openai2anthropic/streaming/handler.py,sha256=X8viml6b40p-vr-A4HlEi5iCqmTsIMyQgj3S2RfweVE,22033
  local_openai2anthropic/tools/__init__.py,sha256=OM_6YAwy3G1kbrF7n5NvmBwWPGO0hwq4xLrYZFMHANA,318
  local_openai2anthropic/tools/handler.py,sha256=SO8AmEUfNIg16s6jOKBaYdajYc0fiI8ciOoiKXIJe_c,14106
  local_openai2anthropic/utils/__init__.py,sha256=0Apd3lQCmWpQHol4AfjtQe6A3Cpex9Zn-8dyK_FU8Z0,372
  local_openai2anthropic/utils/tokens.py,sha256=TV3vGAjoGZeyo1xPvwb5jto43p1U1f4HteCApB86X0g,3187
- local_openai2anthropic-0.3.7.dist-info/METADATA,sha256=ZqgaeWvxJAKD1fDKC0XWiSwf3BI1DbH5UQpjvOzFewo,11270
- local_openai2anthropic-0.3.7.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
- local_openai2anthropic-0.3.7.dist-info/entry_points.txt,sha256=hdc9tSJUNxyNLXcTYye5SuD2K0bEQhxBhGnWTFup6ZM,116
- local_openai2anthropic-0.3.7.dist-info/licenses/LICENSE,sha256=X3_kZy3lJvd_xp8IeyUcIAO2Y367MXZc6aaRx8BYR_s,11369
- local_openai2anthropic-0.3.7.dist-info/RECORD,,
+ local_openai2anthropic-0.3.8.dist-info/METADATA,sha256=uUbJJfhlsv-I54la3bD_Dkpmpe-pnUku60wgkXzXMVM,9276
+ local_openai2anthropic-0.3.8.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+ local_openai2anthropic-0.3.8.dist-info/entry_points.txt,sha256=hdc9tSJUNxyNLXcTYye5SuD2K0bEQhxBhGnWTFup6ZM,116
+ local_openai2anthropic-0.3.8.dist-info/licenses/LICENSE,sha256=X3_kZy3lJvd_xp8IeyUcIAO2Y367MXZc6aaRx8BYR_s,11369
+ local_openai2anthropic-0.3.8.dist-info/RECORD,,