local-openai2anthropic 0.3.3__py3-none-any.whl → 0.3.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,357 @@
1
+ # SPDX-License-Identifier: Apache-2.0
2
+ """Server tool handling."""
3
+
4
+ import json
5
+ import logging
6
+ from http import HTTPStatus
7
+ from typing import Any
8
+
9
+ import httpx
10
+ from fastapi.responses import JSONResponse
11
+
12
+ from local_openai2anthropic.config import Settings
13
+ from local_openai2anthropic.converter import convert_openai_to_anthropic
14
+ from local_openai2anthropic.protocol import AnthropicError, AnthropicErrorResponse
15
+ from local_openai2anthropic.server_tools import ServerToolRegistry
16
+ from local_openai2anthropic.utils.tokens import (
17
+ _generate_server_tool_id,
18
+ _normalize_usage,
19
+ )
20
+
21
+ logger = logging.getLogger(__name__)
22
+
23
+
24
class ServerToolHandler:
    """Handles server tool execution for non-streaming requests.

    Holds the registry of server tool classes (keyed by ``tool_name``),
    their per-type configuration, and a running ``usage`` counter that is
    aggregated across every tool execution performed by this handler.
    """

    def __init__(
        self,
        server_tools: list[type],
        configs: dict[str, dict[str, Any]],
        settings: Settings,
    ):
        # Index the tool classes by their advertised tool_name so that
        # dispatch in is_server_tool_call / execute_tool is a dict lookup.
        self.server_tools = {}
        for tool_cls in server_tools:
            self.server_tools[tool_cls.tool_name] = tool_cls
        self.configs = configs
        self.settings = settings
        # Aggregated per-key usage increments reported by executed tools.
        self.usage: dict[str, int] = {}

    def is_server_tool_call(self, tool_call: dict[str, Any]) -> bool:
        """Check if a tool call is for a server tool."""
        name = tool_call.get("function", {}).get("name")
        return name in self.server_tools

    async def execute_tool(
        self,
        tool_call: dict[str, Any],
    ) -> tuple[list[dict[str, Any]], dict[str, Any]]:
        """
        Execute a server tool and return content blocks + tool result message.
        Returns: (content_blocks, tool_result_message)
        """
        function_info = tool_call.get("function", {})
        func_name = function_info.get("name")
        call_id = tool_call.get("id", "")
        # Fall back to the client-facing id when no upstream OpenAI id exists.
        openai_call_id = tool_call.get("openai_id", call_id)

        tool_class = self.server_tools[func_name]
        config = self.configs.get(tool_class.tool_type, {})

        # Extract call arguments; treat a missing result as "no arguments".
        args = tool_class.extract_call_args(tool_call)
        args = {} if args is None else args

        # Run the tool itself.
        result = await tool_class.execute(call_id, args, config, self.settings)

        # Fold the tool's usage increments into the running totals.
        for key, delta in result.usage_increment.items():
            self.usage[key] = self.usage.get(key, 0) + delta

        # Anthropic-format content blocks for the final response body.
        blocks = tool_class.build_content_blocks(call_id, args, result)

        # OpenAI-format "tool" message to feed back to the upstream model.
        result_message = tool_class.build_tool_result_message(
            openai_call_id, args, result
        )

        return blocks, result_message
79
+
80
+
81
async def _handle_with_server_tools(
    openai_params: dict[str, Any],
    url: str,
    headers: dict[str, str],
    settings: Settings,
    server_tools: list[type],
    model: str,
) -> JSONResponse:
    """Handle request with server tool execution loop.

    Repeatedly POSTs ``openai_params`` to the upstream OpenAI-compatible
    endpoint at ``url``. Whenever the model responds with calls to
    registered server tools, executes them via :class:`ServerToolHandler`,
    appends the results to the conversation, and re-queries the model.
    Terminates when the model answers without server tool calls (returning
    the converted Anthropic-format response) or on an upstream/transport
    error (returning an Anthropic-style error response).
    """
    params = dict(openai_params)
    # Per-tool-type config injected by the router under a private key;
    # popped so it is never forwarded upstream.
    configs = params.pop("_server_tools_config", {})

    handler = ServerToolHandler(server_tools, configs, settings)
    # Anthropic content blocks produced by server tools across iterations,
    # prepended to the model's final content before returning.
    accumulated_content: list[dict[str, Any]] = []

    # Get max_uses from configs (default to settings or 5)
    # NOTE(review): only the first config with a truthy max_uses wins —
    # per-tool-type limits are not tracked separately.
    max_uses = settings.websearch_max_uses
    for config in configs.values():
        if config.get("max_uses"):
            max_uses = config["max_uses"]
            break

    # Total server tool executions so far, checked against max_uses.
    total_tool_calls = 0

    while True:
        # A fresh client per iteration keeps connection lifetime scoped to
        # one upstream round-trip.
        async with httpx.AsyncClient(timeout=settings.request_timeout) as client:
            try:
                # Log full request for debugging
                logger.debug(
                    f"Request body: {json.dumps(params, indent=2, default=str)[:3000]}"
                )

                response = await client.post(url, headers=headers, json=params)

                if response.status_code != 200:
                    logger.error(
                        f"OpenAI API error: {response.status_code} - {response.text}"
                    )
                    # Best-effort extraction of a human-readable error body:
                    # text -> raw bytes -> HTTP reason phrase.
                    raw_text = response.text
                    try:
                        if not raw_text:
                            raw_text = response.content.decode(
                                "utf-8", errors="replace"
                            )
                    except Exception:
                        raw_text = ""
                    if not raw_text:
                        raw_text = response.reason_phrase or ""
                    error_message = (raw_text or "").strip()
                    error_response = AnthropicErrorResponse(
                        error=AnthropicError(
                            type="api_error",
                            message=error_message
                            or f"Upstream API error ({response.status_code})",
                        )
                    )
                    # Mirror the upstream status code back to the caller.
                    return JSONResponse(
                        status_code=response.status_code,
                        content=error_response.model_dump(),
                    )

                completion_data = response.json()
                logger.debug(
                    f"OpenAI response: {json.dumps(completion_data, indent=2)[:500]}..."
                )
                # Imported lazily; presumably to keep the openai package off
                # the module-import path — TODO confirm.
                from openai.types.chat import ChatCompletion

                completion = ChatCompletion.model_validate(completion_data)

                # Check for server tool calls
                server_tool_calls: list[dict[str, Any]] = []
                # NOTE(review): other_tool_calls is only used in the log line
                # below; client-side tool calls flow through unchanged via
                # convert_openai_to_anthropic.
                other_tool_calls = []

                tool_calls = completion.choices[0].message.tool_calls
                logger.info(
                    f"Model returned tool_calls: {len(tool_calls) if tool_calls else 0}"
                )

                if tool_calls:
                    for tc in tool_calls:
                        func = getattr(tc, "function", None)
                        func_name = func.name if func else ""
                        logger.info(f" Tool call: {func_name}")

                        # Generate Anthropic-style ID for server tools
                        is_server = handler.is_server_tool_call(
                            {
                                "id": tc.id,
                                "function": {"name": func_name, "arguments": ""},
                            }
                        )

                        # Use Anthropic-style ID for server tools, original ID otherwise
                        client_tool_id = (
                            _generate_server_tool_id() if is_server else tc.id
                        )

                        # Keep both ids: "id" faces the Anthropic client,
                        # "openai_id" is echoed back upstream in tool results.
                        tc_dict = {
                            "id": client_tool_id,
                            "openai_id": tc.id,
                            "function": {
                                "name": func_name,
                                "arguments": func.arguments if func else "{}",
                            },
                        }
                        logger.info(
                            f" Is server tool: {is_server}, ID: {client_tool_id}"
                        )
                        if is_server:
                            server_tool_calls.append(tc_dict)
                        else:
                            other_tool_calls.append(tc)

                # No server tool calls - we're done
                logger.info(
                    f"Server tool calls: {len(server_tool_calls)}, Other: {len(other_tool_calls)}"
                )
                if not server_tool_calls:
                    message = convert_openai_to_anthropic(completion, model)

                    if accumulated_content:
                        # Prepend the tool-produced blocks to the model's
                        # final content and attach server tool usage.
                        message_dict = message.model_dump()
                        message_dict["content"] = (
                            accumulated_content + message_dict.get("content", [])
                        )

                        if message_dict.get("usage"):
                            message_dict["usage"]["server_tool_use"] = handler.usage
                            message_dict["usage"] = _normalize_usage(
                                message_dict.get("usage")
                            )

                        # Log full response for debugging
                        logger.info(
                            f"Response content blocks: {json.dumps(message_dict.get('content', []), ensure_ascii=False)[:1000]}"
                        )
                        logger.info(f"Response usage: {message_dict.get('usage')}")
                        logger.info(f"Server tool use count: {handler.usage}")

                        return JSONResponse(content=message_dict)

                    # No server tools ever ran: plain pass-through response.
                    message_dict = message.model_dump()
                    message_dict["usage"] = _normalize_usage(message_dict.get("usage"))
                    return JSONResponse(content=message_dict)

                # Check max_uses limit
                if total_tool_calls >= max_uses:
                    logger.warning(f"Server tool max_uses ({max_uses}) exceeded")
                    # Return error for each call
                    for call in server_tool_calls:
                        func_name = call.get("function", {}).get("name", "")
                        tool_class = handler.server_tools.get(func_name)
                        if tool_class:
                            from local_openai2anthropic.server_tools import ToolResult

                            error_result = ToolResult(
                                success=False,
                                content=[],
                                error_code="max_uses_exceeded",
                            )
                            error_blocks = tool_class.build_content_blocks(
                                call["id"],
                                {},
                                error_result,
                            )
                            accumulated_content.extend(error_blocks)

                    # Continue with modified messages
                    # NOTE(review): total_tool_calls is not incremented on
                    # this path — termination relies on the model stopping
                    # after receiving the error tool results; verify there
                    # is no unbounded-loop risk with uncooperative models.
                    assistant_tool_calls = []
                    for call in server_tool_calls:
                        assistant_tool_calls.append(
                            {
                                "id": call.get("openai_id", call.get("id", "")),
                                "type": "function",
                                "function": {
                                    "name": call.get("function", {}).get("name", ""),
                                    "arguments": call.get("function", {}).get(
                                        "arguments", "{}"
                                    ),
                                },
                            }
                        )
                    messages = params.get("messages", [])
                    messages = _add_tool_results_to_messages(
                        messages, assistant_tool_calls, handler, is_error=True
                    )
                    params["messages"] = messages
                    continue

                # Execute server tools
                messages = params.get("messages", [])
                assistant_tool_calls = []
                tool_results = []

                for call in server_tool_calls:
                    total_tool_calls += 1
                    content_blocks, tool_result = await handler.execute_tool(call)
                    accumulated_content.extend(content_blocks)

                    # Track for assistant message
                    assistant_tool_calls.append(
                        {
                            "id": call.get("openai_id", call.get("id", "")),
                            "type": "function",
                            "function": {
                                "name": call["function"]["name"],
                                "arguments": call["function"]["arguments"],
                            },
                        }
                    )
                    tool_results.append(tool_result)

                # Add to messages for next iteration
                messages = _add_tool_results_to_messages(
                    messages, assistant_tool_calls, handler, tool_results=tool_results
                )
                params["messages"] = messages

            except httpx.TimeoutException:
                error_response = AnthropicErrorResponse(
                    error=AnthropicError(
                        type="timeout_error", message="Request timed out"
                    )
                )
                return JSONResponse(
                    status_code=HTTPStatus.GATEWAY_TIMEOUT,
                    content=error_response.model_dump(),
                )
            except httpx.RequestError as e:
                error_response = AnthropicErrorResponse(
                    error=AnthropicError(type="connection_error", message=str(e))
                )
                return JSONResponse(
                    status_code=HTTPStatus.BAD_GATEWAY,
                    content=error_response.model_dump(),
                )
317
+
318
+
319
+ def _add_tool_results_to_messages(
320
+ messages: list[dict[str, Any]],
321
+ tool_calls: list[dict[str, Any]],
322
+ handler: ServerToolHandler,
323
+ tool_results: list[dict[str, Any]] | None = None,
324
+ is_error: bool = False,
325
+ ) -> list[dict[str, Any]]:
326
+ """Add assistant tool call and results to messages."""
327
+ messages = list(messages)
328
+
329
+ # Add assistant message with tool calls
330
+ # SGLang requires content to be a string, not None
331
+ assistant_msg: dict[str, Any] = {
332
+ "role": "assistant",
333
+ "content": "", # Empty string instead of None for SGLang compatibility
334
+ "tool_calls": tool_calls,
335
+ }
336
+ messages.append(assistant_msg)
337
+
338
+ # Add tool results
339
+ if is_error:
340
+ for call in tool_calls:
341
+ tool_call_id = call.get("openai_id", call.get("id", ""))
342
+ messages.append(
343
+ {
344
+ "role": "tool",
345
+ "tool_call_id": tool_call_id,
346
+ "content": json.dumps(
347
+ {
348
+ "error": "max_uses_exceeded",
349
+ "message": "Maximum tool uses exceeded.",
350
+ }
351
+ ),
352
+ }
353
+ )
354
+ elif tool_results:
355
+ messages.extend(tool_results)
356
+
357
+ return messages
@@ -0,0 +1,18 @@
1
+ # SPDX-License-Identifier: Apache-2.0
2
+ """Utility functions for local_openai2anthropic."""
3
+
4
+ from .tokens import (
5
+ _chunk_text,
6
+ _count_tokens,
7
+ _estimate_input_tokens,
8
+ _generate_server_tool_id,
9
+ _normalize_usage,
10
+ )
11
+
12
+ __all__ = [
13
+ "_chunk_text",
14
+ "_count_tokens",
15
+ "_estimate_input_tokens",
16
+ "_generate_server_tool_id",
17
+ "_normalize_usage",
18
+ ]
@@ -0,0 +1,96 @@
1
+ # SPDX-License-Identifier: Apache-2.0
2
+ """Token-related utility functions."""
3
+
4
+ import json
5
+ import secrets
6
+ import string
7
+ from typing import Any
8
+
9
+
10
+ def _generate_server_tool_id() -> str:
11
+ """Generate Anthropic-style server tool use ID (srvtoolu_...)."""
12
+ # Generate 24 random alphanumeric characters
13
+ chars = string.ascii_lowercase + string.digits
14
+ random_part = "".join(secrets.choice(chars) for _ in range(24))
15
+ return f"srvtoolu_{random_part}"
16
+
17
+
18
+ def _normalize_usage(usage: dict[str, Any] | None) -> dict[str, Any] | None:
19
+ if not isinstance(usage, dict):
20
+ return usage
21
+ allowed_keys = {
22
+ "input_tokens",
23
+ "output_tokens",
24
+ "cache_creation_input_tokens",
25
+ "cache_read_input_tokens",
26
+ "server_tool_use",
27
+ }
28
+ normalized = {k: v for k, v in usage.items() if k in allowed_keys}
29
+ return normalized or None
30
+
31
+
32
+ def _count_tokens(text: str) -> int:
33
+ try:
34
+ import tiktoken # type: ignore[import-not-found]
35
+ except Exception:
36
+ return 0
37
+
38
+ encoding = tiktoken.get_encoding("cl100k_base")
39
+ return len(encoding.encode(text))
40
+
41
+
42
+ def _chunk_text(text: str, chunk_size: int = 200) -> list[str]:
43
+ if not text:
44
+ return []
45
+ return [text[i : i + chunk_size] for i in range(0, len(text), chunk_size)]
46
+
47
+
48
+ def _estimate_input_tokens(openai_params: dict[str, Any]) -> int:
49
+ try:
50
+ import tiktoken # type: ignore[import-not-found]
51
+ except Exception:
52
+ return 0
53
+
54
+ encoding = tiktoken.get_encoding("cl100k_base")
55
+ total_tokens = 0
56
+
57
+ system = openai_params.get("system")
58
+ if isinstance(system, str):
59
+ total_tokens += len(encoding.encode(system))
60
+
61
+ messages = openai_params.get("messages", [])
62
+ if isinstance(messages, list):
63
+ for msg in messages:
64
+ if not isinstance(msg, dict):
65
+ continue
66
+ content = msg.get("content", "")
67
+ if isinstance(content, str):
68
+ total_tokens += len(encoding.encode(content))
69
+ elif isinstance(content, list):
70
+ for block in content:
71
+ if not isinstance(block, dict):
72
+ total_tokens += len(encoding.encode(str(block)))
73
+ continue
74
+ block_type = block.get("type")
75
+ if block_type == "text":
76
+ total_tokens += len(encoding.encode(block.get("text", "")))
77
+ elif block_type == "image_url":
78
+ total_tokens += 85
79
+
80
+ tool_calls = msg.get("tool_calls")
81
+ if isinstance(tool_calls, list) and tool_calls:
82
+ total_tokens += len(encoding.encode(json.dumps(tool_calls)))
83
+
84
+ tools = openai_params.get("tools")
85
+ if isinstance(tools, list) and tools:
86
+ total_tokens += len(encoding.encode(json.dumps(tools)))
87
+
88
+ tool_choice = openai_params.get("tool_choice")
89
+ if tool_choice is not None:
90
+ total_tokens += len(encoding.encode(json.dumps(tool_choice)))
91
+
92
+ response_format = openai_params.get("response_format")
93
+ if response_format is not None:
94
+ total_tokens += len(encoding.encode(json.dumps(response_format)))
95
+
96
+ return total_tokens
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: local-openai2anthropic
3
- Version: 0.3.3
3
+ Version: 0.3.5
4
4
  Summary: A lightweight proxy server that converts Anthropic Messages API to OpenAI API
5
5
  Project-URL: Homepage, https://github.com/dongfangzan/local-openai2anthropic
6
6
  Project-URL: Repository, https://github.com/dongfangzan/local-openai2anthropic
@@ -0,0 +1,25 @@
1
+ local_openai2anthropic/__init__.py,sha256=YHTno5vTDXG-rjXCkH_JxAJZJNIPMDuAcphmJJV_pQA,1059
2
+ local_openai2anthropic/__main__.py,sha256=K21u5u7FN8-DbO67TT_XDF0neGqJeFrVNkteRauCRQk,179
3
+ local_openai2anthropic/config.py,sha256=ZOZKbyWL4oCYHkEcfl-HgTmDbgJoqHRahdZQbTXqkVg,1985
4
+ local_openai2anthropic/converter.py,sha256=og94I514M9km_Wbk9c1ddU6fyaQNEbpd2zfpfnBQaTQ,16029
5
+ local_openai2anthropic/daemon.py,sha256=pZnRojGFcuIpR8yLDNjV-b0LJRBVhgRAa-dKeRRse44,10017
6
+ local_openai2anthropic/daemon_runner.py,sha256=rguOH0PgpbjqNsKYei0uCQX8JQOQ1wmtQH1CtW95Dbw,3274
7
+ local_openai2anthropic/main.py,sha256=oImU_AhD6mNGf7qUiA7u8R5QddlZDBUpDHADa0EIvpA,12145
8
+ local_openai2anthropic/openai_types.py,sha256=jFdCvLwtXYoo5gGRqOhbHQcVaxcsxNnCP_yFPIv7rG4,3823
9
+ local_openai2anthropic/protocol.py,sha256=VW3B1YrbYg5UAo7PveQv0Ny5vfuNa6yG6IlHtkuyXiI,5178
10
+ local_openai2anthropic/router.py,sha256=gwSGCYQGd0tAj4B4cl30UDkIJDIfBP4D8T9KEMKnxyk,16196
11
+ local_openai2anthropic/tavily_client.py,sha256=QsBhnyF8BFWPAxB4XtWCCpHCquNL5SW93-zjTTi4Meg,3774
12
+ local_openai2anthropic/server_tools/__init__.py,sha256=QlJfjEta-HOCtLe7NaY_fpbEKv-ZpInjAnfmSqE9tbk,615
13
+ local_openai2anthropic/server_tools/base.py,sha256=pNFsv-jSgxVrkY004AHAcYMNZgVSO8ZOeCzQBUtQ3vU,5633
14
+ local_openai2anthropic/server_tools/web_search.py,sha256=1C7lX_cm-tMaN3MsCjinEZYPJc_Hj4yAxYay9h8Zbvs,6543
15
+ local_openai2anthropic/streaming/__init__.py,sha256=RFKYQnc0zlhWK-Dm7GZpmabmszbZhY5NcXaaSsQ7Sys,227
16
+ local_openai2anthropic/streaming/handler.py,sha256=X8viml6b40p-vr-A4HlEi5iCqmTsIMyQgj3S2RfweVE,22033
17
+ local_openai2anthropic/tools/__init__.py,sha256=OM_6YAwy3G1kbrF7n5NvmBwWPGO0hwq4xLrYZFMHANA,318
18
+ local_openai2anthropic/tools/handler.py,sha256=SO8AmEUfNIg16s6jOKBaYdajYc0fiI8ciOoiKXIJe_c,14106
19
+ local_openai2anthropic/utils/__init__.py,sha256=0Apd3lQCmWpQHol4AfjtQe6A3Cpex9Zn-8dyK_FU8Z0,372
20
+ local_openai2anthropic/utils/tokens.py,sha256=TV3vGAjoGZeyo1xPvwb5jto43p1U1f4HteCApB86X0g,3187
21
+ local_openai2anthropic-0.3.5.dist-info/METADATA,sha256=Z26zrkTqEmfovoEt091Bj702JRNlY-JT3poLb--AEKs,11240
22
+ local_openai2anthropic-0.3.5.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
23
+ local_openai2anthropic-0.3.5.dist-info/entry_points.txt,sha256=hdc9tSJUNxyNLXcTYye5SuD2K0bEQhxBhGnWTFup6ZM,116
24
+ local_openai2anthropic-0.3.5.dist-info/licenses/LICENSE,sha256=X3_kZy3lJvd_xp8IeyUcIAO2Y367MXZc6aaRx8BYR_s,11369
25
+ local_openai2anthropic-0.3.5.dist-info/RECORD,,
@@ -1,19 +0,0 @@
1
- local_openai2anthropic/__init__.py,sha256=IEn8YcQGsaEaCr04s3hS2AcgsIt5NU5Qa2C8Uwz7RdY,1059
2
- local_openai2anthropic/__main__.py,sha256=K21u5u7FN8-DbO67TT_XDF0neGqJeFrVNkteRauCRQk,179
3
- local_openai2anthropic/config.py,sha256=3M5ZAz3uYNMGxaottEBseEOZF-GnVaGuioH9Hpmgnd8,1918
4
- local_openai2anthropic/converter.py,sha256=Q1_AZSzY_DgIFC_jAfXe6Niw0P7hf45m-NCCWjK3C3w,15856
5
- local_openai2anthropic/daemon.py,sha256=pZnRojGFcuIpR8yLDNjV-b0LJRBVhgRAa-dKeRRse44,10017
6
- local_openai2anthropic/daemon_runner.py,sha256=rguOH0PgpbjqNsKYei0uCQX8JQOQ1wmtQH1CtW95Dbw,3274
7
- local_openai2anthropic/main.py,sha256=FK5JBBpzB_T44y3N16lPl1hK4ht4LEQqRKzVmkIjIoo,9866
8
- local_openai2anthropic/openai_types.py,sha256=jFdCvLwtXYoo5gGRqOhbHQcVaxcsxNnCP_yFPIv7rG4,3823
9
- local_openai2anthropic/protocol.py,sha256=vUEgxtRPFll6jEtLc4DyxTLCBjrWIEScZXhEqe4uibk,5185
10
- local_openai2anthropic/router.py,sha256=4-m57gowlHW7A0qDaVuwnm7V0qhP6wgj1EU2TAlZ01M,52791
11
- local_openai2anthropic/tavily_client.py,sha256=QsBhnyF8BFWPAxB4XtWCCpHCquNL5SW93-zjTTi4Meg,3774
12
- local_openai2anthropic/server_tools/__init__.py,sha256=QlJfjEta-HOCtLe7NaY_fpbEKv-ZpInjAnfmSqE9tbk,615
13
- local_openai2anthropic/server_tools/base.py,sha256=pNFsv-jSgxVrkY004AHAcYMNZgVSO8ZOeCzQBUtQ3vU,5633
14
- local_openai2anthropic/server_tools/web_search.py,sha256=1C7lX_cm-tMaN3MsCjinEZYPJc_Hj4yAxYay9h8Zbvs,6543
15
- local_openai2anthropic-0.3.3.dist-info/METADATA,sha256=feZdfAGz5m72eD81_q3RwT2DFNe-b-zhkF5G_lwKWT4,11240
16
- local_openai2anthropic-0.3.3.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
17
- local_openai2anthropic-0.3.3.dist-info/entry_points.txt,sha256=hdc9tSJUNxyNLXcTYye5SuD2K0bEQhxBhGnWTFup6ZM,116
18
- local_openai2anthropic-0.3.3.dist-info/licenses/LICENSE,sha256=X3_kZy3lJvd_xp8IeyUcIAO2Y367MXZc6aaRx8BYR_s,11369
19
- local_openai2anthropic-0.3.3.dist-info/RECORD,,