local-openai2anthropic 0.1.0__py3-none-any.whl → 0.2.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,116 @@
+ #!/usr/bin/env python3
+ # SPDX-License-Identifier: Apache-2.0
+ """
+ Daemon runner - this module is executed as a standalone script in the child process.
+ """
+
+ import atexit
+ import os
+ import signal
+ import sys
+ from pathlib import Path
+ from datetime import datetime
+
+ PID_FILE = Path.home() / ".local" / "share" / "oa2a" / "oa2a.pid"
+
+
+ def log_message(msg: str) -> None:
+     """Write a timestamped message to stderr so the parent process (or log file) can capture it."""
+     timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+     line = f"[{timestamp}] {msg}"
+     print(line, file=sys.stderr)
+     sys.stderr.flush()
+
+
+ def _write_pid(pid: int) -> None:
+     """Write PID to pidfile."""
+     try:
+         PID_FILE.parent.mkdir(parents=True, exist_ok=True)
+         PID_FILE.write_text(str(pid))
+         log_message(f"PID written to {PID_FILE}: {pid}")
+     except Exception as e:
+         log_message(f"Failed to write PID file: {e}")
+
+
+ def _remove_pid() -> None:
+     """Remove pidfile."""
+     try:
+         if PID_FILE.exists():
+             PID_FILE.unlink()
+             log_message(f"PID file removed: {PID_FILE}")
+     except OSError:
+         pass
+
+
+ def _signal_handler(signum, frame):
+     """Handle termination signals."""
+     sig_name = signal.Signals(signum).name
+     log_message(f"Received signal {sig_name}, shutting down...")
+     _remove_pid()
+     sys.exit(0)
+
+
+ def run_server():
+     """Run the server in daemon mode."""
+     try:
+         # Write current PID to file (this is the correct PID for the daemon)
+         current_pid = os.getpid()
+         _write_pid(current_pid)
+
+         # Register cleanup on exit
+         atexit.register(_remove_pid)
+
+         # Setup signal handlers
+         signal.signal(signal.SIGTERM, _signal_handler)
+         signal.signal(signal.SIGINT, _signal_handler)
+
+         log_message(f"Starting daemon server (PID: {current_pid})...")
+
+         # Add the src directory to path if needed
+         current_file = Path(__file__).resolve()
+         package_dir = current_file.parent
+
+         if str(package_dir) not in sys.path:
+             sys.path.insert(0, str(package_dir))
+
+         # Import and run the main server
+         from local_openai2anthropic.main import create_app
+         from local_openai2anthropic.config import get_settings
+
+         import uvicorn
+
+         settings = get_settings()
+
+         log_message("Configuration loaded:")
+         log_message(f"  Host: {settings.host}")
+         log_message(f"  Port: {settings.port}")
+         log_message(f"  Log Level: {settings.log_level}")
+         log_message(f"  OpenAI Base URL: {settings.openai_base_url}")
+
+         # Validate required settings
+         if not settings.openai_api_key:
+             log_message("Error: OA2A_OPENAI_API_KEY is required but not set")
+             sys.exit(1)
+
+         app = create_app(settings)
+
+         log_message(f"Starting uvicorn on {settings.host}:{settings.port}")
+
+         uvicorn.run(
+             app,
+             host=settings.host,
+             port=settings.port,
+             log_level=settings.log_level.lower(),
+             timeout_keep_alive=300,
+         )
+
+     except Exception as e:
+         log_message(f"Fatal error in daemon: {e}")
+         import traceback
+         traceback.print_exc()
+         _remove_pid()
+         sys.exit(1)
+
+
+ if __name__ == "__main__":
+     run_server()
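Note: the runner above logs only to stderr and relies on whatever launched it to redirect that stream. A minimal sketch of how a supervising process might launch this module detached (the module path `local_openai2anthropic._daemon_runner` and the log-file location are assumptions; the actual launcher is not shown in this diff):

```python
# Hypothetical launcher sketch -- module path and log location are assumed,
# not confirmed by this diff.
import subprocess
import sys
from pathlib import Path

LOG_FILE = Path.home() / ".local" / "share" / "oa2a" / "oa2a.log"  # assumed path


def spawn_runner() -> int:
    """Start the daemon runner detached, sending its stderr log lines to a file."""
    LOG_FILE.parent.mkdir(parents=True, exist_ok=True)
    with LOG_FILE.open("ab") as log:
        proc = subprocess.Popen(
            [sys.executable, "-m", "local_openai2anthropic._daemon_runner"],  # assumed name
            stdout=log,
            stderr=log,
            start_new_session=True,  # detach from the controlling terminal
        )
    return proc.pid  # the runner also records its own PID in the pidfile
```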
@@ -3,6 +3,7 @@
  Main entry point for the local-openai2anthropic proxy server.
  """

+ import argparse
  import logging
  import sys

@@ -20,25 +21,25 @@ def create_app(settings: Settings | None = None) -> FastAPI:
      """Create and configure the FastAPI application."""
      if settings is None:
          settings = get_settings()
-
+
      # Configure logging
      logging.basicConfig(
          level=getattr(logging, settings.log_level.upper()),
          format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
      )
-
+
      # Create FastAPI app
      app = FastAPI(
          title="local-openai2anthropic",
          description="A proxy server that converts Anthropic Messages API to OpenAI API",
-         version="0.1.0",
+         version="0.2.0",
          docs_url="/docs",
          redoc_url="/redoc",
      )
-
+
      # Store settings in app state
      app.state.settings = settings
-
+
      # Add CORS middleware
      app.add_middleware(
          CORSMiddleware,
@@ -47,16 +48,17 @@ def create_app(settings: Settings | None = None) -> FastAPI:
          allow_methods=settings.cors_methods,
          allow_headers=settings.cors_headers,
      )
-
+
      # Add API key authentication middleware if configured
      if settings.api_key:
+
          @app.middleware("http")
          async def auth_middleware(request: Request, call_next):
              """Validate API key if configured."""
              # Skip auth for docs and health check
              if request.url.path in ["/docs", "/redoc", "/openapi.json", "/health"]:
                  return await call_next(request)
-
+
              auth_header = request.headers.get("Authorization", "")
              if not auth_header.startswith("Bearer "):
                  error_response = AnthropicErrorResponse(
@@ -69,7 +71,7 @@ def create_app(settings: Settings | None = None) -> FastAPI:
                      status_code=401,
                      content=error_response.model_dump(),
                  )
-
+
              token = auth_header[7:]  # Remove "Bearer " prefix
              if token != settings.api_key:
                  error_response = AnthropicErrorResponse(
@@ -82,12 +84,12 @@ def create_app(settings: Settings | None = None) -> FastAPI:
                      status_code=401,
                      content=error_response.model_dump(),
                  )
-
+
              return await call_next(request)
-
+
      # Include routers
      app.include_router(router)
-
+
      # Exception handlers
      @app.exception_handler(HTTPException)
      async def http_exception_handler(request: Request, exc: HTTPException):
@@ -98,7 +100,7 @@ def create_app(settings: Settings | None = None) -> FastAPI:
                status_code=exc.status_code,
                content=exc.detail,
            )
-
+
        error_response = AnthropicErrorResponse(
            error=AnthropicError(
                type="api_error",
@@ -109,7 +111,7 @@ def create_app(settings: Settings | None = None) -> FastAPI:
            status_code=exc.status_code,
            content=error_response.model_dump(),
        )
-
+
      @app.exception_handler(Exception)
      async def general_exception_handler(request: Request, exc: Exception):
          """Handle unexpected exceptions."""
@@ -124,33 +126,31 @@ def create_app(settings: Settings | None = None) -> FastAPI:
            status_code=500,
            content=error_response.model_dump(),
        )
-
+
      return app


- def main() -> None:
-     """Main entry point."""
-     # Load settings
-     settings = get_settings()
-
+ def run_foreground(settings: Settings) -> None:
+     """Run server in foreground mode (blocking)."""
      # Validate required settings
      if not settings.openai_api_key:
          print(
-             "Error: OPENAI_API_KEY environment variable is required.\n"
+             "Error: OA2A_OPENAI_API_KEY environment variable is required.\n"
              "Set it via:\n"
              "  - Environment variable: export OA2A_OPENAI_API_KEY='your-key'\n"
-             "  - Or create a .env file with OPENAI_API_KEY=your-key",
+             "  - Or create a .env file with OA2A_OPENAI_API_KEY=your-key",
              file=sys.stderr,
          )
          sys.exit(1)
-
+
      # Create app
      app = create_app(settings)
-
+
      # Run server
-     print(f"Starting local-openai2anthropic server on {settings.host}:{settings.port}")
+     print(f"Starting server on {settings.host}:{settings.port}")
      print(f"Proxying to: {settings.openai_base_url}")
-
+     print("Press Ctrl+C to stop\n")
+
      uvicorn.run(
          app,
          host=settings.host,
@@ -160,5 +160,157 @@ def main() -> None:
      )


+ def main() -> None:
+     """Main entry point with subcommand support."""
+     # Create main parser
+     parser = argparse.ArgumentParser(
+         prog="oa2a",
+         description="A proxy server that converts Anthropic Messages API to OpenAI API",
+         formatter_class=argparse.RawDescriptionHelpFormatter,
+         epilog="""
+ Examples:
+   oa2a start      # Start server in background
+   oa2a stop       # Stop background server
+   oa2a restart    # Restart background server
+   oa2a status     # Check server status
+   oa2a logs       # View server logs
+   oa2a logs -f    # Follow server logs (tail -f)
+   oa2a            # Run server in foreground (default behavior)
+ """.strip(),
+     )
+
+     parser.add_argument(
+         "--version",
+         action="version",
+         version="%(prog)s 0.2.0",
+     )
+
+     # Create subparsers for commands
+     subparsers = parser.add_subparsers(dest="command", help="Available commands")
+
+     # start command
+     start_parser = subparsers.add_parser("start", help="Start server in background")
+     start_parser.add_argument(
+         "--host",
+         default=None,
+         help="Server host (default: 0.0.0.0)",
+     )
+     start_parser.add_argument(
+         "--port",
+         type=int,
+         default=None,
+         help="Server port (default: 8080)",
+     )
+     start_parser.add_argument(
+         "--log-level",
+         default="info",
+         choices=["debug", "info", "warning", "error"],
+         help="Logging level (default: info)",
+     )
+
+     # stop command
+     stop_parser = subparsers.add_parser("stop", help="Stop background server")
+     stop_parser.add_argument(
+         "-f", "--force",
+         action="store_true",
+         help="Force kill the server",
+     )
+
+     # restart command
+     restart_parser = subparsers.add_parser("restart", help="Restart background server")
+     restart_parser.add_argument(
+         "--host",
+         default=None,
+         help="Server host (default: 0.0.0.0)",
+     )
+     restart_parser.add_argument(
+         "--port",
+         type=int,
+         default=None,
+         help="Server port (default: 8080)",
+     )
+     restart_parser.add_argument(
+         "--log-level",
+         default="info",
+         choices=["debug", "info", "warning", "error"],
+         help="Logging level (default: info)",
+     )
+
+     # status command
+     status_parser = subparsers.add_parser("status", help="Check server status")
+
+     # logs command
+     logs_parser = subparsers.add_parser("logs", help="View server logs")
+     logs_parser.add_argument(
+         "-f", "--follow",
+         action="store_true",
+         help="Follow log output (like tail -f)",
+     )
+     logs_parser.add_argument(
+         "-n", "--lines",
+         type=int,
+         default=50,
+         help="Number of lines to show (default: 50)",
+     )
+
+     args = parser.parse_args()
+
+     # Import daemon module only when needed
+     from local_openai2anthropic import daemon
+
+     # Handle subcommands
+     if args.command == "start":
+         # Get settings for defaults
+         settings = get_settings()
+         host = args.host or settings.host
+         port = args.port or settings.port
+
+         success = daemon.start_daemon(
+             host=host,
+             port=port,
+             log_level=args.log_level,
+         )
+         sys.exit(0 if success else 1)
+
+     elif args.command == "stop":
+         success = daemon.stop_daemon(force=args.force)
+         sys.exit(0 if success else 1)
+
+     elif args.command == "restart":
+         # Get settings for defaults
+         settings = get_settings()
+         host = args.host or settings.host
+         port = args.port or settings.port
+
+         success = daemon.restart_daemon(
+             host=host,
+             port=port,
+             log_level=args.log_level,
+         )
+         sys.exit(0 if success else 1)
+
+     elif args.command == "status":
+         running, pid, config = daemon.get_status()
+         if running and config:
+             host = config.get("host", "0.0.0.0")
+             port = config.get("port", 8080)
+             print(f"Server is running (PID: {pid})")
+             print(f"Listening on: {host}:{port}")
+         elif running:
+             print(f"Server is running (PID: {pid})")
+         else:
+             print("Server is not running")
+         sys.exit(0)
+
+     elif args.command == "logs":
+         success = daemon.show_logs(follow=args.follow, lines=args.lines)
+         sys.exit(0 if success else 1)
+
+     else:
+         # No command - run in foreground (original behavior)
+         settings = get_settings()
+         run_foreground(settings)
+
+
  if __name__ == "__main__":
      main()
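The `daemon` module that `main()` calls into (`start_daemon`, `stop_daemon`, `restart_daemon`, `get_status`, `show_logs`) is not included in this hunk. A minimal sketch of the pidfile-based pattern those helpers imply, assuming the same PID_FILE path the runner above writes (all names and behavior here are assumptions, not the package's actual implementation):

```python
# Hypothetical sketch of pidfile-based helpers; the real
# local_openai2anthropic.daemon module is not shown in this diff.
import os
import signal
from pathlib import Path

PID_FILE = Path.home() / ".local" / "share" / "oa2a" / "oa2a.pid"


def get_pid() -> int | None:
    """Return the recorded PID if the pidfile names a live process."""
    try:
        pid = int(PID_FILE.read_text().strip())
        os.kill(pid, 0)  # signal 0 only checks that the process exists
        return pid
    except (FileNotFoundError, ValueError, ProcessLookupError):
        return None


def stop_daemon(force: bool = False) -> bool:
    """Send SIGTERM (or SIGKILL with --force) to the recorded server process."""
    pid = get_pid()
    if pid is None:
        print("Server is not running")
        return False
    os.kill(pid, signal.SIGKILL if force else signal.SIGTERM)
    return True
```

SIGTERM lines up with the runner's `_signal_handler`, which removes the pidfile and exits cleanly; SIGKILL would bypass that cleanup, which is presumably why it is reserved for `--force`.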
@@ -0,0 +1,149 @@
+ # SPDX-License-Identifier: Apache-2.0
+ """
+ OpenAI API type definitions for compatibility with vLLM/SGLang responses.
+
+ This module defines Pydantic models compatible with OpenAI API responses.
+ These models support additional fields, such as `reasoning_content`, that
+ are returned by vLLM/SGLang but are not present in the official OpenAI SDK.
+ """
+
+ from typing import Any, Literal, Optional, TypedDict
+
+ from pydantic import BaseModel
+
+
+ # TypedDict types for parameters (used as dict in code)
+ class ChatCompletionToolFunction(TypedDict):
+     """Function definition for a tool."""
+
+     name: str
+     description: str
+     parameters: dict[str, Any]
+
+
+ class ChatCompletionToolParam(TypedDict):
+     """Tool parameter for chat completion."""
+
+     type: Literal["function"]
+     function: ChatCompletionToolFunction
+
+
+ class CompletionCreateParams(TypedDict, total=False):
+     """Parameters for creating a chat completion."""
+
+     model: str
+     messages: list[dict[str, Any]]
+     max_tokens: int
+     temperature: float
+     top_p: float
+     top_k: int
+     stream: bool
+     stop: list[str]
+     tools: list[ChatCompletionToolParam]
+     tool_choice: str | dict[str, Any]
+     stream_options: dict[str, Any]
+     # Additional fields for vLLM/SGLang compatibility
+     chat_template_kwargs: dict[str, Any]
+     # Internal field for server tools config
+     _server_tools_config: dict[str, dict[str, Any]]
+
+
+ # Pydantic models for API responses
+ class Function(BaseModel):
+     """A function call."""
+
+     name: str
+     arguments: str
+
+
+ class ChatCompletionMessageToolCall(BaseModel):
+     """A tool call in a chat completion message."""
+
+     id: str
+     type: str = "function"
+     function: Function
+
+
+ class ChatCompletionMessage(BaseModel):
+     """A chat completion message."""
+
+     role: str
+     content: Optional[str] = None
+     tool_calls: Optional[list[ChatCompletionMessageToolCall]] = None
+     # Additional field for reasoning content (thinking) from vLLM/SGLang
+     reasoning_content: Optional[str] = None
+
+
+ class Choice(BaseModel):
+     """A choice in a chat completion response."""
+
+     index: int = 0
+     message: ChatCompletionMessage
+     finish_reason: Optional[str] = None
+
+
+ class FunctionDelta(BaseModel):
+     """A function call delta."""
+
+     name: Optional[str] = None
+     arguments: Optional[str] = None
+
+
+ class ChatCompletionDeltaToolCall(BaseModel):
+     """A tool call delta in a streaming response."""
+
+     index: int = 0
+     id: Optional[str] = None
+     type: Optional[str] = None
+     function: Optional[FunctionDelta] = None
+
+
+ class ChoiceDelta(BaseModel):
+     """A delta in a streaming chat completion response."""
+
+     role: Optional[str] = None
+     content: Optional[str] = None
+     tool_calls: Optional[list[ChatCompletionDeltaToolCall]] = None
+     # Additional field for reasoning content (thinking) from vLLM/SGLang
+     reasoning_content: Optional[str] = None
+
+
+ class StreamingChoice(BaseModel):
+     """A choice in a streaming chat completion response."""
+
+     index: int = 0
+     delta: ChoiceDelta
+     finish_reason: Optional[str] = None
+
+
+ class CompletionUsage(BaseModel):
+     """Usage statistics for a completion request."""
+
+     prompt_tokens: int
+     completion_tokens: int
+     total_tokens: int
+     # Optional cache-related fields
+     cache_creation_input_tokens: Optional[int] = None
+     cache_read_input_tokens: Optional[int] = None
+
+
+ class ChatCompletion(BaseModel):
+     """A chat completion response."""
+
+     id: str
+     object: str = "chat.completion"
+     created: int
+     model: str
+     choices: list[Choice]
+     usage: Optional[CompletionUsage] = None
+
+
+ class ChatCompletionChunk(BaseModel):
+     """A chunk in a streaming chat completion response."""
+
+     id: str
+     object: str = "chat.completion.chunk"
+     created: int
+     model: str
+     choices: list[StreamingChoice]
+     usage: Optional[CompletionUsage] = None
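For illustration, a short example of how these models accept a vLLM/SGLang-style payload that carries `reasoning_content`, which the official OpenAI SDK would drop. The import path and the payload are fabricated for the example; only the model classes come from the diff above:

```python
# Example usage sketch; import path assumed, payload illustrative.
from local_openai2anthropic.openai_types import ChatCompletion  # assumed module name

payload = {
    "id": "chatcmpl-123",
    "object": "chat.completion",
    "created": 1700000000,
    "model": "some-local-model",
    "choices": [
        {
            "index": 0,
            "message": {
                "role": "assistant",
                "content": "The answer is 4.",
                "reasoning_content": "2 + 2 = 4",  # extra field kept by these models
            },
            "finish_reason": "stop",
        }
    ],
    "usage": {"prompt_tokens": 10, "completion_tokens": 8, "total_tokens": 18},
}

completion = ChatCompletion.model_validate(payload)  # Pydantic v2 validation
assert completion.choices[0].message.reasoning_content == "2 + 2 = 4"
```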