local-openai2anthropic 0.1.0__py3-none-any.whl → 0.3.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -3,42 +3,112 @@
 Main entry point for the local-openai2anthropic proxy server.
 """

+import argparse
 import logging
+import os
 import sys
+from logging.handlers import TimedRotatingFileHandler
+from pathlib import Path

 import uvicorn
 from fastapi import FastAPI, HTTPException, Request
 from fastapi.middleware.cors import CORSMiddleware
 from fastapi.responses import JSONResponse

-from local_openai2anthropic.config import Settings, get_settings
+from local_openai2anthropic.config import Settings, get_config_file, get_settings
 from local_openai2anthropic.protocol import AnthropicError, AnthropicErrorResponse
 from local_openai2anthropic.router import router


+def get_default_log_dir() -> str:
+    """Get the default log directory for the current platform.
+
+    Returns:
+        Path to the log directory.
+    """
+    if sys.platform == 'win32':
+        # Windows: use %LOCALAPPDATA%\local-openai2anthropic\logs
+        base_dir = os.environ.get('LOCALAPPDATA', os.path.expanduser('~\\AppData\\Local'))
+        return os.path.join(base_dir, 'local-openai2anthropic', 'logs')
+    else:
+        # macOS/Linux: use ~/.local/share/local-openai2anthropic/logs
+        return os.path.expanduser("~/.local/share/local-openai2anthropic/logs")
+
+
+def setup_logging(log_level: str, log_dir: str | None = None) -> None:
+    """Set up logging with daily rotation, keeping only today's logs.
+
+    Args:
+        log_level: Logging level (DEBUG, INFO, WARNING, ERROR)
+        log_dir: Directory for log files (platform-specific default)
+    """
+    # Default log directory based on platform
+    if log_dir is None:
+        log_dir = get_default_log_dir()
+
+    # Expand the user directory if specified
+    log_dir = os.path.expanduser(log_dir)
+
+    # Create the log directory if it doesn't exist
+    Path(log_dir).mkdir(parents=True, exist_ok=True)
+
+    log_file = os.path.join(log_dir, "server.log")
+
+    # Create formatter
+    formatter = logging.Formatter(
+        "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
+    )
+
+    # Set up the root logger
+    root_logger = logging.getLogger()
+    root_logger.setLevel(getattr(logging, log_level.upper()))
+
+    # Clear existing handlers
+    root_logger.handlers = []
+
+    # Console handler
+    console_handler = logging.StreamHandler(sys.stdout)
+    console_handler.setFormatter(formatter)
+    root_logger.addHandler(console_handler)
+
+    # File handler with daily rotation:
+    # when='midnight' rotates at midnight;
+    # backupCount=0 means no backup files are kept (only today's log)
+    file_handler = TimedRotatingFileHandler(
+        log_file,
+        when='midnight',
+        interval=1,
+        backupCount=0,  # Keep only today's log
+        encoding='utf-8'
+    )
+    file_handler.setFormatter(formatter)
+    root_logger.addHandler(file_handler)
+
+    logging.info(f"Logging configured. Log file: {log_file}")
+
+
 def create_app(settings: Settings | None = None) -> FastAPI:
     """Create and configure the FastAPI application."""
     if settings is None:
         settings = get_settings()
-
-    # Configure logging
-    logging.basicConfig(
-        level=getattr(logging, settings.log_level.upper()),
-        format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
-    )
-
+
+    # Configure logging with daily rotation;
+    # use the platform-specific default if log_dir is not set
+    log_dir = settings.log_dir if settings.log_dir else None
+    setup_logging(settings.log_level, log_dir)
+
     # Create FastAPI app
     app = FastAPI(
         title="local-openai2anthropic",
         description="A proxy server that converts Anthropic Messages API to OpenAI API",
-        version="0.1.0",
+        version="0.3.6",
         docs_url="/docs",
         redoc_url="/redoc",
     )
-
+
     # Store settings in app state
     app.state.settings = settings
-
+
     # Add CORS middleware
     app.add_middleware(
         CORSMiddleware,
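
The hunk above swaps the old logging.basicConfig call for a console handler plus a TimedRotatingFileHandler that rolls the file over at midnight. A standalone sketch of that rotation policy using only the stdlib (file name and format string taken from the diff; per the diff's comments the intent is to retain only the current day's log):

    import logging
    from logging.handlers import TimedRotatingFileHandler

    handler = TimedRotatingFileHandler(
        "server.log",      # same base name the diff uses
        when="midnight",   # rotate once per day at midnight
        interval=1,
        backupCount=0,
        encoding="utf-8",
    )
    handler.setFormatter(
        logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
    )
    root = logging.getLogger()
    root.setLevel(logging.INFO)
    root.addHandler(handler)
    root.info("written to server.log until the next midnight rollover")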
@@ -47,16 +117,17 @@ def create_app(settings: Settings | None = None) -> FastAPI:
         allow_methods=settings.cors_methods,
         allow_headers=settings.cors_headers,
     )
-
+
     # Add API key authentication middleware if configured
     if settings.api_key:
+
         @app.middleware("http")
         async def auth_middleware(request: Request, call_next):
             """Validate API key if configured."""
             # Skip auth for docs and health check
             if request.url.path in ["/docs", "/redoc", "/openapi.json", "/health"]:
                 return await call_next(request)
-
+
             auth_header = request.headers.get("Authorization", "")
             if not auth_header.startswith("Bearer "):
                 error_response = AnthropicErrorResponse(
@@ -69,7 +140,7 @@ def create_app(settings: Settings | None = None) -> FastAPI:
                     status_code=401,
                     content=error_response.model_dump(),
                 )
-
+
             token = auth_header[7:]  # Remove "Bearer " prefix
             if token != settings.api_key:
                 error_response = AnthropicErrorResponse(
@@ -82,12 +153,12 @@ def create_app(settings: Settings | None = None) -> FastAPI:
                     status_code=401,
                     content=error_response.model_dump(),
                 )
-
+
             return await call_next(request)
-
+
     # Include routers
     app.include_router(router)
-
+
     # Exception handlers
     @app.exception_handler(HTTPException)
     async def http_exception_handler(request: Request, exc: HTTPException):
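
With settings.api_key configured, every request outside the exempt paths must carry a Bearer token equal to the configured key. A hedged client-side sketch (the host, port, and /v1/messages route are assumptions; only the header handling is taken from the middleware above):

    import urllib.request

    # Hypothetical endpoint; the actual routes come from the included router.
    req = urllib.request.Request(
        "http://127.0.0.1:8080/v1/messages",
        data=b'{"model": "m", "max_tokens": 16, "messages": []}',
        headers={
            "Authorization": "Bearer your-api-key",  # must match settings.api_key
            "Content-Type": "application/json",
        },
        method="POST",
    )
    # urllib.request.urlopen(req) returns a 401 with an Anthropic-style error
    # body if the token is missing or mismatched; /docs, /redoc, /openapi.json
    # and /health bypass the check entirely.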
@@ -98,7 +169,7 @@ def create_app(settings: Settings | None = None) -> FastAPI:
                 status_code=exc.status_code,
                 content=exc.detail,
             )
-
+
         error_response = AnthropicErrorResponse(
             error=AnthropicError(
                 type="api_error",
@@ -109,7 +180,7 @@ def create_app(settings: Settings | None = None) -> FastAPI:
             status_code=exc.status_code,
             content=error_response.model_dump(),
         )
-
+
     @app.exception_handler(Exception)
     async def general_exception_handler(request: Request, exc: Exception):
         """Handle unexpected exceptions."""
@@ -124,33 +195,33 @@ def create_app(settings: Settings | None = None) -> FastAPI:
             status_code=500,
             content=error_response.model_dump(),
         )
-
+
     return app


-def main() -> None:
-    """Main entry point."""
-    # Load settings
-    settings = get_settings()
-
+def run_foreground(settings: Settings) -> None:
+    """Run server in foreground mode (blocking)."""
     # Validate required settings
     if not settings.openai_api_key:
+        config_file = get_config_file()
         print(
-            "Error: OPENAI_API_KEY environment variable is required.\n"
-            "Set it via:\n"
-            "  - Environment variable: export OA2A_OPENAI_API_KEY='your-key'\n"
-            "  - Or create a .env file with OPENAI_API_KEY=your-key",
+            f"Error: openai_api_key is required.\n"
+            f"Please edit the configuration file:\n"
+            f"  {config_file}\n"
+            f"\nSet your OpenAI API key:\n"
+            f'  openai_api_key = "your-api-key"',
             file=sys.stderr,
         )
         sys.exit(1)
-
+
     # Create app
     app = create_app(settings)
-
+
     # Run server
-    print(f"Starting local-openai2anthropic server on {settings.host}:{settings.port}")
+    print(f"Starting server on {settings.host}:{settings.port}")
     print(f"Proxying to: {settings.openai_base_url}")
-
+    print("Press Ctrl+C to stop\n")
+
     uvicorn.run(
         app,
         host=settings.host,
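
The replacement error message steers users to a configuration file rather than environment variables, and the quoted fix (openai_api_key = "your-api-key") suggests a TOML-style file. A hedged sketch of loading such a file with the stdlib (path and format are assumptions; the real lookup is get_config_file() in local_openai2anthropic.config, which this diff does not show):

    import tomllib  # Python 3.11+
    from pathlib import Path

    # Assumed location and format; the published package resolves this
    # via get_config_file() instead.
    config_path = Path.home() / ".config" / "local-openai2anthropic" / "config.toml"

    with config_path.open("rb") as f:
        config = tomllib.load(f)

    if not config.get("openai_api_key"):
        raise SystemExit(f"openai_api_key missing; edit {config_path}")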
@@ -160,5 +231,157 @@ def main() -> None:
     )


+def main() -> None:
+    """Main entry point with subcommand support."""
+    # Create main parser
+    parser = argparse.ArgumentParser(
+        prog="oa2a",
+        description="A proxy server that converts Anthropic Messages API to OpenAI API",
+        formatter_class=argparse.RawDescriptionHelpFormatter,
+        epilog="""
+Examples:
+  oa2a start    # Start server in background
+  oa2a stop     # Stop background server
+  oa2a restart  # Restart background server
+  oa2a status   # Check server status
+  oa2a logs     # View server logs
+  oa2a logs -f  # Follow server logs (tail -f)
+  oa2a          # Run server in foreground (default behavior)
+""".strip(),
+    )
+
+    parser.add_argument(
+        "--version",
+        action="version",
+        version="%(prog)s 0.3.6",
+    )
+
+    # Create subparsers for commands
+    subparsers = parser.add_subparsers(dest="command", help="Available commands")
+
+    # start command
+    start_parser = subparsers.add_parser("start", help="Start server in background")
+    start_parser.add_argument(
+        "--host",
+        default=None,
+        help="Server host (default: 0.0.0.0)",
+    )
+    start_parser.add_argument(
+        "--port",
+        type=int,
+        default=None,
+        help="Server port (default: 8080)",
+    )
+    start_parser.add_argument(
+        "--log-level",
+        default="info",
+        choices=["debug", "info", "warning", "error"],
+        help="Logging level (default: info)",
+    )
+
+    # stop command
+    stop_parser = subparsers.add_parser("stop", help="Stop background server")
+    stop_parser.add_argument(
+        "-f", "--force",
+        action="store_true",
+        help="Force kill the server",
+    )
+
+    # restart command
+    restart_parser = subparsers.add_parser("restart", help="Restart background server")
+    restart_parser.add_argument(
+        "--host",
+        default=None,
+        help="Server host (default: 0.0.0.0)",
+    )
+    restart_parser.add_argument(
+        "--port",
+        type=int,
+        default=None,
+        help="Server port (default: 8080)",
+    )
+    restart_parser.add_argument(
+        "--log-level",
+        default="info",
+        choices=["debug", "info", "warning", "error"],
+        help="Logging level (default: info)",
+    )
+
+    # status command
+    status_parser = subparsers.add_parser("status", help="Check server status")
+
+    # logs command
+    logs_parser = subparsers.add_parser("logs", help="View server logs")
+    logs_parser.add_argument(
+        "-f", "--follow",
+        action="store_true",
+        help="Follow log output (like tail -f)",
+    )
+    logs_parser.add_argument(
+        "-n", "--lines",
+        type=int,
+        default=50,
+        help="Number of lines to show (default: 50)",
+    )
+
+    args = parser.parse_args()
+
+    # Import daemon module only when needed
+    from local_openai2anthropic import daemon
+
+    # Handle subcommands
+    if args.command == "start":
+        # Get settings for defaults
+        settings = get_settings()
+        host = args.host or settings.host
+        port = args.port or settings.port
+
+        success = daemon.start_daemon(
+            host=host,
+            port=port,
+            log_level=args.log_level,
+        )
+        sys.exit(0 if success else 1)
+
+    elif args.command == "stop":
+        success = daemon.stop_daemon(force=args.force)
+        sys.exit(0 if success else 1)
+
+    elif args.command == "restart":
+        # Get settings for defaults
+        settings = get_settings()
+        host = args.host or settings.host
+        port = args.port or settings.port
+
+        success = daemon.restart_daemon(
+            host=host,
+            port=port,
+            log_level=args.log_level,
+        )
+        sys.exit(0 if success else 1)
+
+    elif args.command == "status":
+        running, pid, config = daemon.get_status()
+        if running and config:
+            host = config.get("host", "0.0.0.0")
+            port = config.get("port", 8080)
+            print(f"Server is running (PID: {pid})")
+            print(f"Listening on: {host}:{port}")
+        elif running:
+            print(f"Server is running (PID: {pid})")
+        else:
+            print("Server is not running")
+        sys.exit(0)
+
+    elif args.command == "logs":
+        success = daemon.show_logs(follow=args.follow, lines=args.lines)
+        sys.exit(0 if success else 1)
+
+    else:
+        # No command - run in foreground (original behavior)
+        settings = get_settings()
+        run_foreground(settings)
+
+
 if __name__ == "__main__":
     main()
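
main() defers to a local_openai2anthropic.daemon module that is not part of this diff. The interface the dispatch code assumes, sketched from the call sites above (signatures inferred, not the published implementation):

    # daemon-module contract implied by main()'s call sites; inferred only.
    def start_daemon(host: str, port: int, log_level: str) -> bool: ...
    def stop_daemon(force: bool = False) -> bool: ...
    def restart_daemon(host: str, port: int, log_level: str) -> bool: ...
    def get_status() -> tuple[bool, int | None, dict | None]: ...  # (running, pid, config)
    def show_logs(follow: bool = False, lines: int = 50) -> bool: ...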
@@ -0,0 +1,149 @@
+# SPDX-License-Identifier: Apache-2.0
+"""
+OpenAI API type definitions for compatibility with vLLM/SGLang responses.
+
+This module defines Pydantic models compatible with OpenAI API responses.
+These models support additional fields, like `reasoning_content`, that are
+returned by vLLM/SGLang but not present in the official OpenAI SDK.
+"""
+
+from typing import Any, Literal, Optional, TypedDict
+
+from pydantic import BaseModel
+
+
+# TypedDict types for parameters (used as dicts in code)
+class ChatCompletionToolFunction(TypedDict):
+    """Function definition for a tool."""
+
+    name: str
+    description: str
+    parameters: dict[str, Any]
+
+
+class ChatCompletionToolParam(TypedDict):
+    """Tool parameter for chat completion."""
+
+    type: Literal["function"]
+    function: ChatCompletionToolFunction
+
+
+class CompletionCreateParams(TypedDict, total=False):
+    """Parameters for creating a chat completion."""
+
+    model: str
+    messages: list[dict[str, Any]]
+    max_tokens: int
+    temperature: float
+    top_p: float
+    top_k: int
+    stream: bool
+    stop: list[str]
+    tools: list[ChatCompletionToolParam]
+    tool_choice: str | dict[str, Any]
+    stream_options: dict[str, Any]
+    # Additional fields for vLLM/SGLang compatibility
+    chat_template_kwargs: dict[str, Any]
+    # Internal field for server tools config
+    _server_tools_config: dict[str, dict[str, Any]]
+
+
+# Pydantic models for API responses
+class Function(BaseModel):
+    """A function call."""
+
+    name: str
+    arguments: str
+
+
+class ChatCompletionMessageToolCall(BaseModel):
+    """A tool call in a chat completion message."""
+
+    id: str
+    type: str = "function"
+    function: Function
+
+
+class ChatCompletionMessage(BaseModel):
+    """A chat completion message."""
+
+    role: str
+    content: Optional[str] = None
+    tool_calls: Optional[list[ChatCompletionMessageToolCall]] = None
+    # Additional field for reasoning content (thinking) from vLLM/SGLang
+    reasoning_content: Optional[str] = None
+
+
+class Choice(BaseModel):
+    """A choice in a chat completion response."""
+
+    index: int = 0
+    message: ChatCompletionMessage
+    finish_reason: Optional[str] = None
+
+
+class FunctionDelta(BaseModel):
+    """A function call delta."""
+
+    name: Optional[str] = None
+    arguments: Optional[str] = None
+
+
+class ChatCompletionDeltaToolCall(BaseModel):
+    """A tool call delta in a streaming response."""
+
+    index: int = 0
+    id: Optional[str] = None
+    type: Optional[str] = None
+    function: Optional[FunctionDelta] = None
+
+
+class ChoiceDelta(BaseModel):
+    """A delta in a streaming chat completion response."""
+
+    role: Optional[str] = None
+    content: Optional[str] = None
+    tool_calls: Optional[list[ChatCompletionDeltaToolCall]] = None
+    # Additional field for reasoning content (thinking) from vLLM/SGLang
+    reasoning_content: Optional[str] = None
+
+
+class StreamingChoice(BaseModel):
+    """A choice in a streaming chat completion response."""
+
+    index: int = 0
+    delta: ChoiceDelta
+    finish_reason: Optional[str] = None
+
+
+class CompletionUsage(BaseModel):
+    """Usage statistics for a completion request."""
+
+    prompt_tokens: int
+    completion_tokens: int
+    total_tokens: int
+    # Optional cache-related fields
+    cache_creation_input_tokens: Optional[int] = None
+    cache_read_input_tokens: Optional[int] = None
+
+
+class ChatCompletion(BaseModel):
+    """A chat completion response."""
+
+    id: str
+    object: str = "chat.completion"
+    created: int
+    model: str
+    choices: list[Choice]
+    usage: Optional[CompletionUsage] = None
+
+
+class ChatCompletionChunk(BaseModel):
+    """A chunk in a streaming chat completion response."""
+
+    id: str
+    object: str = "chat.completion.chunk"
+    created: int
+    model: str
+    choices: list[StreamingChoice]
+    usage: Optional[CompletionUsage] = None
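
A brief usage sketch for the new models: validating a vLLM/SGLang-style payload whose reasoning_content field the official OpenAI SDK types would not carry (the import path is hypothetical; the diff does not name the new file):

    # Hypothetical import path for the new module; not shown in the diff.
    # from local_openai2anthropic.openai_types import ChatCompletion

    payload = {
        "id": "chatcmpl-123",
        "created": 1700000000,
        "model": "my-local-model",  # placeholder model name
        "choices": [{
            "index": 0,
            "message": {
                "role": "assistant",
                "content": "42",
                "reasoning_content": "Chain of thought here...",  # vLLM/SGLang extension
            },
            "finish_reason": "stop",
        }],
        "usage": {"prompt_tokens": 10, "completion_tokens": 2, "total_tokens": 12},
    }
    completion = ChatCompletion.model_validate(payload)
    assert completion.choices[0].message.reasoning_content is not None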
@@ -6,7 +6,7 @@ Uses Anthropic SDK types for request/response models.

 from typing import Any, Literal, Optional

-from pydantic import BaseModel, Field
+from pydantic import BaseModel

 # Re-export all Anthropic types for convenience
 from anthropic.types import (