local-openai2anthropic 0.2.3__py3-none-any.whl → 0.3.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -3,7 +3,7 @@
 local-openai2anthropic: A proxy server that converts Anthropic Messages API to OpenAI API.
 """
 
-__version__ = "0.2.3"
+__version__ = "0.3.6"
 
 from local_openai2anthropic.protocol import (
     AnthropicError,
@@ -3,44 +3,137 @@
 Configuration settings for the proxy server.
 """
 
+import sys
 from functools import lru_cache
+from pathlib import Path
 from typing import Optional
 
-from pydantic_settings import BaseSettings, SettingsConfigDict
+from pydantic import BaseModel, ConfigDict
 
 
-class Settings(BaseSettings):
-    """Application settings loaded from environment variables."""
-
-    model_config = SettingsConfigDict(
-        env_prefix="OA2A_",  # OpenAI-to-Anthropic prefix
-        env_file=".env",
-        env_file_encoding="utf-8",
-        case_sensitive=False,
-    )
-
+def get_config_dir() -> Path:
+    """Get platform-specific config directory.
+
+    Returns:
+        Path to the config directory (~/.oa2a)
+    """
+    return Path.home() / ".oa2a"
+
+
+def get_config_file() -> Path:
+    """Get config file path.
+
+    Returns:
+        Path to the config file (~/.oa2a/config.toml)
+    """
+    return get_config_dir() / "config.toml"
+
+
+def create_default_config() -> bool:
+    """Create the default config file if it does not exist.
+
+    Returns:
+        True if a new config file was created, False if it already exists
+    """
+    config_file = get_config_file()
+    if config_file.exists():
+        return False
+
+    config_dir = get_config_dir()
+    config_dir.mkdir(parents=True, exist_ok=True)
+
+    # Set restrictive permissions (0o700) for the config directory on Unix-like systems
+    if sys.platform != "win32":
+        config_dir.chmod(0o700)
+
+    default_config = """# OA2A Configuration File
+# Place this file at ~/.oa2a/config.toml
+
+# OpenAI API Configuration
+openai_api_key = ""
+openai_base_url = "https://api.openai.com/v1"
+openai_org_id = ""
+openai_project_id = ""
+
+# Server Configuration
+host = "0.0.0.0"
+port = 8080
+request_timeout = 300.0
+
+# API Key for authenticating requests to this server (optional)
+api_key = ""
+
+# CORS settings
+cors_origins = ["*"]
+cors_credentials = true
+cors_methods = ["*"]
+cors_headers = ["*"]
+
+# Logging
+log_level = "INFO"
+log_dir = ""  # Empty uses platform-specific default
+
+# Tavily Web Search Configuration
+tavily_api_key = ""
+tavily_timeout = 30.0
+tavily_max_results = 5
+websearch_max_uses = 5
+"""
+    config_file.write_text(default_config, encoding="utf-8")
+
+    # Set restrictive permissions (0o600) for the config file on Unix-like systems
+    if sys.platform != "win32":
+        config_file.chmod(0o600)
+
+    return True
+
+
+def load_config_from_file() -> dict:
+    """Load configuration from TOML file.
+
+    Returns:
+        Dictionary containing configuration values, empty dict if file doesn't exist
+    """
+    if sys.version_info >= (3, 11):
+        import tomllib
+    else:
+        import tomli as tomllib
+
+    config_file = get_config_file()
+    if not config_file.exists():
+        return {}
+    with open(config_file, "rb") as f:
+        return tomllib.load(f)
+
+
+class Settings(BaseModel):
+    """Application settings loaded from config file."""
+
+    model_config = ConfigDict(extra="ignore")
+
     # OpenAI API Configuration
     openai_api_key: Optional[str] = None
     openai_base_url: str = "https://api.openai.com/v1"
     openai_org_id: Optional[str] = None
     openai_project_id: Optional[str] = None
-
+
     # Server Configuration
     host: str = "0.0.0.0"
     port: int = 8080
     request_timeout: float = 300.0  # 5 minutes
-
+
     # API Key for authenticating requests to this server (optional)
     api_key: Optional[str] = None
-
+
     # CORS settings
     cors_origins: list[str] = ["*"]
     cors_credentials: bool = True
     cors_methods: list[str] = ["*"]
     cors_headers: list[str] = ["*"]
-
+
     # Logging
-    log_level: str = "DEBUG"
+    log_level: str = "INFO"
+    log_dir: str = ""  # Empty means use platform-specific default
 
     # Tavily Web Search Configuration
     tavily_api_key: Optional[str] = None
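
The new loader uses the standard-library `tomllib` on Python 3.11+ and falls back to the third-party `tomli` backport on older interpreters, which therefore becomes a runtime dependency there. A minimal standalone sketch of the same version-gated pattern (the file name is illustrative, not from the package):

import sys

if sys.version_info >= (3, 11):
    import tomllib  # stdlib since Python 3.11
else:
    import tomli as tomllib  # third-party backport; must be installed separately

with open("config.toml", "rb") as f:  # tomllib only parses binary streams
    data = tomllib.load(f)
print(data.get("port", 8080))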
@@ -60,8 +153,29 @@ class Settings(BaseSettings):
         headers["OpenAI-Project"] = self.openai_project_id
         return headers
 
+    @classmethod
+    def from_toml(cls) -> "Settings":
+        """Load settings from TOML config file.
+
+        Returns:
+            Settings instance populated from config file
+        """
+        config_data = load_config_from_file()
+        return cls(**config_data)
+
 
 @lru_cache
 def get_settings() -> Settings:
-    """Get cached settings instance."""
-    return Settings()
+    """Get cached settings instance.
+
+    Creates default config file if it doesn't exist and notifies the user.
+
+    Returns:
+        Settings instance loaded from config file
+    """
+    created = create_default_config()
+    if created:
+        config_file = get_config_file()
+        print(f"Created default config file: {config_file}")
+        print("Please edit it to add your API keys and settings.")
+    return Settings.from_toml()
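
Taken together, these hunks drop environment-variable configuration (the `OA2A_*` prefix via pydantic-settings) in favor of a TOML file at `~/.oa2a/config.toml`. A hedged usage sketch, assuming the import path shown above (the call pattern is illustrative, not package documentation):

from local_openai2anthropic.config import get_config_file, get_settings

settings = get_settings()  # first run: writes ~/.oa2a/config.toml and prints a notice
print(get_config_file())   # ~/.oa2a/config.toml
print(settings.openai_base_url, settings.port)

Since `get_settings()` is wrapped in `@lru_cache`, edits to the config file only take effect after the process restarts.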
@@ -17,13 +17,11 @@ from anthropic.types import (
 from anthropic.types.message_create_params import MessageCreateParams
 from openai.types.chat import (
     ChatCompletion,
-    ChatCompletionChunk,
     ChatCompletionToolParam,
 )
 from openai.types.chat.completion_create_params import CompletionCreateParams
 
 from local_openai2anthropic.protocol import UsageWithCache
-from local_openai2anthropic.server_tools import ServerToolRegistry
 
 logger = logging.getLogger(__name__)
 
@@ -49,11 +47,12 @@ def convert_anthropic_to_openai(
     system = anthropic_params.get("system")
     stop_sequences = anthropic_params.get("stop_sequences")
     stream = anthropic_params.get("stream", False)
-    temperature = anthropic_params.get("temperature")
+    temperature = anthropic_params.get("temperature", 0.6)
     tool_choice = anthropic_params.get("tool_choice")
     tools = anthropic_params.get("tools")
     top_k = anthropic_params.get("top_k")
-    top_p = anthropic_params.get("top_p")
+    top_p = anthropic_params.get("top_p", 0.95)
+    repetition_penalty = anthropic_params.get("repetition_penalty", 1.1)
     thinking = anthropic_params.get("thinking")
     # metadata is accepted but not forwarded to OpenAI
 
@@ -92,7 +91,9 @@ def convert_anthropic_to_openai(
         converted_messages = _convert_anthropic_message_to_openai(msg)
         openai_messages.extend(converted_messages)
         msg_count += 1
-    logger.debug(f"Converted {msg_count} messages, total OpenAI messages: {len(openai_messages)}")
+    logger.debug(
+        f"Converted {msg_count} messages, total OpenAI messages: {len(openai_messages)}"
+    )
 
     # Build OpenAI params
     params: dict[str, Any] = {
@@ -100,6 +101,7 @@ def convert_anthropic_to_openai(
         "messages": openai_messages,
         "max_tokens": max_tokens,
         "stream": stream,
+        "repetition_penalty": repetition_penalty,
     }
 
     # Always include usage in stream for accurate token counting
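
Behavioral note on the two hunks above: 0.3.6 injects sampling defaults when a client omits them (`temperature=0.6`, `top_p=0.95`, `repetition_penalty=1.1`) instead of leaving them unset as 0.2.3 did, and `repetition_penalty` is now always added to the outgoing params. `repetition_penalty` is not an official OpenAI Chat Completions parameter; it is an extension understood by vLLM/SGLang-style backends. A sketch of the effect on a minimal request (values illustrative; how temperature/top_p are attached to params is outside these hunks):

anthropic_request = {
    "model": "my-local-model",  # hypothetical model name
    "max_tokens": 128,
    "messages": [{"role": "user", "content": "Hello"}],
}
# convert_anthropic_to_openai(...) now produces params that include:
#   "repetition_penalty": 1.1   # always present, per the params dict above
# with temperature=0.6 and top_p=0.95 applied as defaults when absent.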
@@ -139,17 +141,21 @@ def convert_anthropic_to_openai(
             openai_tools.append(openai_tool)
 
     # Add server tools as OpenAI function tools
-    for tool_class in (enabled_server_tools or []):
+    for tool_class in enabled_server_tools or []:
         if tool_class.tool_type in server_tools_config:
             config = server_tools_config[tool_class.tool_type]
             openai_tools.append(tool_class.to_openai_tool(config))
 
     if openai_tools:
         params["tools"] = openai_tools
-
+
     # Convert tool_choice
     if tool_choice:
-        tc = tool_choice if isinstance(tool_choice, dict) else tool_choice.model_dump()
+        tc = (
+            tool_choice
+            if isinstance(tool_choice, dict)
+            else tool_choice.model_dump()
+        )
         tc_type = tc.get("type")
         if tc_type == "auto":
             params["tool_choice"] = "auto"
@@ -162,7 +168,7 @@ def convert_anthropic_to_openai(
             }
         else:
            params["tool_choice"] = "auto"
-
+
     # Handle thinking parameter
     # vLLM/SGLang use chat_template_kwargs.thinking to toggle thinking mode
     # Some models use "thinking", others use "enable_thinking", so we include both
@@ -181,7 +187,7 @@ def convert_anthropic_to_openai(
         logger.debug(
             "thinking.budget_tokens (%s) is accepted but not supported by "
             "vLLM/SGLang. Using default thinking configuration.",
-            budget_tokens
+            budget_tokens,
         )
     else:
         # Default to disabled thinking mode if not explicitly enabled
@@ -208,32 +214,32 @@ def _convert_anthropic_message_to_openai(
 ) -> list[dict[str, Any]]:
     """
     Convert a single Anthropic message to OpenAI format.
-
-    Returns a list of messages because tool_results need to be
+
+    Returns a list of messages because tool_results need to be
     separate tool messages in OpenAI format.
     """
     role = msg.get("role", "user")
     content = msg.get("content", "")
-
+
     if isinstance(content, str):
         return [{"role": role, "content": content}]
-
+
     # Handle list of content blocks
     openai_content: list[dict[str, Any]] = []
     tool_calls: list[dict[str, Any]] = []
     tool_call_results: list[dict[str, Any]] = []
-
+
     for block in content:
         if isinstance(block, str):
             openai_content.append({"type": "text", "text": block})
             continue
-
+
         block_type = block.get("type") if isinstance(block, dict) else block.type
-
+
         if block_type == "text":
             text = block.get("text") if isinstance(block, dict) else block.text
             openai_content.append({"type": "text", "text": text})
-
+
         elif block_type == "image":
             # Convert image to image_url format
             source = block.get("source") if isinstance(block, dict) else block.source
@@ -246,11 +252,13 @@ def _convert_anthropic_message_to_openai(
                 data = source.data
             # Build data URL
             url = f"data:{media_type};base64,{data}"
-            openai_content.append({
-                "type": "image_url",
-                "image_url": {"url": url},
-            })
-
+            openai_content.append(
+                {
+                    "type": "image_url",
+                    "image_url": {"url": url},
+                }
+            )
+
         elif block_type == "tool_use":
             # Convert to function call
             if isinstance(block, dict):
@@ -261,27 +269,31 @@ def _convert_anthropic_message_to_openai(
                 tool_id = block.id
                 name = block.name
                 input_data = block.input
-
-            tool_calls.append({
-                "id": tool_id,
-                "type": "function",
-                "function": {
-                    "name": name,
-                    "arguments": json.dumps(input_data) if isinstance(input_data, dict) else str(input_data),
-                },
-            })
-
+
+            tool_calls.append(
+                {
+                    "id": tool_id,
+                    "type": "function",
+                    "function": {
+                        "name": name,
+                        "arguments": json.dumps(input_data)
+                        if isinstance(input_data, dict)
+                        else str(input_data),
+                    },
+                }
+            )
+
         elif block_type == "tool_result":
             # Tool results need to be separate tool messages
             if isinstance(block, dict):
                 tool_use_id = block.get("tool_use_id", "")
                 result_content = block.get("content", "")
-                is_error = block.get("is_error", False)
+                # Note: is_error is not directly supported in OpenAI API
             else:
                 tool_use_id = block.tool_use_id
                 result_content = block.content
-                is_error = getattr(block, "is_error", False)
-
+                # Note: is_error is not directly supported in OpenAI API
+
             # Handle content that might be a list or string
             if isinstance(result_content, list):
                 # Extract text from content blocks
@@ -298,7 +310,7 @@ def _convert_anthropic_message_to_openai(
                 result_text = "\n".join(text_parts)
             else:
                 result_text = str(result_content)
-
+
             tool_msg: dict[str, Any] = {
                 "role": "tool",
                 "tool_call_id": tool_use_id,
@@ -306,28 +318,28 @@ def _convert_anthropic_message_to_openai(
             }
             # Note: is_error is not directly supported in OpenAI API
             # but we could add it to content if needed
-
+
             tool_call_results.append(tool_msg)
-
+
     # Build primary message
     messages: list[dict[str, Any]] = []
     # SGLang requires content field to be present, default to empty string
     primary_msg: dict[str, Any] = {"role": role, "content": ""}
-
+
     if openai_content:
         if len(openai_content) == 1 and openai_content[0]["type"] == "text":
             primary_msg["content"] = openai_content[0]["text"]
         else:
             primary_msg["content"] = openai_content
-
+
     if tool_calls:
         primary_msg["tool_calls"] = tool_calls
-
+
     messages.append(primary_msg)
-
+
     # Add tool result messages separately
     messages.extend(tool_call_results)
-
+
     return messages
 
 
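To make the fan-out concrete, a sketch of this conversion with illustrative values; note that the `is_error` flag on tool results, which 0.2.3 read but never forwarded, is now dropped at parse time as well:

msg = {
    "role": "user",
    "content": [
        {"type": "tool_result", "tool_use_id": "call_1", "content": "42"},
    ],
}
# Per the code above, _convert_anthropic_message_to_openai(msg) returns:
# [
#     {"role": "user", "content": ""},  # primary message; SGLang needs "content"
#     {"role": "tool", "tool_call_id": "call_1", "content": "42"},
# ]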
@@ -353,24 +365,24 @@ def convert_openai_to_anthropic(
 ) -> Message:
     """
     Convert OpenAI ChatCompletion to Anthropic Message.
-
+
     Args:
         completion: OpenAI chat completion response
         model: Model name
-
+
     Returns:
         Anthropic Message response
     """
     from anthropic.types.beta import BetaThinkingBlock
-
+
     choice = completion.choices[0]
     message = choice.message
-
+
     # Convert content blocks
     content: list[ContentBlock] = []
-
+
     # Add reasoning content (thinking) first if present
-    reasoning_content = getattr(message, 'reasoning_content', None)
+    reasoning_content = getattr(message, "reasoning_content", None)
     if reasoning_content:
         content.append(
             BetaThinkingBlock(
@@ -379,7 +391,7 @@ def convert_openai_to_anthropic(
                 signature="",  # Signature not available from OpenAI format
             )
         )
-
+
     # Add text content if present
     if message.content:
         if isinstance(message.content, str):
@@ -388,16 +400,20 @@ def convert_openai_to_anthropic(
             for part in message.content:
                 if part.type == "text":
                     content.append(TextBlock(type="text", text=part.text))
-
+
     # Convert tool calls
     if message.tool_calls:
         for tc in message.tool_calls:
+            # Handle case where function might be None
+            if not tc.function:
+                continue
+
             tool_input: dict[str, Any] = {}
             try:
                 tool_input = json.loads(tc.function.arguments)
             except json.JSONDecodeError:
                 tool_input = {"raw": tc.function.arguments}
-
+
             content.append(
                 ToolUseBlock(
                     type="tool_use",
@@ -406,7 +422,7 @@ def convert_openai_to_anthropic(
                     input=tool_input,
                 )
             )
-
+
     # Determine stop reason
     stop_reason_map = {
         "stop": "end_turn",
@@ -414,18 +430,24 @@ def convert_openai_to_anthropic(
         "tool_calls": "tool_use",
         "content_filter": "end_turn",
     }
-    anthropic_stop_reason = stop_reason_map.get(choice.finish_reason or "stop", "end_turn")
-
+    anthropic_stop_reason = stop_reason_map.get(
+        choice.finish_reason or "stop", "end_turn"
+    )
+
     # Build usage dict with cache support (if available from upstream)
     usage_dict = None
     if completion.usage:
         usage_dict = {
             "input_tokens": completion.usage.prompt_tokens,
             "output_tokens": completion.usage.completion_tokens,
-            "cache_creation_input_tokens": getattr(completion.usage, "cache_creation_input_tokens", None),
-            "cache_read_input_tokens": getattr(completion.usage, "cache_read_input_tokens", None),
+            "cache_creation_input_tokens": getattr(
+                completion.usage, "cache_creation_input_tokens", None
+            ),
+            "cache_read_input_tokens": getattr(
+                completion.usage, "cache_read_input_tokens", None
+            ),
         }
-
+
     # Build message dict to avoid Pydantic validation issues
     message_dict = {
         "id": completion.id,
@@ -437,5 +459,5 @@
         "stop_sequence": None,
         "usage": usage_dict,
     }
-
+
     return Message.model_validate(message_dict)
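
A hedged end-to-end sketch of the response conversion (the converter's module path is not shown in this diff and is assumed below; the completion payload is fabricated for illustration):

from openai.types.chat import ChatCompletion

from local_openai2anthropic.converter import convert_openai_to_anthropic  # module path assumed

completion = ChatCompletion.model_validate({
    "id": "chatcmpl-123",
    "object": "chat.completion",
    "created": 0,
    "model": "my-local-model",
    "choices": [{
        "index": 0,
        "finish_reason": "stop",
        "logprobs": None,
        "message": {"role": "assistant", "content": "Hi!"},
    }],
    "usage": {"prompt_tokens": 3, "completion_tokens": 2, "total_tokens": 5},
})
msg = convert_openai_to_anthropic(completion, model="my-local-model")
print(msg.stop_reason)  # "end_turn", per the stop_reason_map above
print(msg.usage.input_tokens, msg.usage.output_tokens)  # 3 2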
@@ -5,34 +5,103 @@ Main entry point for the local-openai2anthropic proxy server.
 
 import argparse
 import logging
+import os
 import sys
+from logging.handlers import TimedRotatingFileHandler
+from pathlib import Path
 
 import uvicorn
 from fastapi import FastAPI, HTTPException, Request
 from fastapi.middleware.cors import CORSMiddleware
 from fastapi.responses import JSONResponse
 
-from local_openai2anthropic.config import Settings, get_settings
+from local_openai2anthropic.config import Settings, get_config_file, get_settings
 from local_openai2anthropic.protocol import AnthropicError, AnthropicErrorResponse
 from local_openai2anthropic.router import router
 
 
+def get_default_log_dir() -> str:
+    """Get default log directory based on platform.
+
+    Returns:
+        Path to log directory
+    """
+    if sys.platform == 'win32':
+        # Windows: use %LOCALAPPDATA%\local-openai2anthropic\logs
+        base_dir = os.environ.get('LOCALAPPDATA', os.path.expanduser('~\\AppData\\Local'))
+        return os.path.join(base_dir, 'local-openai2anthropic', 'logs')
+    else:
+        # macOS/Linux: use ~/.local/share/local-openai2anthropic/logs
+        return os.path.expanduser("~/.local/share/local-openai2anthropic/logs")
+
+
+def setup_logging(log_level: str, log_dir: str | None = None) -> None:
+    """Setup logging with daily rotation, keeping only today's logs.
+
+    Args:
+        log_level: Logging level (DEBUG, INFO, WARNING, ERROR)
+        log_dir: Directory for log files (platform-specific default)
+    """
+    # Default log directory based on platform
+    if log_dir is None:
+        log_dir = get_default_log_dir()
+
+    # Expand user directory if specified
+    log_dir = os.path.expanduser(log_dir)
+
+    # Create log directory if it doesn't exist
+    Path(log_dir).mkdir(parents=True, exist_ok=True)
+
+    log_file = os.path.join(log_dir, "server.log")
+
+    # Create formatter
+    formatter = logging.Formatter(
+        "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
+    )
+
+    # Setup root logger
+    root_logger = logging.getLogger()
+    root_logger.setLevel(getattr(logging, log_level.upper()))
+
+    # Clear existing handlers
+    root_logger.handlers = []
+
+    # Console handler
+    console_handler = logging.StreamHandler(sys.stdout)
+    console_handler.setFormatter(formatter)
+    root_logger.addHandler(console_handler)
+
+    # File handler with daily rotation
+    # backupCount=0 means no backup files are kept (only today's log)
+    # when='midnight' rotates at midnight
+    file_handler = TimedRotatingFileHandler(
+        log_file,
+        when='midnight',
+        interval=1,
+        backupCount=0,  # Keep only today's log
+        encoding='utf-8'
+    )
+    file_handler.setFormatter(formatter)
+    root_logger.addHandler(file_handler)
+
+    logging.info(f"Logging configured. Log file: {log_file}")
+
+
 def create_app(settings: Settings | None = None) -> FastAPI:
     """Create and configure the FastAPI application."""
     if settings is None:
         settings = get_settings()
 
-    # Configure logging
-    logging.basicConfig(
-        level=getattr(logging, settings.log_level.upper()),
-        format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
-    )
+    # Configure logging with daily rotation
+    # Use platform-specific default if log_dir is not set
+    log_dir = settings.log_dir if settings.log_dir else None
+    setup_logging(settings.log_level, log_dir)
 
     # Create FastAPI app
     app = FastAPI(
         title="local-openai2anthropic",
         description="A proxy server that converts Anthropic Messages API to OpenAI API",
-        version="0.2.0",
+        version="0.3.6",
         docs_url="/docs",
         redoc_url="/redoc",
     )
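
A standalone sketch of the new rotation setup, using only the stdlib pieces imported above. One caveat worth noting: in CPython's `TimedRotatingFileHandler`, old rotated files are deleted only when `backupCount > 0`; with `backupCount=0` the midnight rollover still renames the log to a dated name but skips pruning, so contrary to the comment above, `server.log.YYYY-MM-DD` files can accumulate over time:

import logging
from logging.handlers import TimedRotatingFileHandler

# Same handler parameters as setup_logging above, on a throwaway logger.
handler = TimedRotatingFileHandler(
    "server.log", when="midnight", interval=1, backupCount=0, encoding="utf-8"
)
handler.setFormatter(
    logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
)
demo_logger = logging.getLogger("rotation-demo")
demo_logger.addHandler(handler)
demo_logger.setLevel(logging.INFO)
demo_logger.info("written to server.log; renamed to server.log.<date> at midnight")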
@@ -134,11 +203,13 @@ def run_foreground(settings: Settings) -> None:
     """Run server in foreground mode (blocking)."""
     # Validate required settings
     if not settings.openai_api_key:
+        config_file = get_config_file()
         print(
-            "Error: OA2A_OPENAI_API_KEY environment variable is required.\n"
-            "Set it via:\n"
-            "  - Environment variable: export OA2A_OPENAI_API_KEY='your-key'\n"
-            "  - Or create a .env file with OA2A_OPENAI_API_KEY=your-key",
+            f"Error: openai_api_key is required.\n"
+            f"Please edit the configuration file:\n"
+            f"  {config_file}\n"
+            f"\nSet your OpenAI API key:\n"
+            f'  openai_api_key = "your-api-key"',
             file=sys.stderr,
         )
         sys.exit(1)
@@ -182,7 +253,7 @@ Examples:
     parser.add_argument(
         "--version",
         action="version",
-        version="%(prog)s 0.2.0",
+        version="%(prog)s 0.3.6",
     )
 
     # Create subparsers for commands
@@ -6,7 +6,7 @@ Uses Anthropic SDK types for request/response models.
 
 from typing import Any, Literal, Optional
 
-from pydantic import BaseModel, Field
+from pydantic import BaseModel
 
 # Re-export all Anthropic types for convenience
 from anthropic.types import (