cli2api 0.1.0.tar.gz → 0.2.0.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (48)
  1. {cli2api-0.1.0 → cli2api-0.2.0}/PKG-INFO +11 -34
  2. {cli2api-0.1.0 → cli2api-0.2.0}/README.md +10 -33
  3. cli2api-0.2.0/cli2api/api/v1/chat.py +54 -0
  4. {cli2api-0.1.0 → cli2api-0.2.0}/cli2api/api/v1/responses.py +63 -33
  5. cli2api-0.2.0/cli2api/constants.py +61 -0
  6. {cli2api-0.1.0 → cli2api-0.2.0}/cli2api/providers/claude.py +143 -58
  7. {cli2api-0.1.0 → cli2api-0.2.0}/cli2api/schemas/openai.py +3 -30
  8. cli2api-0.2.0/cli2api/services/__init__.py +5 -0
  9. cli2api-0.2.0/cli2api/services/completion.py +308 -0
  10. cli2api-0.2.0/cli2api/streaming/tool_parser.py +267 -0
  11. {cli2api-0.1.0 → cli2api-0.2.0}/cli2api/tools/handler.py +168 -64
  12. {cli2api-0.1.0 → cli2api-0.2.0}/pyproject.toml +1 -1
  13. cli2api-0.2.0/tests/test_streaming_tool_parser.py +300 -0
  14. cli2api-0.1.0/cli2api/api/v1/chat.py +0 -378
  15. {cli2api-0.1.0 → cli2api-0.2.0}/.dockerignore +0 -0
  16. {cli2api-0.1.0 → cli2api-0.2.0}/.env.example +0 -0
  17. {cli2api-0.1.0 → cli2api-0.2.0}/.github/workflows/publish.yml +0 -0
  18. {cli2api-0.1.0 → cli2api-0.2.0}/.gitignore +0 -0
  19. {cli2api-0.1.0 → cli2api-0.2.0}/Dockerfile +0 -0
  20. {cli2api-0.1.0 → cli2api-0.2.0}/cli2api/__init__.py +0 -0
  21. {cli2api-0.1.0 → cli2api-0.2.0}/cli2api/__main__.py +0 -0
  22. {cli2api-0.1.0 → cli2api-0.2.0}/cli2api/api/__init__.py +0 -0
  23. {cli2api-0.1.0 → cli2api-0.2.0}/cli2api/api/dependencies.py +0 -0
  24. {cli2api-0.1.0 → cli2api-0.2.0}/cli2api/api/router.py +0 -0
  25. {cli2api-0.1.0 → cli2api-0.2.0}/cli2api/api/utils.py +0 -0
  26. {cli2api-0.1.0 → cli2api-0.2.0}/cli2api/api/v1/__init__.py +0 -0
  27. {cli2api-0.1.0 → cli2api-0.2.0}/cli2api/api/v1/models.py +0 -0
  28. {cli2api-0.1.0 → cli2api-0.2.0}/cli2api/config/__init__.py +0 -0
  29. {cli2api-0.1.0 → cli2api-0.2.0}/cli2api/config/settings.py +0 -0
  30. {cli2api-0.1.0 → cli2api-0.2.0}/cli2api/main.py +0 -0
  31. {cli2api-0.1.0 → cli2api-0.2.0}/cli2api/providers/__init__.py +0 -0
  32. {cli2api-0.1.0 → cli2api-0.2.0}/cli2api/schemas/__init__.py +0 -0
  33. {cli2api-0.1.0 → cli2api-0.2.0}/cli2api/schemas/internal.py +0 -0
  34. {cli2api-0.1.0 → cli2api-0.2.0}/cli2api/streaming/__init__.py +0 -0
  35. {cli2api-0.1.0 → cli2api-0.2.0}/cli2api/streaming/sse.py +0 -0
  36. {cli2api-0.1.0 → cli2api-0.2.0}/cli2api/tools/__init__.py +0 -0
  37. {cli2api-0.1.0 → cli2api-0.2.0}/cli2api/utils/__init__.py +0 -0
  38. {cli2api-0.1.0 → cli2api-0.2.0}/cli2api/utils/logging.py +0 -0
  39. {cli2api-0.1.0 → cli2api-0.2.0}/cli2api.sh +0 -0
  40. {cli2api-0.1.0 → cli2api-0.2.0}/docker-compose.yaml +0 -0
  41. {cli2api-0.1.0 → cli2api-0.2.0}/tests/__init__.py +0 -0
  42. {cli2api-0.1.0 → cli2api-0.2.0}/tests/conftest.py +0 -0
  43. {cli2api-0.1.0 → cli2api-0.2.0}/tests/test_api.py +0 -0
  44. {cli2api-0.1.0 → cli2api-0.2.0}/tests/test_config.py +0 -0
  45. {cli2api-0.1.0 → cli2api-0.2.0}/tests/test_integration.py +0 -0
  46. {cli2api-0.1.0 → cli2api-0.2.0}/tests/test_providers.py +0 -0
  47. {cli2api-0.1.0 → cli2api-0.2.0}/tests/test_schemas.py +0 -0
  48. {cli2api-0.1.0 → cli2api-0.2.0}/tests/test_streaming.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: cli2api
3
- Version: 0.1.0
3
+ Version: 0.2.0
4
4
  Summary: OpenAI-compatible API over Claude Code CLI
5
5
  Requires-Python: >=3.11
6
6
  Requires-Dist: fastapi>=0.115.0
@@ -35,45 +35,22 @@ Claude Code CLI is powerful but not all tools support it directly. CLI2API bridg
35
35
 
36
36
  ## Installation
37
37
 
38
- ### Quick Start
38
+ ### Quick Start (pip)
39
39
 
40
40
  ```bash
41
- # Clone the repository
42
- git clone https://github.com/anoxis/CLI2API.git
43
- cd CLI2API
44
-
45
- # Install dependencies
46
- pip install -e .
47
-
48
- # Start server
49
- ./cli2api.sh
50
- ```
51
-
52
- ### Add to PATH (Recommended)
53
-
54
- For easy access from anywhere:
55
-
56
- ```bash
57
- # Add to your shell profile (~/.bashrc, ~/.zshrc, etc.)
58
- export PATH="$PATH:/path/to/CLI2API"
59
-
60
- # Then run from anywhere
61
- cli2api.sh
41
+ pip install cli2api
42
+ cli2api
62
43
  ```
63
44
 
64
- ### Running After Installation
45
+ Server starts at http://localhost:8000
65
46
 
66
- Once installed, you can start the server in several ways:
47
+ ### From Source
67
48
 
68
49
  ```bash
69
- # Using the script (recommended)
70
- ./cli2api.sh
71
-
72
- # Using Python module
73
- python -m cli2api
74
-
75
- # Using uvicorn directly
76
- uvicorn cli2api.main:app --host 0.0.0.0 --port 8000
50
+ git clone https://github.com/anoxis/CLI2API.git
51
+ cd CLI2API
52
+ pip install -e .
53
+ cli2api
77
54
  ```
78
55
 
79
56
  ### Docker
@@ -111,7 +88,7 @@ pytest tests/ -v
111
88
 
112
89
  ### Kilo Code / Roo Code
113
90
 
114
- 1. Start CLI2API server: `./cli2api.sh`
91
+ 1. Start CLI2API server: `cli2api`
115
92
  2. Open extension settings
116
93
  3. Add custom provider:
117
94
  - **Provider Name:** `CLI2API` (or any name)
@@ -20,45 +20,22 @@ Claude Code CLI is powerful but not all tools support it directly. CLI2API bridg
20
20
 
21
21
  ## Installation
22
22
 
23
- ### Quick Start
23
+ ### Quick Start (pip)
24
24
 
25
25
  ```bash
26
- # Clone the repository
27
- git clone https://github.com/anoxis/CLI2API.git
28
- cd CLI2API
29
-
30
- # Install dependencies
31
- pip install -e .
32
-
33
- # Start server
34
- ./cli2api.sh
35
- ```
36
-
37
- ### Add to PATH (Recommended)
38
-
39
- For easy access from anywhere:
40
-
41
- ```bash
42
- # Add to your shell profile (~/.bashrc, ~/.zshrc, etc.)
43
- export PATH="$PATH:/path/to/CLI2API"
44
-
45
- # Then run from anywhere
46
- cli2api.sh
26
+ pip install cli2api
27
+ cli2api
47
28
  ```
48
29
 
49
- ### Running After Installation
30
+ Server starts at http://localhost:8000
50
31
 
51
- Once installed, you can start the server in several ways:
32
+ ### From Source
52
33
 
53
34
  ```bash
54
- # Using the script (recommended)
55
- ./cli2api.sh
56
-
57
- # Using Python module
58
- python -m cli2api
59
-
60
- # Using uvicorn directly
61
- uvicorn cli2api.main:app --host 0.0.0.0 --port 8000
35
+ git clone https://github.com/anoxis/CLI2API.git
36
+ cd CLI2API
37
+ pip install -e .
38
+ cli2api
62
39
  ```
63
40
 
64
41
  ### Docker
@@ -96,7 +73,7 @@ pytest tests/ -v
96
73
 
97
74
  ### Kilo Code / Roo Code
98
75
 
99
- 1. Start CLI2API server: `./cli2api.sh`
76
+ 1. Start CLI2API server: `cli2api`
100
77
  2. Open extension settings
101
78
  3. Add custom provider:
102
79
  - **Provider Name:** `CLI2API` (or any name)
@@ -0,0 +1,54 @@
1
+ """Chat completions endpoint - OpenAI compatible."""
2
+
3
+ from fastapi import APIRouter, Depends, HTTPException
4
+ from fastapi.responses import StreamingResponse
5
+
6
+ from cli2api.api.dependencies import get_provider
7
+ from cli2api.api.utils import parse_model_name
8
+ from cli2api.constants import HTTP_GATEWAY_TIMEOUT, HTTP_INTERNAL_ERROR
9
+ from cli2api.providers.claude import ClaudeCodeProvider
10
+ from cli2api.schemas.openai import ChatCompletionRequest
11
+ from cli2api.services.completion import CompletionService
12
+
13
+ router = APIRouter()
14
+
15
+
16
+ @router.post("/chat/completions")
17
+ async def chat_completions(
18
+ request: ChatCompletionRequest,
19
+ provider: ClaudeCodeProvider = Depends(get_provider),
20
+ ):
21
+ """Create a chat completion (OpenAI-compatible endpoint)."""
22
+ actual_model = parse_model_name(request.model)
23
+ service = CompletionService(provider)
24
+ completion_id = service.generate_completion_id()
25
+
26
+ if request.stream:
27
+ return StreamingResponse(
28
+ service.stream_completion(
29
+ messages=request.messages,
30
+ model=actual_model,
31
+ completion_id=completion_id,
32
+ tools=request.tools,
33
+ reasoning_effort=request.reasoning_effort,
34
+ ),
35
+ media_type="text/event-stream",
36
+ headers={
37
+ "Cache-Control": "no-cache",
38
+ "Connection": "keep-alive",
39
+ "X-Accel-Buffering": "no",
40
+ },
41
+ )
42
+
43
+ try:
44
+ response = await service.create_completion(
45
+ messages=request.messages,
46
+ model=actual_model,
47
+ completion_id=completion_id,
48
+ tools=request.tools,
49
+ )
50
+ return response.model_dump(exclude_none=True)
51
+ except TimeoutError as e:
52
+ raise HTTPException(status_code=HTTP_GATEWAY_TIMEOUT, detail=str(e))
53
+ except RuntimeError as e:
54
+ raise HTTPException(status_code=HTTP_INTERNAL_ERROR, detail=str(e))
@@ -10,13 +10,17 @@ from typing import Any, AsyncIterator, Optional
10
10
 
11
11
  from fastapi import APIRouter, Depends, HTTPException
12
12
  from fastapi.responses import StreamingResponse
13
- from pydantic import BaseModel, ConfigDict
13
+ from pydantic import BaseModel, ConfigDict, Field, field_validator
14
14
 
15
15
  from cli2api.api.dependencies import get_provider
16
16
  from cli2api.api.utils import parse_model_name
17
+ from cli2api.constants import ID_HEX_LENGTH, MESSAGE_ID_PREFIX, RESPONSE_ID_PREFIX
17
18
  from cli2api.providers.claude import ClaudeCodeProvider
18
19
  from cli2api.schemas.openai import ChatMessage
19
20
  from cli2api.streaming.sse import sse_encode, sse_error
21
+ from cli2api.utils.logging import get_logger
22
+
23
+ logger = get_logger(__name__)
20
24
 
21
25
  router = APIRouter()
22
26
 
@@ -25,8 +29,6 @@ router = APIRouter()
25
29
 
26
30
 
27
31
  class ResponsesInputMessage(BaseModel):
28
- """Input message for Responses API."""
29
-
30
32
  model_config = ConfigDict(extra="ignore")
31
33
 
32
34
  role: str
@@ -42,20 +44,28 @@ class ResponsesRequest(BaseModel):
42
44
  input: list[ResponsesInputMessage] | str
43
45
  stream: bool = False
44
46
  instructions: Optional[str] = None
45
- temperature: Optional[float] = None
46
- max_output_tokens: Optional[int] = None
47
+ temperature: Optional[float] = Field(default=None, ge=0.0, le=2.0)
48
+ max_output_tokens: Optional[int] = Field(default=None, gt=0)
47
49
  # Additional fields
48
50
  tools: Optional[list[Any]] = None
49
51
  tool_choice: Optional[Any] = None
50
52
  metadata: Optional[dict] = None
51
53
 
54
+ @field_validator("input")
55
+ @classmethod
56
+ def input_not_empty(cls, v: list | str) -> list | str:
57
+ """Validate that input is not empty."""
58
+ if isinstance(v, str) and not v.strip():
59
+ raise ValueError("input cannot be empty string")
60
+ if isinstance(v, list) and len(v) == 0:
61
+ raise ValueError("input cannot be empty list")
62
+ return v
63
+
52
64
 
53
65
  # === Response Models ===
54
66
 
55
67
 
56
68
  class ResponsesOutput(BaseModel):
57
- """Output content in response."""
58
-
59
69
  type: str = "message"
60
70
  id: str
61
71
  role: str = "assistant"
@@ -63,8 +73,6 @@ class ResponsesOutput(BaseModel):
63
73
 
64
74
 
65
75
  class ResponsesResponse(BaseModel):
66
- """Response body for /v1/responses."""
67
-
68
76
  id: str
69
77
  object: str = "response"
70
78
  created_at: int
@@ -74,40 +82,56 @@ class ResponsesResponse(BaseModel):
74
82
 
75
83
 
76
84
  def convert_to_chat_messages(request: ResponsesRequest) -> list[ChatMessage]:
77
- """Convert Responses API input to ChatMessage list."""
78
- messages = []
85
+ """Convert Responses API input to ChatMessage list.
86
+
87
+ Args:
88
+ request: The ResponsesRequest with input messages.
89
+
90
+ Returns:
91
+ List of ChatMessage objects for the provider.
92
+ """
93
+ messages: list[ChatMessage] = []
79
94
 
80
95
  # Add instructions as system message
81
96
  if request.instructions:
82
97
  messages.append(ChatMessage(role="system", content=request.instructions))
83
98
 
84
- # Handle input
99
+ # Handle input - can be string or list of messages
85
100
  if isinstance(request.input, str):
86
101
  messages.append(ChatMessage(role="user", content=request.input))
87
102
  else:
88
103
  for msg in request.input:
89
- content = msg.content
90
- if isinstance(content, list):
91
- # Extract text from content blocks
92
- texts = []
93
- for item in content:
94
- if isinstance(item, dict) and item.get("type") == "text":
95
- texts.append(item.get("text", ""))
96
- elif isinstance(item, str):
97
- texts.append(item)
98
- content = "\n".join(texts)
99
- elif content is None:
100
- content = ""
101
-
102
- role = msg.role
103
- if role not in ("system", "user", "assistant"):
104
- role = "user" # Default to user for unknown roles
105
-
106
- messages.append(ChatMessage(role=role, content=str(content)))
104
+ content: str = _extract_message_content(msg.content)
105
+ role: str = msg.role if msg.role in ("system", "user", "assistant") else "user"
106
+ messages.append(ChatMessage(role=role, content=content))
107
107
 
108
108
  return messages
109
109
 
110
110
 
111
+ def _extract_message_content(content: Any) -> str:
112
+ """Extract text content from various message content formats.
113
+
114
+ Args:
115
+ content: Message content - can be str, list of content blocks, or None.
116
+
117
+ Returns:
118
+ Extracted text content as string.
119
+ """
120
+ if content is None:
121
+ return ""
122
+ if isinstance(content, str):
123
+ return content
124
+ if isinstance(content, list):
125
+ texts: list[str] = []
126
+ for item in content:
127
+ if isinstance(item, dict) and item.get("type") == "text":
128
+ texts.append(item.get("text", ""))
129
+ elif isinstance(item, str):
130
+ texts.append(item)
131
+ return "\n".join(texts)
132
+ return str(content)
133
+
134
+
111
135
  @router.post("/responses")
112
136
  async def create_response(
113
137
  request: ResponsesRequest,
@@ -121,7 +145,7 @@ async def create_response(
121
145
 
122
146
  # Convert to chat messages
123
147
  messages = convert_to_chat_messages(request)
124
- response_id = f"resp-{uuid.uuid4().hex[:24]}"
148
+ response_id = f"{RESPONSE_ID_PREFIX}{uuid.uuid4().hex[:ID_HEX_LENGTH]}"
125
149
 
126
150
  if request.stream:
127
151
  return StreamingResponse(
@@ -150,7 +174,7 @@ async def create_response(
150
174
  except RuntimeError as e:
151
175
  raise HTTPException(status_code=500, detail=str(e))
152
176
 
153
- output_id = f"msg-{uuid.uuid4().hex[:24]}"
177
+ output_id = f"{MESSAGE_ID_PREFIX}{uuid.uuid4().hex[:ID_HEX_LENGTH]}"
154
178
 
155
179
  return ResponsesResponse(
156
180
  id=response_id,
@@ -174,7 +198,7 @@ async def stream_response(
174
198
  ) -> AsyncIterator[str]:
175
199
  """Generate SSE events for streaming response."""
176
200
  created = int(time.time())
177
- output_id = f"msg-{uuid.uuid4().hex[:24]}"
201
+ output_id = f"{MESSAGE_ID_PREFIX}{uuid.uuid4().hex[:ID_HEX_LENGTH]}"
178
202
  content_buffer = ""
179
203
 
180
204
  try:
@@ -248,8 +272,14 @@ async def stream_response(
248
272
  }
249
273
  })
250
274
 
275
+ logger.info(f"[{response_id}] Stream completed successfully")
251
276
  yield "data: [DONE]\n\n"
252
277
 
278
+ except RuntimeError as e:
279
+ logger.error(f"[{response_id}] Provider error: {e}")
280
+ yield sse_error(str(e))
281
+ yield "data: [DONE]\n\n"
253
282
  except Exception as e:
283
+ logger.error(f"[{response_id}] Stream error: {e}")
254
284
  yield sse_error(str(e))
255
285
  yield "data: [DONE]\n\n"
@@ -0,0 +1,61 @@
1
+ """Application constants.
2
+
3
+ Centralized location for magic numbers and strings used throughout the codebase.
4
+ """
5
+
6
+ # ====================
7
+ # ID Generation
8
+ # ====================
9
+
10
+ # Length of hex suffix for generated IDs (24 hex chars = 12 bytes of randomness)
11
+ ID_HEX_LENGTH = 24
12
+
13
+ # ID prefixes for different entity types
14
+ CHAT_COMPLETION_ID_PREFIX = "chatcmpl-"
15
+ RESPONSE_ID_PREFIX = "resp-"
16
+ MESSAGE_ID_PREFIX = "msg-"
17
+ TOOL_CALL_ID_PREFIX = "call_"
18
+
19
+ # ====================
20
+ # Streaming
21
+ # ====================
22
+
23
+ # Maximum size of content chunks when splitting for streaming
24
+ # Chosen to balance between responsiveness and overhead
25
+ STREAM_CHUNK_MAX_SIZE = 150
26
+
27
+ # Preferred split points for chunking (in priority order)
28
+ CHUNK_SPLIT_SEPARATORS = (" ", "\n", ".", ",", ";")
29
+
30
+ # Minimum position ratio for split point (don't split too early)
31
+ CHUNK_SPLIT_MIN_RATIO = 0.5
32
+
33
+ # ====================
34
+ # HTTP Status Codes
35
+ # ====================
36
+
37
+ HTTP_OK = 200
38
+ HTTP_BAD_REQUEST = 400
39
+ HTTP_INTERNAL_ERROR = 500
40
+ HTTP_GATEWAY_TIMEOUT = 504
41
+
42
+ # ====================
43
+ # OpenAI API
44
+ # ====================
45
+
46
+ # Default object types for OpenAI compatibility
47
+ OBJECT_CHAT_COMPLETION = "chat.completion"
48
+ OBJECT_CHAT_COMPLETION_CHUNK = "chat.completion.chunk"
49
+ OBJECT_MODEL = "model"
50
+ OBJECT_LIST = "list"
51
+
52
+ # Finish reasons
53
+ FINISH_REASON_STOP = "stop"
54
+ FINISH_REASON_TOOL_CALLS = "tool_calls"
55
+
56
+ # ====================
57
+ # Tool Call Markers
58
+ # ====================
59
+
60
+ TOOL_CALL_START_MARKER = "<tool_call>"
61
+ TOOL_CALL_END_MARKER = "</tool_call>"