ccproxy-api 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ccproxy/__init__.py +4 -0
- ccproxy/__main__.py +7 -0
- ccproxy/_version.py +21 -0
- ccproxy/adapters/__init__.py +11 -0
- ccproxy/adapters/base.py +80 -0
- ccproxy/adapters/openai/__init__.py +43 -0
- ccproxy/adapters/openai/adapter.py +915 -0
- ccproxy/adapters/openai/models.py +412 -0
- ccproxy/adapters/openai/streaming.py +449 -0
- ccproxy/api/__init__.py +28 -0
- ccproxy/api/app.py +225 -0
- ccproxy/api/dependencies.py +140 -0
- ccproxy/api/middleware/__init__.py +11 -0
- ccproxy/api/middleware/auth.py +0 -0
- ccproxy/api/middleware/cors.py +55 -0
- ccproxy/api/middleware/errors.py +703 -0
- ccproxy/api/middleware/headers.py +51 -0
- ccproxy/api/middleware/logging.py +175 -0
- ccproxy/api/middleware/request_id.py +69 -0
- ccproxy/api/middleware/server_header.py +62 -0
- ccproxy/api/responses.py +84 -0
- ccproxy/api/routes/__init__.py +16 -0
- ccproxy/api/routes/claude.py +181 -0
- ccproxy/api/routes/health.py +489 -0
- ccproxy/api/routes/metrics.py +1033 -0
- ccproxy/api/routes/proxy.py +238 -0
- ccproxy/auth/__init__.py +75 -0
- ccproxy/auth/bearer.py +68 -0
- ccproxy/auth/credentials_adapter.py +93 -0
- ccproxy/auth/dependencies.py +229 -0
- ccproxy/auth/exceptions.py +79 -0
- ccproxy/auth/manager.py +102 -0
- ccproxy/auth/models.py +118 -0
- ccproxy/auth/oauth/__init__.py +26 -0
- ccproxy/auth/oauth/models.py +49 -0
- ccproxy/auth/oauth/routes.py +396 -0
- ccproxy/auth/oauth/storage.py +0 -0
- ccproxy/auth/storage/__init__.py +12 -0
- ccproxy/auth/storage/base.py +57 -0
- ccproxy/auth/storage/json_file.py +159 -0
- ccproxy/auth/storage/keyring.py +192 -0
- ccproxy/claude_sdk/__init__.py +20 -0
- ccproxy/claude_sdk/client.py +169 -0
- ccproxy/claude_sdk/converter.py +331 -0
- ccproxy/claude_sdk/options.py +120 -0
- ccproxy/cli/__init__.py +14 -0
- ccproxy/cli/commands/__init__.py +8 -0
- ccproxy/cli/commands/auth.py +553 -0
- ccproxy/cli/commands/config/__init__.py +14 -0
- ccproxy/cli/commands/config/commands.py +766 -0
- ccproxy/cli/commands/config/schema_commands.py +119 -0
- ccproxy/cli/commands/serve.py +630 -0
- ccproxy/cli/docker/__init__.py +34 -0
- ccproxy/cli/docker/adapter_factory.py +157 -0
- ccproxy/cli/docker/params.py +278 -0
- ccproxy/cli/helpers.py +144 -0
- ccproxy/cli/main.py +193 -0
- ccproxy/cli/options/__init__.py +14 -0
- ccproxy/cli/options/claude_options.py +216 -0
- ccproxy/cli/options/core_options.py +40 -0
- ccproxy/cli/options/security_options.py +48 -0
- ccproxy/cli/options/server_options.py +117 -0
- ccproxy/config/__init__.py +40 -0
- ccproxy/config/auth.py +154 -0
- ccproxy/config/claude.py +124 -0
- ccproxy/config/cors.py +79 -0
- ccproxy/config/discovery.py +87 -0
- ccproxy/config/docker_settings.py +265 -0
- ccproxy/config/loader.py +108 -0
- ccproxy/config/observability.py +158 -0
- ccproxy/config/pricing.py +88 -0
- ccproxy/config/reverse_proxy.py +31 -0
- ccproxy/config/scheduler.py +89 -0
- ccproxy/config/security.py +14 -0
- ccproxy/config/server.py +81 -0
- ccproxy/config/settings.py +534 -0
- ccproxy/config/validators.py +231 -0
- ccproxy/core/__init__.py +274 -0
- ccproxy/core/async_utils.py +675 -0
- ccproxy/core/constants.py +97 -0
- ccproxy/core/errors.py +256 -0
- ccproxy/core/http.py +328 -0
- ccproxy/core/http_transformers.py +428 -0
- ccproxy/core/interfaces.py +247 -0
- ccproxy/core/logging.py +189 -0
- ccproxy/core/middleware.py +114 -0
- ccproxy/core/proxy.py +143 -0
- ccproxy/core/system.py +38 -0
- ccproxy/core/transformers.py +259 -0
- ccproxy/core/types.py +129 -0
- ccproxy/core/validators.py +288 -0
- ccproxy/docker/__init__.py +67 -0
- ccproxy/docker/adapter.py +588 -0
- ccproxy/docker/docker_path.py +207 -0
- ccproxy/docker/middleware.py +103 -0
- ccproxy/docker/models.py +228 -0
- ccproxy/docker/protocol.py +192 -0
- ccproxy/docker/stream_process.py +264 -0
- ccproxy/docker/validators.py +173 -0
- ccproxy/models/__init__.py +123 -0
- ccproxy/models/errors.py +42 -0
- ccproxy/models/messages.py +243 -0
- ccproxy/models/requests.py +85 -0
- ccproxy/models/responses.py +227 -0
- ccproxy/models/types.py +102 -0
- ccproxy/observability/__init__.py +51 -0
- ccproxy/observability/access_logger.py +400 -0
- ccproxy/observability/context.py +447 -0
- ccproxy/observability/metrics.py +539 -0
- ccproxy/observability/pushgateway.py +366 -0
- ccproxy/observability/sse_events.py +303 -0
- ccproxy/observability/stats_printer.py +755 -0
- ccproxy/observability/storage/__init__.py +1 -0
- ccproxy/observability/storage/duckdb_simple.py +665 -0
- ccproxy/observability/storage/models.py +55 -0
- ccproxy/pricing/__init__.py +19 -0
- ccproxy/pricing/cache.py +212 -0
- ccproxy/pricing/loader.py +267 -0
- ccproxy/pricing/models.py +106 -0
- ccproxy/pricing/updater.py +309 -0
- ccproxy/scheduler/__init__.py +39 -0
- ccproxy/scheduler/core.py +335 -0
- ccproxy/scheduler/exceptions.py +34 -0
- ccproxy/scheduler/manager.py +186 -0
- ccproxy/scheduler/registry.py +150 -0
- ccproxy/scheduler/tasks.py +484 -0
- ccproxy/services/__init__.py +10 -0
- ccproxy/services/claude_sdk_service.py +614 -0
- ccproxy/services/credentials/__init__.py +55 -0
- ccproxy/services/credentials/config.py +105 -0
- ccproxy/services/credentials/manager.py +562 -0
- ccproxy/services/credentials/oauth_client.py +482 -0
- ccproxy/services/proxy_service.py +1536 -0
- ccproxy/static/.keep +0 -0
- ccproxy/testing/__init__.py +34 -0
- ccproxy/testing/config.py +148 -0
- ccproxy/testing/content_generation.py +197 -0
- ccproxy/testing/mock_responses.py +262 -0
- ccproxy/testing/response_handlers.py +161 -0
- ccproxy/testing/scenarios.py +241 -0
- ccproxy/utils/__init__.py +6 -0
- ccproxy/utils/cost_calculator.py +210 -0
- ccproxy/utils/streaming_metrics.py +199 -0
- ccproxy_api-0.1.0.dist-info/METADATA +253 -0
- ccproxy_api-0.1.0.dist-info/RECORD +148 -0
- ccproxy_api-0.1.0.dist-info/WHEEL +4 -0
- ccproxy_api-0.1.0.dist-info/entry_points.txt +2 -0
- ccproxy_api-0.1.0.dist-info/licenses/LICENSE +21 -0
|
@@ -0,0 +1,331 @@
|
|
|
1
|
+
"""Message format converter for Claude SDK interactions."""
|
|
2
|
+
|
|
3
|
+
import json
|
|
4
|
+
from typing import Any, cast
|
|
5
|
+
from xml.sax.saxutils import escape
|
|
6
|
+
|
|
7
|
+
import structlog
|
|
8
|
+
|
|
9
|
+
from ccproxy.core.async_utils import patched_typing
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
logger = structlog.get_logger(__name__)
|
|
13
|
+
|
|
14
|
+
with patched_typing():
|
|
15
|
+
from claude_code_sdk import (
|
|
16
|
+
AssistantMessage,
|
|
17
|
+
ResultMessage,
|
|
18
|
+
TextBlock,
|
|
19
|
+
ToolResultBlock,
|
|
20
|
+
ToolUseBlock,
|
|
21
|
+
)
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
class MessageConverter:
    """Convert between Anthropic API message format and Claude SDK objects.

    All methods are stateless ``@staticmethod`` helpers; the class is used
    as a namespace rather than instantiated.
    """

    @staticmethod
    def format_messages_to_prompt(messages: list[dict[str, Any]]) -> str:
        """Convert Anthropic messages format to a single prompt string.

        System messages are skipped here; they are delivered to the SDK
        through options instead.

        Args:
            messages: List of messages in Anthropic format

        Returns:
            Single prompt string formatted for Claude SDK
        """
        prompt_parts: list[str] = []

        for message in messages:
            role = message.get("role", "")
            content = message.get("content", "")

            if isinstance(content, list):
                # Flatten "text" content blocks into one string; other
                # block types are ignored for prompt construction.
                content = " ".join(
                    block.get("text", "")
                    for block in content
                    if block.get("type") == "text"
                )

            if role == "user":
                prompt_parts.append(f"Human: {content}")
            elif role == "assistant":
                prompt_parts.append(f"Assistant: {content}")
            elif role == "system":
                # System messages are handled via options
                continue

        return "\n\n".join(prompt_parts)

    @staticmethod
    def extract_text_from_content(
        content: TextBlock | ToolUseBlock | ToolResultBlock,
    ) -> str:
        """Extract a text representation from a single SDK content block.

        Args:
            content: One content block from the Claude SDK

        Returns:
            Plain text for ``TextBlock``; an XML rendering for tool
            blocks; an empty string for unrecognized block types.
        """
        if isinstance(content, TextBlock):
            return content.text
        if isinstance(content, ToolUseBlock):
            # Render the tool invocation as XML so it survives as text.
            tool_id = escape(str(getattr(content, "id", f"tool_{id(content)}")))
            tool_name = escape(content.name)
            tool_input = getattr(content, "input", {}) or {}
            # Convert input dict to JSON string and escape for XML
            input_json = escape(json.dumps(tool_input, ensure_ascii=False))
            return f'<tooluseblock id="{tool_id}" name="{tool_name}">{input_json}</tooluseblock>'
        if isinstance(content, ToolResultBlock):
            # Render the tool result as XML; non-string payloads are
            # dropped (only string content is representable here).
            tool_use_id = escape(str(getattr(content, "tool_use_id", "")))
            result_content = content.content if isinstance(content.content, str) else ""
            escaped_content = escape(result_content)
            return f'<toolresultblock tool_use_id="{tool_use_id}">{escaped_content}</toolresultblock>'
        # Fix: previously fell through and implicitly returned None for
        # unknown block types, violating the declared -> str contract and
        # crashing extract_contents' " ".join().
        return ""

    @staticmethod
    def extract_contents(
        contents: list[TextBlock | ToolUseBlock | ToolResultBlock],
    ) -> str:
        """
        Extract content from Claude SDK blocks, preserving custom blocks.

        Args:
            contents: List of content blocks from Claude SDK

        Returns:
            Space-joined text with tool/thinking representations preserved
        """
        return " ".join(
            MessageConverter.extract_text_from_content(block) for block in contents
        )

    @staticmethod
    def convert_to_anthropic_response(
        assistant_message: AssistantMessage,
        result_message: ResultMessage,
        model: str,
    ) -> dict[str, Any]:
        """
        Convert Claude SDK messages to Anthropic API response format.

        Args:
            assistant_message: The assistant message from Claude SDK
            result_message: The result message from Claude SDK
            model: The model name used

        Returns:
            Response in Anthropic API format
        """
        import re

        # Extract token usage from result message.
        # First try to get usage from the usage field (preferred method).
        usage = getattr(result_message, "usage", {})
        if usage:
            input_tokens = usage.get("input_tokens", 0)
            output_tokens = usage.get("output_tokens", 0)
            cache_read_tokens = usage.get("cache_read_input_tokens", 0)
            cache_write_tokens = usage.get("cache_creation_input_tokens", 0)
        else:
            # Fallback to direct attributes
            input_tokens = getattr(result_message, "input_tokens", 0)
            output_tokens = getattr(result_message, "output_tokens", 0)
            cache_read_tokens = getattr(result_message, "cache_read_tokens", 0)
            cache_write_tokens = getattr(result_message, "cache_write_tokens", 0)

        # Log token extraction for debugging (uses the module-level logger;
        # the previous local re-import of structlog shadowed it needlessly).
        logger.debug(
            "assistant_message_content",
            content_blocks=[
                type(block).__name__ for block in assistant_message.content
            ],
            content_count=len(assistant_message.content),
            first_block_text=(
                assistant_message.content[0].text[:100]
                if assistant_message.content
                and hasattr(assistant_message.content[0], "text")
                else None
            ),
        )

        logger.debug(
            "token_usage_extracted",
            input_tokens=input_tokens,
            output_tokens=output_tokens,
            cache_read_tokens=cache_read_tokens,
            cache_write_tokens=cache_write_tokens,
            source="claude_sdk",
        )

        # Calculate total tokens
        total_tokens = input_tokens + output_tokens

        # Build usage information
        usage_info = {
            "input_tokens": input_tokens,
            "output_tokens": output_tokens,
            "cache_read_tokens": cache_read_tokens,
            "cache_write_tokens": cache_write_tokens,
            "total_tokens": total_tokens,
        }

        # Add cost information if available
        total_cost_usd = getattr(result_message, "total_cost_usd", None)
        if total_cost_usd is not None:
            usage_info["cost_usd"] = total_cost_usd

        # Convert content blocks to Anthropic format, preserving thinking blocks
        content_blocks: list[dict[str, Any]] = []
        thinking_pattern = r'<thinking signature="([^"]*)">(.*?)</thinking>'

        for block in assistant_message.content:
            if isinstance(block, TextBlock):
                # Parse text content for embedded thinking blocks.
                text = block.text

                last_end = 0
                for match in re.finditer(thinking_pattern, text, re.DOTALL):
                    # Add any text before the thinking block
                    before_text = text[last_end : match.start()].strip()
                    if before_text:
                        content_blocks.append({"type": "text", "text": before_text})

                    # Add the thinking block
                    signature, thinking_text = match.groups()
                    content_blocks.append(
                        {
                            "type": "thinking",
                            "text": thinking_text,
                            "signature": signature,
                        }
                    )

                    last_end = match.end()

                if last_end == 0:
                    # No thinking blocks found: emit the whole text once.
                    # Fix: the original appended text.strip() via the
                    # "remaining text" branch AND the full text again here,
                    # producing a duplicate text block.
                    if text:
                        content_blocks.append({"type": "text", "text": text})
                else:
                    # Add any remaining text after the last thinking block
                    remaining_text = text[last_end:].strip()
                    if remaining_text:
                        content_blocks.append(
                            {"type": "text", "text": remaining_text}
                        )

            elif isinstance(block, ToolUseBlock):
                tool_input = getattr(block, "input", {}) or {}
                content_blocks.append(
                    cast(
                        dict[str, Any],
                        {
                            "type": "tool_use",
                            "id": getattr(block, "id", f"tool_{id(block)}"),
                            "name": block.name,
                            "input": tool_input,
                        },
                    )
                )
            elif isinstance(block, ToolResultBlock):
                content_blocks.append(
                    {
                        "type": "tool_result",
                        "tool_use_id": getattr(block, "tool_use_id", ""),
                        "content": block.content
                        if isinstance(block.content, str)
                        else "",
                    }
                )

        return {
            "id": f"msg_{result_message.session_id}",
            "type": "message",
            "role": "assistant",
            "content": content_blocks,
            "model": model,
            "stop_reason": getattr(result_message, "stop_reason", "end_turn"),
            "stop_sequence": None,
            "usage": usage_info,
        }

    @staticmethod
    def create_streaming_start_chunk(message_id: str, model: str) -> dict[str, Any]:
        """
        Create the initial streaming chunk for Anthropic API format.

        Args:
            message_id: The message ID
            model: The model name

        Returns:
            Initial streaming chunk (``message_start`` event with zeroed usage)
        """
        return {
            "id": message_id,
            "type": "message_start",
            "message": {
                "id": message_id,
                "type": "message",
                "role": "assistant",
                "content": [],
                "model": model,
                "stop_reason": None,
                "stop_sequence": None,
                "usage": {
                    "input_tokens": 0,
                    "output_tokens": 0,
                    "total_tokens": 0,
                },
            },
        }

    @staticmethod
    def create_streaming_delta_chunk(text: str) -> dict[str, Any]:
        """
        Create a streaming delta chunk for Anthropic API format.

        Args:
            text: The text content to include

        Returns:
            Delta chunk (``content_block_delta`` event at index 0)
        """
        return {
            "type": "content_block_delta",
            "index": 0,
            "delta": {"type": "text_delta", "text": text},
        }

    @staticmethod
    def create_streaming_end_chunk(stop_reason: str = "end_turn") -> dict[str, Any]:
        """
        Create the final streaming chunk for Anthropic API format.

        Args:
            stop_reason: The reason for stopping

        Returns:
            Final streaming chunk (``message_delta`` event)
        """
        return {
            "type": "message_delta",
            "delta": {"stop_reason": stop_reason},
            "usage": {"output_tokens": 0},
        }
|
@@ -0,0 +1,120 @@
|
|
|
1
|
+
"""Options handling for Claude SDK interactions."""
|
|
2
|
+
|
|
3
|
+
from typing import Any
|
|
4
|
+
|
|
5
|
+
from ccproxy.core.async_utils import patched_typing
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
with patched_typing():
|
|
9
|
+
from claude_code_sdk import ClaudeCodeOptions
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
class OptionsHandler:
    """Create and manage option objects for the Claude SDK."""

    @staticmethod
    def create_options(
        model: str,
        temperature: float | None = None,
        max_tokens: int | None = None,
        system_message: str | None = None,
        **kwargs: Any,
    ) -> ClaudeCodeOptions:
        """Build a ``ClaudeCodeOptions`` instance from API-level parameters.

        Args:
            model: The model name
            temperature: Temperature for response generation
            max_tokens: Maximum tokens in response
            system_message: System message to include
            **kwargs: Additional options

        Returns:
            Configured ClaudeCodeOptions instance
        """
        options = ClaudeCodeOptions(model=model)

        if temperature is not None:
            options.temperature = temperature  # type: ignore[attr-defined]
        if max_tokens is not None:
            options.max_tokens = max_tokens  # type: ignore[attr-defined]
        if system_message is not None:
            options.system_prompt = system_message

        # Forward any remaining keyword options the SDK object understands.
        for name, value in kwargs.items():
            if hasattr(options, name):
                setattr(options, name, value)

        return options

    @staticmethod
    def extract_system_message(messages: list[dict[str, Any]]) -> str | None:
        """Return the first system message's text from Anthropic messages.

        Args:
            messages: List of messages in Anthropic format

        Returns:
            System message content if found, None otherwise
        """
        for message in messages:
            if message.get("role") != "system":
                continue
            content = message.get("content", "")
            if isinstance(content, list):
                # Join the text of any text-type content blocks.
                return " ".join(
                    block.get("text", "")
                    for block in content
                    if block.get("type") == "text"
                )
            return str(content)
        return None

    @staticmethod
    def get_supported_models() -> list[str]:
        """Return the sorted list of supported Claude model names.

        Returns:
            List of supported model names
        """
        # Import here to avoid circular imports
        from ccproxy.adapters.openai.adapter import OPENAI_TO_CLAUDE_MODEL_MAPPING

        # De-duplicate the Claude models referenced by the OpenAI mapping.
        return sorted(set(OPENAI_TO_CLAUDE_MODEL_MAPPING.values()))

    @staticmethod
    def validate_model(model: str) -> bool:
        """Check whether *model* is a supported Claude model.

        Args:
            model: The model name to validate

        Returns:
            True if supported, False otherwise
        """
        return model in OptionsHandler.get_supported_models()

    @staticmethod
    def get_default_options() -> dict[str, Any]:
        """Return the default option values for the Claude SDK.

        Returns:
            Dictionary of default options
        """
        return {
            "model": "claude-3-5-sonnet-20241022",
            "temperature": 0.7,
            "max_tokens": 4000,
        }
|
ccproxy/cli/__init__.py
ADDED
|
@@ -0,0 +1,14 @@
|
|
|
1
|
+
"""Public surface of the ccproxy command-line interface package.

Re-exports the CLI entry points and helpers from the submodules so callers
can import them directly from ``ccproxy.cli``.
"""

from .commands.serve import api, claude
from .helpers import get_rich_toolkit
from .main import app, app_main, main, version_callback


# Explicit public API of this package.
__all__ = [
    "app",
    "main",
    "version_callback",
    "api",
    "claude",
    "app_main",
    "get_rich_toolkit",
]
|