ccproxy-api 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ccproxy/__init__.py +4 -0
- ccproxy/__main__.py +7 -0
- ccproxy/_version.py +21 -0
- ccproxy/adapters/__init__.py +11 -0
- ccproxy/adapters/base.py +80 -0
- ccproxy/adapters/openai/__init__.py +43 -0
- ccproxy/adapters/openai/adapter.py +915 -0
- ccproxy/adapters/openai/models.py +412 -0
- ccproxy/adapters/openai/streaming.py +449 -0
- ccproxy/api/__init__.py +28 -0
- ccproxy/api/app.py +225 -0
- ccproxy/api/dependencies.py +140 -0
- ccproxy/api/middleware/__init__.py +11 -0
- ccproxy/api/middleware/auth.py +0 -0
- ccproxy/api/middleware/cors.py +55 -0
- ccproxy/api/middleware/errors.py +703 -0
- ccproxy/api/middleware/headers.py +51 -0
- ccproxy/api/middleware/logging.py +175 -0
- ccproxy/api/middleware/request_id.py +69 -0
- ccproxy/api/middleware/server_header.py +62 -0
- ccproxy/api/responses.py +84 -0
- ccproxy/api/routes/__init__.py +16 -0
- ccproxy/api/routes/claude.py +181 -0
- ccproxy/api/routes/health.py +489 -0
- ccproxy/api/routes/metrics.py +1033 -0
- ccproxy/api/routes/proxy.py +238 -0
- ccproxy/auth/__init__.py +75 -0
- ccproxy/auth/bearer.py +68 -0
- ccproxy/auth/credentials_adapter.py +93 -0
- ccproxy/auth/dependencies.py +229 -0
- ccproxy/auth/exceptions.py +79 -0
- ccproxy/auth/manager.py +102 -0
- ccproxy/auth/models.py +118 -0
- ccproxy/auth/oauth/__init__.py +26 -0
- ccproxy/auth/oauth/models.py +49 -0
- ccproxy/auth/oauth/routes.py +396 -0
- ccproxy/auth/oauth/storage.py +0 -0
- ccproxy/auth/storage/__init__.py +12 -0
- ccproxy/auth/storage/base.py +57 -0
- ccproxy/auth/storage/json_file.py +159 -0
- ccproxy/auth/storage/keyring.py +192 -0
- ccproxy/claude_sdk/__init__.py +20 -0
- ccproxy/claude_sdk/client.py +169 -0
- ccproxy/claude_sdk/converter.py +331 -0
- ccproxy/claude_sdk/options.py +120 -0
- ccproxy/cli/__init__.py +14 -0
- ccproxy/cli/commands/__init__.py +8 -0
- ccproxy/cli/commands/auth.py +553 -0
- ccproxy/cli/commands/config/__init__.py +14 -0
- ccproxy/cli/commands/config/commands.py +766 -0
- ccproxy/cli/commands/config/schema_commands.py +119 -0
- ccproxy/cli/commands/serve.py +630 -0
- ccproxy/cli/docker/__init__.py +34 -0
- ccproxy/cli/docker/adapter_factory.py +157 -0
- ccproxy/cli/docker/params.py +278 -0
- ccproxy/cli/helpers.py +144 -0
- ccproxy/cli/main.py +193 -0
- ccproxy/cli/options/__init__.py +14 -0
- ccproxy/cli/options/claude_options.py +216 -0
- ccproxy/cli/options/core_options.py +40 -0
- ccproxy/cli/options/security_options.py +48 -0
- ccproxy/cli/options/server_options.py +117 -0
- ccproxy/config/__init__.py +40 -0
- ccproxy/config/auth.py +154 -0
- ccproxy/config/claude.py +124 -0
- ccproxy/config/cors.py +79 -0
- ccproxy/config/discovery.py +87 -0
- ccproxy/config/docker_settings.py +265 -0
- ccproxy/config/loader.py +108 -0
- ccproxy/config/observability.py +158 -0
- ccproxy/config/pricing.py +88 -0
- ccproxy/config/reverse_proxy.py +31 -0
- ccproxy/config/scheduler.py +89 -0
- ccproxy/config/security.py +14 -0
- ccproxy/config/server.py +81 -0
- ccproxy/config/settings.py +534 -0
- ccproxy/config/validators.py +231 -0
- ccproxy/core/__init__.py +274 -0
- ccproxy/core/async_utils.py +675 -0
- ccproxy/core/constants.py +97 -0
- ccproxy/core/errors.py +256 -0
- ccproxy/core/http.py +328 -0
- ccproxy/core/http_transformers.py +428 -0
- ccproxy/core/interfaces.py +247 -0
- ccproxy/core/logging.py +189 -0
- ccproxy/core/middleware.py +114 -0
- ccproxy/core/proxy.py +143 -0
- ccproxy/core/system.py +38 -0
- ccproxy/core/transformers.py +259 -0
- ccproxy/core/types.py +129 -0
- ccproxy/core/validators.py +288 -0
- ccproxy/docker/__init__.py +67 -0
- ccproxy/docker/adapter.py +588 -0
- ccproxy/docker/docker_path.py +207 -0
- ccproxy/docker/middleware.py +103 -0
- ccproxy/docker/models.py +228 -0
- ccproxy/docker/protocol.py +192 -0
- ccproxy/docker/stream_process.py +264 -0
- ccproxy/docker/validators.py +173 -0
- ccproxy/models/__init__.py +123 -0
- ccproxy/models/errors.py +42 -0
- ccproxy/models/messages.py +243 -0
- ccproxy/models/requests.py +85 -0
- ccproxy/models/responses.py +227 -0
- ccproxy/models/types.py +102 -0
- ccproxy/observability/__init__.py +51 -0
- ccproxy/observability/access_logger.py +400 -0
- ccproxy/observability/context.py +447 -0
- ccproxy/observability/metrics.py +539 -0
- ccproxy/observability/pushgateway.py +366 -0
- ccproxy/observability/sse_events.py +303 -0
- ccproxy/observability/stats_printer.py +755 -0
- ccproxy/observability/storage/__init__.py +1 -0
- ccproxy/observability/storage/duckdb_simple.py +665 -0
- ccproxy/observability/storage/models.py +55 -0
- ccproxy/pricing/__init__.py +19 -0
- ccproxy/pricing/cache.py +212 -0
- ccproxy/pricing/loader.py +267 -0
- ccproxy/pricing/models.py +106 -0
- ccproxy/pricing/updater.py +309 -0
- ccproxy/scheduler/__init__.py +39 -0
- ccproxy/scheduler/core.py +335 -0
- ccproxy/scheduler/exceptions.py +34 -0
- ccproxy/scheduler/manager.py +186 -0
- ccproxy/scheduler/registry.py +150 -0
- ccproxy/scheduler/tasks.py +484 -0
- ccproxy/services/__init__.py +10 -0
- ccproxy/services/claude_sdk_service.py +614 -0
- ccproxy/services/credentials/__init__.py +55 -0
- ccproxy/services/credentials/config.py +105 -0
- ccproxy/services/credentials/manager.py +562 -0
- ccproxy/services/credentials/oauth_client.py +482 -0
- ccproxy/services/proxy_service.py +1536 -0
- ccproxy/static/.keep +0 -0
- ccproxy/testing/__init__.py +34 -0
- ccproxy/testing/config.py +148 -0
- ccproxy/testing/content_generation.py +197 -0
- ccproxy/testing/mock_responses.py +262 -0
- ccproxy/testing/response_handlers.py +161 -0
- ccproxy/testing/scenarios.py +241 -0
- ccproxy/utils/__init__.py +6 -0
- ccproxy/utils/cost_calculator.py +210 -0
- ccproxy/utils/streaming_metrics.py +199 -0
- ccproxy_api-0.1.0.dist-info/METADATA +253 -0
- ccproxy_api-0.1.0.dist-info/RECORD +148 -0
- ccproxy_api-0.1.0.dist-info/WHEEL +4 -0
- ccproxy_api-0.1.0.dist-info/entry_points.txt +2 -0
- ccproxy_api-0.1.0.dist-info/licenses/LICENSE +21 -0
|
@@ -0,0 +1,123 @@
|
|
|
1
|
+
"""Pydantic models for Claude Proxy API Server."""
|
|
2
|
+
|
|
3
|
+
from .messages import (
|
|
4
|
+
MessageContentBlock,
|
|
5
|
+
MessageCreateParams,
|
|
6
|
+
MessageResponse,
|
|
7
|
+
MetadataParams,
|
|
8
|
+
SystemMessage,
|
|
9
|
+
ThinkingConfig,
|
|
10
|
+
ToolChoiceParams,
|
|
11
|
+
)
|
|
12
|
+
from .requests import (
|
|
13
|
+
ImageContent,
|
|
14
|
+
Message,
|
|
15
|
+
MessageContent,
|
|
16
|
+
TextContent,
|
|
17
|
+
ToolDefinition,
|
|
18
|
+
Usage,
|
|
19
|
+
)
|
|
20
|
+
from .responses import (
|
|
21
|
+
APIError,
|
|
22
|
+
AuthenticationError,
|
|
23
|
+
ChatCompletionResponse,
|
|
24
|
+
Choice,
|
|
25
|
+
ErrorResponse,
|
|
26
|
+
InternalServerError,
|
|
27
|
+
InvalidRequestError,
|
|
28
|
+
NotFoundError,
|
|
29
|
+
OverloadedError,
|
|
30
|
+
RateLimitError,
|
|
31
|
+
ResponseContent,
|
|
32
|
+
StreamingChatCompletionResponse,
|
|
33
|
+
StreamingChoice,
|
|
34
|
+
TextResponse,
|
|
35
|
+
ToolCall,
|
|
36
|
+
ToolUse,
|
|
37
|
+
)
|
|
38
|
+
from .types import (
|
|
39
|
+
ContentBlockType,
|
|
40
|
+
ErrorType,
|
|
41
|
+
ImageSourceType,
|
|
42
|
+
MessageRole,
|
|
43
|
+
ModalityType,
|
|
44
|
+
OpenAIFinishReason,
|
|
45
|
+
PermissionBehavior,
|
|
46
|
+
ResponseFormatType,
|
|
47
|
+
ServiceTier,
|
|
48
|
+
StopReason,
|
|
49
|
+
StreamEventType,
|
|
50
|
+
ToolChoiceType,
|
|
51
|
+
ToolType,
|
|
52
|
+
)
|
|
53
|
+
|
|
54
|
+
|
|
55
|
+
__all__ = [
|
|
56
|
+
# Type aliases
|
|
57
|
+
"ContentBlockType",
|
|
58
|
+
"ErrorType",
|
|
59
|
+
"ImageSourceType",
|
|
60
|
+
"MessageRole",
|
|
61
|
+
"ModalityType",
|
|
62
|
+
"OpenAIFinishReason",
|
|
63
|
+
"PermissionBehavior",
|
|
64
|
+
"ResponseFormatType",
|
|
65
|
+
"ServiceTier",
|
|
66
|
+
"StopReason",
|
|
67
|
+
"StreamEventType",
|
|
68
|
+
"ToolChoiceType",
|
|
69
|
+
"ToolType",
|
|
70
|
+
# Message models
|
|
71
|
+
"MessageContentBlock",
|
|
72
|
+
"MessageCreateParams",
|
|
73
|
+
"MessageResponse",
|
|
74
|
+
"MetadataParams",
|
|
75
|
+
"SystemMessage",
|
|
76
|
+
"ThinkingConfig",
|
|
77
|
+
"ToolChoiceParams",
|
|
78
|
+
# Request models
|
|
79
|
+
"ImageContent",
|
|
80
|
+
"Message",
|
|
81
|
+
"MessageContent",
|
|
82
|
+
"TextContent",
|
|
83
|
+
"ToolDefinition",
|
|
84
|
+
"Usage",
|
|
85
|
+
# Response models
|
|
86
|
+
"APIError",
|
|
87
|
+
"AuthenticationError",
|
|
88
|
+
"ChatCompletionResponse",
|
|
89
|
+
"Choice",
|
|
90
|
+
"ErrorResponse",
|
|
91
|
+
"InternalServerError",
|
|
92
|
+
"InvalidRequestError",
|
|
93
|
+
"NotFoundError",
|
|
94
|
+
"OverloadedError",
|
|
95
|
+
"RateLimitError",
|
|
96
|
+
"ResponseContent",
|
|
97
|
+
"StreamingChatCompletionResponse",
|
|
98
|
+
"StreamingChoice",
|
|
99
|
+
"TextResponse",
|
|
100
|
+
"ToolCall",
|
|
101
|
+
"ToolUse",
|
|
102
|
+
# OpenAI-compatible models
|
|
103
|
+
"OpenAIChatCompletionRequest",
|
|
104
|
+
"OpenAIChatCompletionResponse",
|
|
105
|
+
"OpenAIChoice",
|
|
106
|
+
"OpenAIErrorDetail",
|
|
107
|
+
"OpenAIErrorResponse",
|
|
108
|
+
"OpenAIFunction",
|
|
109
|
+
"OpenAILogprobs",
|
|
110
|
+
"OpenAIMessage",
|
|
111
|
+
"OpenAIMessageContent",
|
|
112
|
+
"OpenAIModelInfo",
|
|
113
|
+
"OpenAIModelsResponse",
|
|
114
|
+
"OpenAIResponseFormat",
|
|
115
|
+
"OpenAIResponseMessage",
|
|
116
|
+
"OpenAIStreamingChatCompletionResponse",
|
|
117
|
+
"OpenAIStreamingChoice",
|
|
118
|
+
"OpenAIStreamOptions",
|
|
119
|
+
"OpenAITool",
|
|
120
|
+
"OpenAIToolCall",
|
|
121
|
+
"OpenAIToolChoice",
|
|
122
|
+
"OpenAIUsage",
|
|
123
|
+
]
|
ccproxy/models/errors.py
ADDED
|
@@ -0,0 +1,42 @@
|
|
|
1
|
+
"""Error response models for Anthropic API compatibility."""
|
|
2
|
+
|
|
3
|
+
from typing import Annotated, Any, Literal
|
|
4
|
+
|
|
5
|
+
from pydantic import BaseModel, Field
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
class ErrorDetail(BaseModel):
    """A single error payload: an identifier plus a human-readable message.

    Mirrors the ``{"type": ..., "message": ...}`` object that the Anthropic
    API nests inside its error envelope.
    """

    type: str = Field(description="Error type identifier")
    message: str = Field(description="Human-readable error message")
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
class AnthropicError(BaseModel):
    """Top-level Anthropic API error envelope.

    Always serializes with ``type == "error"`` and carries the concrete
    details in ``error``.
    """

    type: Literal["error"] = Field(default="error", description="Error type")
    error: ErrorDetail = Field(description="Error details")
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
# Note: Specific error model classes were removed as they were unused.
|
|
23
|
+
# Error responses are now forwarded directly from the upstream Claude API
|
|
24
|
+
# to preserve the exact error format and headers.
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
def create_error_response(
    error_type: str, message: str, status_code: int = 500
) -> tuple[dict[str, Any], int]:
    """Create a standardized error response.

    Args:
        error_type: Type of error (e.g., "invalid_request_error")
        message: Human-readable error message
        status_code: HTTP status code

    Returns:
        Tuple of (error_dict, status_code)
    """
    # Build the Anthropic-style envelope, then serialize it for the caller.
    detail = ErrorDetail(type=error_type, message=message)
    envelope = AnthropicError(error=detail)
    return envelope.model_dump(), status_code
|
|
@@ -0,0 +1,243 @@
|
|
|
1
|
+
"""Message models for Anthropic Messages API endpoint."""
|
|
2
|
+
|
|
3
|
+
from typing import Annotated, Any, Literal
|
|
4
|
+
|
|
5
|
+
from pydantic import BaseModel, ConfigDict, Field, field_validator
|
|
6
|
+
|
|
7
|
+
from .requests import Message, ToolDefinition, Usage
|
|
8
|
+
from .types import ContentBlockType, ServiceTier, StopReason, ToolChoiceType
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
class SystemMessage(BaseModel):
    """A single text block inside a structured system prompt."""

    # System content only supports "text" blocks.
    type: Literal["text"] = Field(default="text", description="Content type")
    text: str = Field(description="System message text")
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
class ThinkingConfig(BaseModel):
    """Configuration for extended thinking process."""

    type: Literal["enabled"] = Field(
        default="enabled", description="Enable thinking mode"
    )
    # ge=1024: the schema enforces a minimum thinking budget of 1024 tokens.
    budget_tokens: int = Field(
        description="Token budget for thinking process", ge=1024
    )
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
class MetadataParams(BaseModel):
    """Metadata about the request.

    Extra keys are accepted (and preserved) so callers can pass arbitrary
    metadata fields through to the upstream API.
    """

    user_id: Annotated[
        str | None,
        Field(description="External identifier for the user", max_length=256),
    ] = None

    # Pydantic v2 configuration; replaces the deprecated v1-style
    # ``class Config`` the model used previously, for consistency with the
    # ``ConfigDict`` usage elsewhere in this module.
    model_config = ConfigDict(extra="allow")
|
|
39
|
+
|
|
40
|
+
|
|
41
|
+
class ToolChoiceParams(BaseModel):
    """Tool choice configuration."""

    type: ToolChoiceType = Field(description="How the model should use tools")
    # Only meaningful when ``type`` is "tool".
    name: str | None = Field(
        default=None, description="Specific tool name (when type is 'tool')"
    )
    disable_parallel_tool_use: bool = Field(
        default=False, description="Disable parallel tool use"
    )
|
|
51
|
+
|
|
52
|
+
|
|
53
|
+
class MessageCreateParams(BaseModel):
    """Request parameters for creating messages via Anthropic Messages API.

    Field constraints mirror the public Anthropic API limits; unknown fields
    are rejected (``extra="forbid"``).
    """

    # Required fields
    model: Annotated[
        str,
        Field(
            description="The model to use for the message",
            pattern=r"^claude-.*",
        ),
    ]
    messages: Annotated[
        list[Message],
        Field(
            description="Array of messages in the conversation",
            min_length=1,
        ),
    ]
    max_tokens: Annotated[
        int,
        Field(
            description="Maximum number of tokens to generate",
            ge=1,
            le=200000,
        ),
    ]

    # Optional Anthropic API fields
    system: Annotated[
        str | list[SystemMessage] | None,
        Field(description="System prompt to provide context and instructions"),
    ] = None
    temperature: Annotated[
        float | None,
        Field(
            description="Sampling temperature between 0.0 and 1.0",
            ge=0.0,
            le=1.0,
        ),
    ] = None
    top_p: Annotated[
        float | None,
        Field(
            description="Nucleus sampling parameter",
            ge=0.0,
            le=1.0,
        ),
    ] = None
    top_k: Annotated[
        int | None,
        Field(
            description="Top-k sampling parameter",
            ge=0,
        ),
    ] = None
    stop_sequences: Annotated[
        list[str] | None,
        Field(
            description="Custom sequences where the model should stop generating",
            max_length=4,
        ),
    ] = None
    stream: Annotated[
        bool | None,
        Field(description="Whether to stream the response"),
    ] = False
    metadata: Annotated[
        MetadataParams | None,
        Field(description="Metadata about the request, including optional user_id"),
    ] = None
    tools: Annotated[
        list[ToolDefinition] | None,
        Field(description="Available tools/functions for the model to use"),
    ] = None
    tool_choice: Annotated[
        ToolChoiceParams | None,
        Field(description="How the model should use the provided tools"),
    ] = None
    service_tier: Annotated[
        ServiceTier | None,
        Field(description="Request priority level"),
    ] = None
    thinking: Annotated[
        ThinkingConfig | None,
        Field(description="Configuration for extended thinking process"),
    ] = None

    @field_validator("model")
    @classmethod
    def validate_model(cls, v: str) -> str:
        """Validate that the model is a Claude-family model.

        Any ``claude-*`` identifier is accepted so newly released models work
        without a code change.  Behavior is identical to the previous
        allow-list check: every allow-listed model name started with
        ``claude-``, so the hard-coded set could never reject anything and
        has been removed as dead code.  (The field's ``pattern`` constraint
        enforces the same prefix during normal validation.)
        """
        if not v.startswith("claude-"):
            raise ValueError(f"Model {v} is not supported")
        return v

    @field_validator("messages")
    @classmethod
    def validate_messages(cls, v: list[Message]) -> list[Message]:
        """Validate message alternation and content.

        Raises:
            ValueError: If the list is empty, does not start with a user
                message, or does not strictly alternate user/assistant roles.
        """
        if not v:
            raise ValueError("At least one message is required")

        # First message must be from user
        if v[0].role != "user":
            raise ValueError("First message must be from user")

        # Check for proper alternation
        for i in range(1, len(v)):
            if v[i].role == v[i - 1].role:
                raise ValueError("Messages must alternate between user and assistant")

        return v

    @field_validator("stop_sequences")
    @classmethod
    def validate_stop_sequences(cls, v: list[str] | None) -> list[str] | None:
        """Validate stop sequences: at most 4, each at most 100 characters."""
        if v is not None:
            if len(v) > 4:
                raise ValueError("Maximum 4 stop sequences allowed")
            for seq in v:
                if len(seq) > 100:
                    raise ValueError("Stop sequences must be 100 characters or less")
        return v

    model_config = ConfigDict(extra="forbid", validate_assignment=True)
|
|
197
|
+
|
|
198
|
+
|
|
199
|
+
class MessageContentBlock(BaseModel):
    """Content block in a message response.

    A loose union: only the fields relevant to ``type`` are populated
    (``text`` for text/thinking blocks; ``id``/``name``/``input`` for
    tool_use blocks).
    """

    type: ContentBlockType = Field(description="Type of content block")
    text: str | None = Field(
        default=None, description="Text content (for text/thinking blocks)"
    )
    id: str | None = Field(
        default=None, description="Unique ID (for tool_use blocks)"
    )
    name: str | None = Field(
        default=None, description="Tool name (for tool_use blocks)"
    )
    input: dict[str, Any] | None = Field(
        default=None, description="Tool input (for tool_use blocks)"
    )
|
|
215
|
+
|
|
216
|
+
|
|
217
|
+
class MessageResponse(BaseModel):
    """Response model for Anthropic Messages API endpoint."""

    id: str = Field(description="Unique identifier for the message")
    type: Literal["message"] = Field(default="message", description="Response type")
    role: Literal["assistant"] = Field(
        default="assistant", description="Message role"
    )
    content: list[MessageContentBlock] = Field(
        description="Array of content blocks in the response"
    )
    model: str = Field(description="The model used for the response")
    stop_reason: StopReason | None = Field(
        default=None, description="Reason why the model stopped generating"
    )
    stop_sequence: str | None = Field(
        default=None,
        description="The stop sequence that triggered stopping (if applicable)",
    )
    usage: Usage = Field(description="Token usage information")
    container: dict[str, Any] | None = Field(
        default=None,
        description="Information about container used in the request",
    )

    model_config = ConfigDict(extra="forbid", validate_assignment=True)
|
|
@@ -0,0 +1,85 @@
|
|
|
1
|
+
"""Request models for Claude Proxy API Server compatible with Anthropic's API format."""
|
|
2
|
+
|
|
3
|
+
from typing import Annotated, Any, Literal
|
|
4
|
+
|
|
5
|
+
from pydantic import BaseModel, ConfigDict, Field, field_validator, validator
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
class ImageSource(BaseModel):
    """Image source data."""

    type: Literal["base64", "url"] = Field(description="Source type")
    media_type: str = Field(
        description="Media type (e.g., image/jpeg, image/png)"
    )
    # NOTE(review): both payload fields are optional regardless of ``type``;
    # nothing enforces that base64 sources carry ``data`` or that url sources
    # carry ``url`` — confirm whether that is intentional.
    data: str | None = Field(default=None, description="Base64 encoded image data")
    url: str | None = Field(default=None, description="Image URL")

    model_config = ConfigDict(extra="forbid")
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
class ImageContent(BaseModel):
    """Image content block for multimodal messages."""

    type: Literal["image"] = Field(default="image", description="Content type")
    source: ImageSource = Field(
        description="Image source data with type (base64 or url) and media_type"
    )
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
class TextContent(BaseModel):
    """Text content block for messages."""

    type: Literal["text"] = Field(default="text", description="Content type")
    text: str = Field(description="The text content")
|
|
36
|
+
|
|
37
|
+
|
|
38
|
+
# Accepted forms for one element of a message's content: a structured text
# or image block, or a bare string.
MessageContent = TextContent | ImageContent | str
|
|
39
|
+
|
|
40
|
+
|
|
41
|
+
class Message(BaseModel):
    """Individual message in the conversation."""

    role: Literal["user", "assistant"] = Field(
        description="The role of the message sender"
    )
    # Either a plain string or a list of structured content blocks.
    content: str | list[MessageContent] = Field(
        description="The content of the message"
    )
|
|
51
|
+
|
|
52
|
+
|
|
53
|
+
class FunctionDefinition(BaseModel):
    """Function definition for tool calling."""

    name: str = Field(description="Function name")
    description: str = Field(description="Function description")
    parameters: dict[str, Any] = Field(
        description="JSON Schema for function parameters"
    )

    model_config = ConfigDict(extra="forbid")
|
|
63
|
+
|
|
64
|
+
|
|
65
|
+
class ToolDefinition(BaseModel):
    """Tool definition for function calling."""

    type: Literal["function"] = Field(default="function", description="Tool type")
    function: FunctionDefinition = Field(
        description="Function definition with name, description, and parameters"
    )
|
|
73
|
+
|
|
74
|
+
|
|
75
|
+
class Usage(BaseModel):
    """Token usage information.

    Cache counters default to ``None`` (as opposed to 0) so "not reported"
    is distinguishable from "reported as zero".
    """

    input_tokens: int = Field(default=0, description="Number of input tokens")
    output_tokens: int = Field(default=0, description="Number of output tokens")
    cache_creation_input_tokens: int | None = Field(
        default=None, description="Number of tokens used for cache creation"
    )
    cache_read_input_tokens: int | None = Field(
        default=None, description="Number of tokens read from cache"
    )
|