vectorvein 0.2.11__tar.gz → 0.2.13__tar.gz
This diff shows the changes between publicly released versions of the package as they appear in a supported public registry. It is provided for informational purposes only.
- {vectorvein-0.2.11 → vectorvein-0.2.13}/PKG-INFO +1 -1
- {vectorvein-0.2.11 → vectorvein-0.2.13}/pyproject.toml +1 -1
- {vectorvein-0.2.11 → vectorvein-0.2.13}/src/vectorvein/chat_clients/openai_compatible_client.py +2 -2
- {vectorvein-0.2.11 → vectorvein-0.2.13}/src/vectorvein/settings/__init__.py +72 -13
- {vectorvein-0.2.11 → vectorvein-0.2.13}/src/vectorvein/types/llm_parameters.py +44 -0
- vectorvein-0.2.13/src/vectorvein/types/settings.py +130 -0
- vectorvein-0.2.11/src/vectorvein/types/settings.py +0 -105
- {vectorvein-0.2.11 → vectorvein-0.2.13}/README.md +0 -0
- {vectorvein-0.2.11 → vectorvein-0.2.13}/src/vectorvein/__init__.py +0 -0
- {vectorvein-0.2.11 → vectorvein-0.2.13}/src/vectorvein/api/__init__.py +0 -0
- {vectorvein-0.2.11 → vectorvein-0.2.13}/src/vectorvein/api/client.py +0 -0
- {vectorvein-0.2.11 → vectorvein-0.2.13}/src/vectorvein/api/exceptions.py +0 -0
- {vectorvein-0.2.11 → vectorvein-0.2.13}/src/vectorvein/api/models.py +0 -0
- {vectorvein-0.2.11 → vectorvein-0.2.13}/src/vectorvein/chat_clients/__init__.py +0 -0
- {vectorvein-0.2.11 → vectorvein-0.2.13}/src/vectorvein/chat_clients/anthropic_client.py +0 -0
- {vectorvein-0.2.11 → vectorvein-0.2.13}/src/vectorvein/chat_clients/baichuan_client.py +0 -0
- {vectorvein-0.2.11 → vectorvein-0.2.13}/src/vectorvein/chat_clients/base_client.py +0 -0
- {vectorvein-0.2.11 → vectorvein-0.2.13}/src/vectorvein/chat_clients/deepseek_client.py +0 -0
- {vectorvein-0.2.11 → vectorvein-0.2.13}/src/vectorvein/chat_clients/gemini_client.py +0 -0
- {vectorvein-0.2.11 → vectorvein-0.2.13}/src/vectorvein/chat_clients/groq_client.py +0 -0
- {vectorvein-0.2.11 → vectorvein-0.2.13}/src/vectorvein/chat_clients/local_client.py +0 -0
- {vectorvein-0.2.11 → vectorvein-0.2.13}/src/vectorvein/chat_clients/minimax_client.py +0 -0
- {vectorvein-0.2.11 → vectorvein-0.2.13}/src/vectorvein/chat_clients/mistral_client.py +0 -0
- {vectorvein-0.2.11 → vectorvein-0.2.13}/src/vectorvein/chat_clients/moonshot_client.py +0 -0
- {vectorvein-0.2.11 → vectorvein-0.2.13}/src/vectorvein/chat_clients/openai_client.py +0 -0
- {vectorvein-0.2.11 → vectorvein-0.2.13}/src/vectorvein/chat_clients/py.typed +0 -0
- {vectorvein-0.2.11 → vectorvein-0.2.13}/src/vectorvein/chat_clients/qwen_client.py +0 -0
- {vectorvein-0.2.11 → vectorvein-0.2.13}/src/vectorvein/chat_clients/stepfun_client.py +0 -0
- {vectorvein-0.2.11 → vectorvein-0.2.13}/src/vectorvein/chat_clients/utils.py +0 -0
- {vectorvein-0.2.11 → vectorvein-0.2.13}/src/vectorvein/chat_clients/xai_client.py +0 -0
- {vectorvein-0.2.11 → vectorvein-0.2.13}/src/vectorvein/chat_clients/yi_client.py +0 -0
- {vectorvein-0.2.11 → vectorvein-0.2.13}/src/vectorvein/chat_clients/zhipuai_client.py +0 -0
- {vectorvein-0.2.11 → vectorvein-0.2.13}/src/vectorvein/py.typed +0 -0
- {vectorvein-0.2.11 → vectorvein-0.2.13}/src/vectorvein/server/token_server.py +0 -0
- {vectorvein-0.2.11 → vectorvein-0.2.13}/src/vectorvein/settings/py.typed +0 -0
- {vectorvein-0.2.11 → vectorvein-0.2.13}/src/vectorvein/types/__init__.py +0 -0
- {vectorvein-0.2.11 → vectorvein-0.2.13}/src/vectorvein/types/defaults.py +0 -0
- {vectorvein-0.2.11 → vectorvein-0.2.13}/src/vectorvein/types/enums.py +0 -0
- {vectorvein-0.2.11 → vectorvein-0.2.13}/src/vectorvein/types/exception.py +0 -0
- {vectorvein-0.2.11 → vectorvein-0.2.13}/src/vectorvein/types/py.typed +0 -0
- {vectorvein-0.2.11 → vectorvein-0.2.13}/src/vectorvein/utilities/media_processing.py +0 -0
- {vectorvein-0.2.11 → vectorvein-0.2.13}/src/vectorvein/utilities/rate_limiter.py +0 -0
- {vectorvein-0.2.11 → vectorvein-0.2.13}/src/vectorvein/utilities/retry.py +0 -0
- {vectorvein-0.2.11 → vectorvein-0.2.13}/src/vectorvein/workflow/graph/edge.py +0 -0
- {vectorvein-0.2.11 → vectorvein-0.2.13}/src/vectorvein/workflow/graph/node.py +0 -0
- {vectorvein-0.2.11 → vectorvein-0.2.13}/src/vectorvein/workflow/graph/port.py +0 -0
- {vectorvein-0.2.11 → vectorvein-0.2.13}/src/vectorvein/workflow/graph/workflow.py +0 -0
- {vectorvein-0.2.11 → vectorvein-0.2.13}/src/vectorvein/workflow/nodes/__init__.py +0 -0
- {vectorvein-0.2.11 → vectorvein-0.2.13}/src/vectorvein/workflow/nodes/audio_generation.py +0 -0
- {vectorvein-0.2.11 → vectorvein-0.2.13}/src/vectorvein/workflow/nodes/control_flows.py +0 -0
- {vectorvein-0.2.11 → vectorvein-0.2.13}/src/vectorvein/workflow/nodes/file_processing.py +0 -0
- {vectorvein-0.2.11 → vectorvein-0.2.13}/src/vectorvein/workflow/nodes/image_generation.py +0 -0
- {vectorvein-0.2.11 → vectorvein-0.2.13}/src/vectorvein/workflow/nodes/llms.py +0 -0
- {vectorvein-0.2.11 → vectorvein-0.2.13}/src/vectorvein/workflow/nodes/media_editing.py +0 -0
- {vectorvein-0.2.11 → vectorvein-0.2.13}/src/vectorvein/workflow/nodes/media_processing.py +0 -0
- {vectorvein-0.2.11 → vectorvein-0.2.13}/src/vectorvein/workflow/nodes/output.py +0 -0
- {vectorvein-0.2.11 → vectorvein-0.2.13}/src/vectorvein/workflow/nodes/relational_db.py +0 -0
- {vectorvein-0.2.11 → vectorvein-0.2.13}/src/vectorvein/workflow/nodes/text_processing.py +0 -0
- {vectorvein-0.2.11 → vectorvein-0.2.13}/src/vectorvein/workflow/nodes/tools.py +0 -0
- {vectorvein-0.2.11 → vectorvein-0.2.13}/src/vectorvein/workflow/nodes/triggers.py +0 -0
- {vectorvein-0.2.11 → vectorvein-0.2.13}/src/vectorvein/workflow/nodes/vector_db.py +0 -0
- {vectorvein-0.2.11 → vectorvein-0.2.13}/src/vectorvein/workflow/nodes/video_generation.py +0 -0
- {vectorvein-0.2.11 → vectorvein-0.2.13}/src/vectorvein/workflow/nodes/web_crawlers.py +0 -0
- {vectorvein-0.2.11 → vectorvein-0.2.13}/src/vectorvein/workflow/utils/json_to_code.py +0 -0
{vectorvein-0.2.11 → vectorvein-0.2.13}/src/vectorvein/chat_clients/openai_compatible_client.py
RENAMED
@@ -99,7 +99,7 @@ class OpenAICompatibleChatClient(BaseChatClient):
             return AzureOpenAI(
                 azure_endpoint=self.endpoint.api_base,
                 api_key=self.endpoint.api_key,
-                api_version="
+                api_version="2025-01-01-preview",
                 http_client=self.http_client,
             )
         else:
@@ -574,7 +574,7 @@ class AsyncOpenAICompatibleChatClient(BaseAsyncChatClient):
             return AsyncAzureOpenAI(
                 azure_endpoint=self.endpoint.api_base,
                 api_key=self.endpoint.api_key,
-                api_version="
+                api_version="2025-01-01-preview",
                 http_client=self.http_client,
             )
         else:
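Both the sync and async Azure clients now pin the Azure OpenAI `api_version` to `2025-01-01-preview`, replacing the previous value (truncated in this diff view). A minimal sketch of what the updated construction amounts to; the endpoint URL and key below are placeholders, not values from the package:

```python
from openai import AzureOpenAI

# Hypothetical values; in vectorvein these come from self.endpoint.
client = AzureOpenAI(
    azure_endpoint="https://my-resource.openai.azure.com",  # placeholder
    api_key="...",                                          # placeholder
    api_version="2025-01-01-preview",  # the version now pinned in 0.2.13
)
```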
{vectorvein-0.2.11 → vectorvein-0.2.13}/src/vectorvein/settings/__init__.py
RENAMED
@@ -1,11 +1,13 @@
 # @Author: Bi Ying
 # @Date: 2024-07-27 00:30:56
-
+import warnings
+from typing import List, Optional, Literal
 
 from pydantic import BaseModel, Field
 
 from ..types import defaults as defs
 from ..types.enums import BackendType
+from ..types.settings import SettingsDict
 from ..types.llm_parameters import BackendSettings, EndpointSetting
 
 
@@ -35,12 +37,8 @@ class Server(BaseModel):
     url: Optional[str]
 
 
-class
-
-        default_factory=list, description="Available endpoints for the LLM service."
-    )
-    token_server: Optional[Server] = Field(default=None, description="Token server address. Format: host:port")
-    rate_limit: Optional[RateLimitConfig] = Field(default=None, description="Rate limit settings.")
+class Backends(BaseModel):
+    """Model containing all backend configurations in one place."""
 
     anthropic: BackendSettings = Field(default_factory=BackendSettings, description="Anthropic models settings.")
     deepseek: BackendSettings = Field(default_factory=BackendSettings, description="Deepseek models settings.")
@@ -58,6 +56,45 @@ class Settings(BaseModel):
     stepfun: BackendSettings = Field(default_factory=BackendSettings, description="StepFun models settings.")
     xai: BackendSettings = Field(default_factory=BackendSettings, description="XAI models settings.")
 
+
+class Settings(BaseModel):
+    VERSION: Optional[str] = Field(
+        default=None, description="Configuration version. If provided, will use the corresponding format."
+    )
+    endpoints: List[EndpointSetting] = Field(
+        default_factory=list, description="Available endpoints for the LLM service."
+    )
+    token_server: Optional[Server] = Field(default=None, description="Token server address. Format: host:port")
+    rate_limit: Optional[RateLimitConfig] = Field(default=None, description="Rate limit settings.")
+
+    # V2 format: all model backend configs in a single dictionary
+    backends: Optional[Backends] = Field(default=None, description="All model backends in one place (V2 format).")
+
+    # V1 format: each model backend config
+    anthropic: Optional[BackendSettings] = Field(
+        default_factory=BackendSettings, description="Anthropic models settings."
+    )
+    deepseek: Optional[BackendSettings] = Field(
+        default_factory=BackendSettings, description="Deepseek models settings."
+    )
+    gemini: Optional[BackendSettings] = Field(default_factory=BackendSettings, description="Gemini models settings.")
+    groq: Optional[BackendSettings] = Field(default_factory=BackendSettings, description="Groq models settings.")
+    local: Optional[BackendSettings] = Field(default_factory=BackendSettings, description="Local models settings.")
+    minimax: Optional[BackendSettings] = Field(default_factory=BackendSettings, description="Minimax models settings.")
+    mistral: Optional[BackendSettings] = Field(default_factory=BackendSettings, description="Mistral models settings.")
+    moonshot: Optional[BackendSettings] = Field(
+        default_factory=BackendSettings, description="Moonshot models settings."
+    )
+    openai: Optional[BackendSettings] = Field(default_factory=BackendSettings, description="OpenAI models settings.")
+    qwen: Optional[BackendSettings] = Field(default_factory=BackendSettings, description="Qwen models settings.")
+    yi: Optional[BackendSettings] = Field(default_factory=BackendSettings, description="Yi models settings.")
+    zhipuai: Optional[BackendSettings] = Field(default_factory=BackendSettings, description="Zhipuai models settings.")
+    baichuan: Optional[BackendSettings] = Field(
+        default_factory=BackendSettings, description="Baichuan models settings."
+    )
+    stepfun: Optional[BackendSettings] = Field(default_factory=BackendSettings, description="StepFun models settings.")
+    xai: Optional[BackendSettings] = Field(default_factory=BackendSettings, description="XAI models settings.")
+
     def __init__(self, **data):
         model_types = {
             "anthropic": defs.ANTHROPIC_MODELS,
@@ -74,15 +111,30 @@ class Settings(BaseModel):
             "zhipuai": defs.ZHIPUAI_MODELS,
             "baichuan": defs.BAICHUAN_MODELS,
             "stepfun": defs.STEPFUN_MODELS,
+            "xai": defs.XAI_MODELS,
         }
 
+        version = data.get("VERSION")
+
+        # If V2 format, model configs are in the backends dictionary
+        if version == "2":
+            if "backends" not in data:
+                raise ValueError("backends is required in V2 format.")
+
+            backends = data["backends"]
+        else:
+            backends = data
+            if len(data) > 0:
+                warnings.warn("You're using vectorvein's deprecated V1 format. Please use V2 format.")
+
         for model_type, default_models in model_types.items():
-            if model_type in
+            if model_type in backends:
                 model_settings = BackendSettings()
-
-
+                user_models = backends[model_type].get("models", {})
+                model_settings.update_models(default_models, user_models)
+                backends[model_type] = model_settings
             else:
-
+                backends[model_type] = BackendSettings(models=default_models)
 
         for endpoint in data.get("endpoints", []):
             if not endpoint.get("api_base"):
@@ -94,7 +146,7 @@ class Settings(BaseModel):
 
         super().__init__(**data)
 
-    def load(self, settings_dict:
+    def load(self, settings_dict: SettingsDict):
         self.__init__(**settings_dict)
 
     def get_endpoint(self, endpoint_id: str) -> EndpointSetting:
@@ -104,7 +156,14 @@ class Settings(BaseModel):
             raise ValueError(f"Endpoint {endpoint_id} not found.")
 
     def get_backend(self, backend: BackendType) -> BackendSettings:
-
+        backend_name = backend.value.lower()
+
+        # Use VERSION 2 format backends field first
+        if self.VERSION == "2" and self.backends is not None:
+            return getattr(self.backends, backend_name)
+
+        # Compatible with VERSION 1 format
+        return getattr(self, backend_name)
 
 
 settings = Settings()
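Taken together, these changes introduce a versioned configuration format: with `VERSION: "2"` all per-backend settings move under a single `backends` key, while the old top-level backend keys (V1 layout) keep working but now emit a deprecation warning. A sketch of loading a V2-format dictionary, assuming the module-level `settings` singleton and a `BackendType.OpenAI` enum member (the enum's members are not shown in this diff); the endpoint id and key are hypothetical:

```python
from vectorvein.settings import settings
from vectorvein.types.enums import BackendType

config = {
    "VERSION": "2",  # opt in to the V2 format; omitting it falls back to V1 plus a warning
    "endpoints": [
        {"id": "my-endpoint", "api_base": "https://api.openai.com/v1", "api_key": "sk-..."},  # hypothetical
    ],
    "backends": {  # required when VERSION == "2"
        "openai": {"models": {"gpt-4o": {"id": "gpt-4o", "endpoints": ["my-endpoint"]}}},
    },
}
settings.load(config)

# get_backend() reads settings.backends first when VERSION == "2",
# and falls back to the top-level V1 fields otherwise.
backend = settings.get_backend(BackendType.OpenAI)
```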
{vectorvein-0.2.11 → vectorvein-0.2.13}/src/vectorvein/types/llm_parameters.py
RENAMED
@@ -2,6 +2,7 @@
 # @Date: 2024-07-26 23:48:04
 from typing import List, Dict, Optional, Union, Iterable
 
+import httpx
 from pydantic import BaseModel, Field
 
 from anthropic._types import NotGiven as AnthropicNotGiven
@@ -10,6 +11,7 @@ from anthropic.types import ToolParam as AnthropicToolParam
 from anthropic.types import ThinkingConfigParam, ThinkingConfigEnabledParam
 from anthropic.types.message_create_params import ToolChoice as AnthropicToolChoice
 
+from openai import OpenAI, AsyncOpenAI, AzureOpenAI, AsyncAzureOpenAI
 from openai._types import NotGiven as OpenAINotGiven
 from openai._types import NOT_GIVEN as OPENAI_NOT_GIVEN
 from openai.types.chat.completion_create_params import ResponseFormat
@@ -45,6 +47,48 @@ class EndpointSetting(BaseModel):
     )
     proxy: Optional[str] = Field(None, description="The proxy URL for the endpoint.")
 
+    def model_list(self):
+        http_client = httpx.Client(proxy=self.proxy) if self.proxy is not None else None
+
+        if self.is_azure:
+            if self.api_base is None:
+                raise ValueError("Azure endpoint is not set")
+            _client = AzureOpenAI(
+                azure_endpoint=self.api_base,
+                api_key=self.api_key,
+                api_version="2025-01-01-preview",
+                http_client=http_client,
+            )
+        else:
+            _client = OpenAI(
+                api_key=self.api_key,
+                base_url=self.api_base,
+                http_client=http_client,
+            )
+
+        return _client.models.list().model_dump()
+
+    async def amodel_list(self):
+        http_client = httpx.AsyncClient(proxy=self.proxy) if self.proxy is not None else None
+
+        if self.is_azure:
+            if self.api_base is None:
+                raise ValueError("Azure endpoint is not set")
+            _client = AsyncAzureOpenAI(
+                azure_endpoint=self.api_base,
+                api_key=self.api_key,
+                api_version="2025-01-01-preview",
+                http_client=http_client,
+            )
+        else:
+            _client = AsyncOpenAI(
+                api_key=self.api_key,
+                base_url=self.api_base,
+                http_client=http_client,
+            )
+
+        return (await _client.models.list()).model_dump()
+
 
 class ModelSetting(BaseModel):
     id: str = Field(..., description="The id of the model.")
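`EndpointSetting` thus gains `model_list()` and `amodel_list()` helpers that query the endpoint's model list through the (Azure)OpenAI SDK, honoring the endpoint's proxy setting. A usage sketch with a hypothetical endpoint id:

```python
from vectorvein.settings import settings

endpoint = settings.get_endpoint("my-endpoint")  # hypothetical id

# Sync: returns the model_dump() of the SDK's models.list() response.
models = endpoint.model_list()

# Async variant (inside an event loop):
# models = await endpoint.amodel_list()
```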
vectorvein-0.2.13/src/vectorvein/types/settings.py
ADDED
@@ -0,0 +1,130 @@
+from typing import Dict, List, Optional, Union, Literal
+from typing_extensions import TypedDict, NotRequired  # Required by pydantic under Python < 3.12
+
+
+class RedisConfigDict(TypedDict):
+    """TypedDict representing the RedisConfig structure."""
+
+    host: str
+    port: int
+    db: int
+
+
+class DiskCacheConfigDict(TypedDict):
+    """TypedDict representing the DiskCacheConfig structure."""
+
+    cache_dir: str
+
+
+class RateLimitConfigDict(TypedDict):
+    """TypedDict representing the RateLimitConfig structure."""
+
+    enabled: bool
+    backend: Literal["memory", "redis", "diskcache"]
+    redis: NotRequired[RedisConfigDict]
+    diskcache: NotRequired[DiskCacheConfigDict]
+    default_rpm: int
+    default_tpm: int
+
+
+class ServerDict(TypedDict):
+    """TypedDict representing the Server structure."""
+
+    host: str
+    port: int
+    url: NotRequired[str]
+
+
+class EndpointOptionDict(TypedDict):
+    """TypedDict representing the model endpoint option structure."""
+
+    endpoint_id: str
+    model_id: str
+    rpm: NotRequired[int]
+    tpm: NotRequired[int]
+    concurrent_requests: NotRequired[int]
+
+
+class ModelConfigDict(TypedDict):
+    """TypedDict representing the model configuration structure."""
+
+    id: str
+    endpoints: List[Union[str, EndpointOptionDict]]
+    function_call_available: NotRequired[bool]
+    response_format_available: NotRequired[bool]
+    native_multimodal: NotRequired[bool]
+    context_length: NotRequired[int]
+    max_output_tokens: NotRequired[int]
+
+
+class BackendSettingsDict(TypedDict):
+    """TypedDict representing the BackendSettings structure."""
+
+    models: Dict[str, ModelConfigDict]
+
+
+class EndpointSettingDict(TypedDict):
+    """TypedDict representing the EndpointSetting structure."""
+
+    id: str
+    api_base: NotRequired[Optional[str]]
+    api_key: NotRequired[str]
+    region: NotRequired[str]
+    api_schema_type: NotRequired[str]
+    credentials: NotRequired[dict]
+    is_azure: NotRequired[bool]
+    is_vertex: NotRequired[bool]
+    is_bedrock: NotRequired[bool]
+    rpm: NotRequired[int]
+    tpm: NotRequired[int]
+    concurrent_requests: NotRequired[int]
+    proxy: NotRequired[str]
+
+
+class BackendsDict(TypedDict):
+    """TypedDict representing all model backends in a single dictionary."""
+
+    anthropic: NotRequired[BackendSettingsDict]
+    deepseek: NotRequired[BackendSettingsDict]
+    gemini: NotRequired[BackendSettingsDict]
+    groq: NotRequired[BackendSettingsDict]
+    local: NotRequired[BackendSettingsDict]
+    minimax: NotRequired[BackendSettingsDict]
+    mistral: NotRequired[BackendSettingsDict]
+    moonshot: NotRequired[BackendSettingsDict]
+    openai: NotRequired[BackendSettingsDict]
+    qwen: NotRequired[BackendSettingsDict]
+    yi: NotRequired[BackendSettingsDict]
+    zhipuai: NotRequired[BackendSettingsDict]
+    baichuan: NotRequired[BackendSettingsDict]
+    stepfun: NotRequired[BackendSettingsDict]
+    xai: NotRequired[BackendSettingsDict]
+
+
+class SettingsDict(TypedDict):
+    """TypedDict representing the expected structure of the settings dictionary."""
+
+    VERSION: NotRequired[str]
+    endpoints: List[EndpointSettingDict]
+    token_server: NotRequired[ServerDict]
+    rate_limit: NotRequired[RateLimitConfigDict]
+
+    # V2 format: all model backend configs in a single dictionary
+    backends: NotRequired[BackendsDict]
+
+    # V1 format: each model backend config
+    anthropic: NotRequired[BackendSettingsDict]
+    deepseek: NotRequired[BackendSettingsDict]
+    gemini: NotRequired[BackendSettingsDict]
+    groq: NotRequired[BackendSettingsDict]
+    local: NotRequired[BackendSettingsDict]
+    minimax: NotRequired[BackendSettingsDict]
+    mistral: NotRequired[BackendSettingsDict]
+    moonshot: NotRequired[BackendSettingsDict]
+    openai: NotRequired[BackendSettingsDict]
+    qwen: NotRequired[BackendSettingsDict]
+    yi: NotRequired[BackendSettingsDict]
+    zhipuai: NotRequired[BackendSettingsDict]
+    baichuan: NotRequired[BackendSettingsDict]
+    stepfun: NotRequired[BackendSettingsDict]
+    xai: NotRequired[BackendSettingsDict]
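The practical effect of the rewrite is that most keys are now `NotRequired`, so a partial configuration type-checks, whereas the old definitions (deleted below) made every backend key mandatory. A minimal sketch with hypothetical values:

```python
from vectorvein.types.settings import SettingsDict

config: SettingsDict = {
    "VERSION": "2",
    "endpoints": [{"id": "my-endpoint", "api_key": "sk-..."}],  # hypothetical values
    "backends": {"openai": {"models": {}}},
    # token_server, rate_limit and all V1 per-backend keys may be omitted
}
```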
vectorvein-0.2.11/src/vectorvein/types/settings.py
DELETED
@@ -1,105 +0,0 @@
-from typing import Dict, List, Optional, Union, Literal
-from typing_extensions import TypedDict, NotRequired  # Required by pydantic under Python < 3.12
-
-
-class RedisConfigDict(TypedDict):
-    """TypedDict representing the RedisConfig structure."""
-
-    host: str
-    port: int
-    db: int
-
-
-class DiskCacheConfigDict(TypedDict):
-    """TypedDict representing the DiskCacheConfig structure."""
-
-    cache_dir: str
-
-
-class RateLimitConfigDict(TypedDict):
-    """TypedDict representing the RateLimitConfig structure."""
-
-    enabled: bool
-    backend: Literal["memory", "redis", "diskcache"]
-    redis: Optional[RedisConfigDict]
-    diskcache: Optional[DiskCacheConfigDict]
-    default_rpm: int
-    default_tpm: int
-
-
-class ServerDict(TypedDict):
-    """TypedDict representing the Server structure."""
-
-    host: str
-    port: int
-    url: Optional[str]
-
-
-class EndpointOptionDict(TypedDict):
-    """TypedDict representing the model endpoint option structure."""
-
-    endpoint_id: str
-    model_id: str
-    rpm: NotRequired[int]
-    tpm: NotRequired[int]
-    concurrent_requests: NotRequired[int]
-
-
-class ModelConfigDict(TypedDict):
-    """TypedDict representing the model configuration structure."""
-
-    id: str
-    endpoints: List[Union[str, EndpointOptionDict]]
-    function_call_available: bool
-    response_format_available: bool
-    native_multimodal: bool
-    context_length: int
-    max_output_tokens: Optional[int]
-
-
-class BackendSettingsDict(TypedDict):
-    """TypedDict representing the BackendSettings structure."""
-
-    models: Dict[str, ModelConfigDict]
-
-
-class EndpointSettingDict(TypedDict):
-    """TypedDict representing the EndpointSetting structure."""
-
-    id: str
-    api_base: Optional[str]
-    api_key: str
-    region: Optional[str]
-    api_schema_type: Optional[str]
-    credentials: Optional[dict]
-    is_azure: Optional[bool]
-    is_vertex: Optional[bool]
-    is_bedrock: Optional[bool]
-    rpm: Optional[int]
-    tpm: Optional[int]
-    concurrent_requests: Optional[int]
-    proxy: Optional[str]
-
-
-class SettingsDict(TypedDict):
-    """TypedDict representing the expected structure of the settings dictionary."""
-
-    endpoints: List[EndpointSettingDict]
-    token_server: Optional[ServerDict]
-    rate_limit: Optional[RateLimitConfigDict]
-    # Each model backend config
-    anthropic: BackendSettingsDict
-    deepseek: BackendSettingsDict
-    gemini: BackendSettingsDict
-    groq: BackendSettingsDict
-    local: BackendSettingsDict
-    minimax: BackendSettingsDict
-    mistral: BackendSettingsDict
-    moonshot: BackendSettingsDict
-    openai: BackendSettingsDict
-    qwen: BackendSettingsDict
-    yi: BackendSettingsDict
-    zhipuai: BackendSettingsDict
-    baichuan: BackendSettingsDict
-    stepfun: BackendSettingsDict
-    xai: BackendSettingsDict