vectorvein 0.2.12__tar.gz → 0.2.14__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (64)
  1. {vectorvein-0.2.12 → vectorvein-0.2.14}/PKG-INFO +1 -1
  2. {vectorvein-0.2.12 → vectorvein-0.2.14}/pyproject.toml +1 -1
  3. vectorvein-0.2.14/src/vectorvein/settings/__init__.py +196 -0
  4. {vectorvein-0.2.12 → vectorvein-0.2.14}/src/vectorvein/types/__init__.py +4 -0
  5. {vectorvein-0.2.12 → vectorvein-0.2.14}/src/vectorvein/types/settings.py +39 -3
  6. vectorvein-0.2.12/src/vectorvein/settings/__init__.py +0 -110
  7. {vectorvein-0.2.12 → vectorvein-0.2.14}/README.md +0 -0
  8. {vectorvein-0.2.12 → vectorvein-0.2.14}/src/vectorvein/__init__.py +0 -0
  9. {vectorvein-0.2.12 → vectorvein-0.2.14}/src/vectorvein/api/__init__.py +0 -0
  10. {vectorvein-0.2.12 → vectorvein-0.2.14}/src/vectorvein/api/client.py +0 -0
  11. {vectorvein-0.2.12 → vectorvein-0.2.14}/src/vectorvein/api/exceptions.py +0 -0
  12. {vectorvein-0.2.12 → vectorvein-0.2.14}/src/vectorvein/api/models.py +0 -0
  13. {vectorvein-0.2.12 → vectorvein-0.2.14}/src/vectorvein/chat_clients/__init__.py +0 -0
  14. {vectorvein-0.2.12 → vectorvein-0.2.14}/src/vectorvein/chat_clients/anthropic_client.py +0 -0
  15. {vectorvein-0.2.12 → vectorvein-0.2.14}/src/vectorvein/chat_clients/baichuan_client.py +0 -0
  16. {vectorvein-0.2.12 → vectorvein-0.2.14}/src/vectorvein/chat_clients/base_client.py +0 -0
  17. {vectorvein-0.2.12 → vectorvein-0.2.14}/src/vectorvein/chat_clients/deepseek_client.py +0 -0
  18. {vectorvein-0.2.12 → vectorvein-0.2.14}/src/vectorvein/chat_clients/gemini_client.py +0 -0
  19. {vectorvein-0.2.12 → vectorvein-0.2.14}/src/vectorvein/chat_clients/groq_client.py +0 -0
  20. {vectorvein-0.2.12 → vectorvein-0.2.14}/src/vectorvein/chat_clients/local_client.py +0 -0
  21. {vectorvein-0.2.12 → vectorvein-0.2.14}/src/vectorvein/chat_clients/minimax_client.py +0 -0
  22. {vectorvein-0.2.12 → vectorvein-0.2.14}/src/vectorvein/chat_clients/mistral_client.py +0 -0
  23. {vectorvein-0.2.12 → vectorvein-0.2.14}/src/vectorvein/chat_clients/moonshot_client.py +0 -0
  24. {vectorvein-0.2.12 → vectorvein-0.2.14}/src/vectorvein/chat_clients/openai_client.py +0 -0
  25. {vectorvein-0.2.12 → vectorvein-0.2.14}/src/vectorvein/chat_clients/openai_compatible_client.py +0 -0
  26. {vectorvein-0.2.12 → vectorvein-0.2.14}/src/vectorvein/chat_clients/py.typed +0 -0
  27. {vectorvein-0.2.12 → vectorvein-0.2.14}/src/vectorvein/chat_clients/qwen_client.py +0 -0
  28. {vectorvein-0.2.12 → vectorvein-0.2.14}/src/vectorvein/chat_clients/stepfun_client.py +0 -0
  29. {vectorvein-0.2.12 → vectorvein-0.2.14}/src/vectorvein/chat_clients/utils.py +0 -0
  30. {vectorvein-0.2.12 → vectorvein-0.2.14}/src/vectorvein/chat_clients/xai_client.py +0 -0
  31. {vectorvein-0.2.12 → vectorvein-0.2.14}/src/vectorvein/chat_clients/yi_client.py +0 -0
  32. {vectorvein-0.2.12 → vectorvein-0.2.14}/src/vectorvein/chat_clients/zhipuai_client.py +0 -0
  33. {vectorvein-0.2.12 → vectorvein-0.2.14}/src/vectorvein/py.typed +0 -0
  34. {vectorvein-0.2.12 → vectorvein-0.2.14}/src/vectorvein/server/token_server.py +0 -0
  35. {vectorvein-0.2.12 → vectorvein-0.2.14}/src/vectorvein/settings/py.typed +0 -0
  36. {vectorvein-0.2.12 → vectorvein-0.2.14}/src/vectorvein/types/defaults.py +0 -0
  37. {vectorvein-0.2.12 → vectorvein-0.2.14}/src/vectorvein/types/enums.py +0 -0
  38. {vectorvein-0.2.12 → vectorvein-0.2.14}/src/vectorvein/types/exception.py +0 -0
  39. {vectorvein-0.2.12 → vectorvein-0.2.14}/src/vectorvein/types/llm_parameters.py +0 -0
  40. {vectorvein-0.2.12 → vectorvein-0.2.14}/src/vectorvein/types/py.typed +0 -0
  41. {vectorvein-0.2.12 → vectorvein-0.2.14}/src/vectorvein/utilities/media_processing.py +0 -0
  42. {vectorvein-0.2.12 → vectorvein-0.2.14}/src/vectorvein/utilities/rate_limiter.py +0 -0
  43. {vectorvein-0.2.12 → vectorvein-0.2.14}/src/vectorvein/utilities/retry.py +0 -0
  44. {vectorvein-0.2.12 → vectorvein-0.2.14}/src/vectorvein/workflow/graph/edge.py +0 -0
  45. {vectorvein-0.2.12 → vectorvein-0.2.14}/src/vectorvein/workflow/graph/node.py +0 -0
  46. {vectorvein-0.2.12 → vectorvein-0.2.14}/src/vectorvein/workflow/graph/port.py +0 -0
  47. {vectorvein-0.2.12 → vectorvein-0.2.14}/src/vectorvein/workflow/graph/workflow.py +0 -0
  48. {vectorvein-0.2.12 → vectorvein-0.2.14}/src/vectorvein/workflow/nodes/__init__.py +0 -0
  49. {vectorvein-0.2.12 → vectorvein-0.2.14}/src/vectorvein/workflow/nodes/audio_generation.py +0 -0
  50. {vectorvein-0.2.12 → vectorvein-0.2.14}/src/vectorvein/workflow/nodes/control_flows.py +0 -0
  51. {vectorvein-0.2.12 → vectorvein-0.2.14}/src/vectorvein/workflow/nodes/file_processing.py +0 -0
  52. {vectorvein-0.2.12 → vectorvein-0.2.14}/src/vectorvein/workflow/nodes/image_generation.py +0 -0
  53. {vectorvein-0.2.12 → vectorvein-0.2.14}/src/vectorvein/workflow/nodes/llms.py +0 -0
  54. {vectorvein-0.2.12 → vectorvein-0.2.14}/src/vectorvein/workflow/nodes/media_editing.py +0 -0
  55. {vectorvein-0.2.12 → vectorvein-0.2.14}/src/vectorvein/workflow/nodes/media_processing.py +0 -0
  56. {vectorvein-0.2.12 → vectorvein-0.2.14}/src/vectorvein/workflow/nodes/output.py +0 -0
  57. {vectorvein-0.2.12 → vectorvein-0.2.14}/src/vectorvein/workflow/nodes/relational_db.py +0 -0
  58. {vectorvein-0.2.12 → vectorvein-0.2.14}/src/vectorvein/workflow/nodes/text_processing.py +0 -0
  59. {vectorvein-0.2.12 → vectorvein-0.2.14}/src/vectorvein/workflow/nodes/tools.py +0 -0
  60. {vectorvein-0.2.12 → vectorvein-0.2.14}/src/vectorvein/workflow/nodes/triggers.py +0 -0
  61. {vectorvein-0.2.12 → vectorvein-0.2.14}/src/vectorvein/workflow/nodes/vector_db.py +0 -0
  62. {vectorvein-0.2.12 → vectorvein-0.2.14}/src/vectorvein/workflow/nodes/video_generation.py +0 -0
  63. {vectorvein-0.2.12 → vectorvein-0.2.14}/src/vectorvein/workflow/nodes/web_crawlers.py +0 -0
  64. {vectorvein-0.2.12 → vectorvein-0.2.14}/src/vectorvein/workflow/utils/json_to_code.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: vectorvein
3
- Version: 0.2.12
3
+ Version: 0.2.14
4
4
  Summary: VectorVein python SDK
5
5
  Author-Email: Anderson <andersonby@163.com>
6
6
  License: MIT
@@ -18,7 +18,7 @@ description = "VectorVein python SDK"
18
18
  name = "vectorvein"
19
19
  readme = "README.md"
20
20
  requires-python = ">=3.10"
21
- version = "0.2.12"
21
+ version = "0.2.14"
22
22
 
23
23
  [project.license]
24
24
  text = "MIT"
@@ -0,0 +1,196 @@
1
+ # @Author: Bi Ying
2
+ # @Date: 2024-07-27 00:30:56
3
+ import warnings
4
+ from typing import List, Optional, Literal
5
+
6
+ from pydantic import BaseModel, Field
7
+
8
+ from ..types import defaults as defs
9
+ from ..types.enums import BackendType
10
+ from ..types.settings import SettingsDict
11
+ from ..types.llm_parameters import BackendSettings, EndpointSetting
12
+
13
+
14
class RedisConfig(BaseModel):
    """Connection parameters for the Redis rate-limit backend."""

    host: str = "localhost"  # Redis server hostname
    port: int = 6379  # Redis server port
    db: int = 0  # Redis logical database index
18
+
19
+
20
class DiskCacheConfig(BaseModel):
    """Location of the on-disk cache used by the diskcache rate-limit backend."""

    cache_dir: str = ".rate_limit_cache"  # directory holding the cache files
22
+
23
+
24
class RateLimitConfig(BaseModel):
    """Rate-limiting configuration: storage backend choice plus default quotas."""

    # Master switch; when False the limiter is inactive.
    enabled: bool = False

    # Where counters are stored; the matching sub-config below applies.
    backend: Literal["memory", "redis", "diskcache"] = "memory"
    redis: Optional[RedisConfig] = None  # used when backend == "redis"
    diskcache: Optional[DiskCacheConfig] = None  # used when backend == "diskcache"

    # Fallback quotas: requests per minute and tokens per minute.
    default_rpm: int = 60
    default_tpm: int = 1_000_000
32
+
33
+
34
class Server(BaseModel):
    """Address of an auxiliary server (used for the token server)."""

    host: str  # server hostname (required)
    port: int  # server port (required)
    # NOTE(review): declared without a default, so pydantic treats ``url`` as a
    # required field that merely allows None — confirm that is intentional.
    url: Optional[str]
38
+
39
+
40
class Backends(BaseModel):
    """Model containing all backend configurations in one place.

    Used by the V2 settings format: each field holds the ``BackendSettings``
    for the correspondingly named LLM provider.
    """

    anthropic: BackendSettings = Field(default_factory=BackendSettings, description="Anthropic models settings.")
    deepseek: BackendSettings = Field(default_factory=BackendSettings, description="Deepseek models settings.")
    gemini: BackendSettings = Field(default_factory=BackendSettings, description="Gemini models settings.")
    groq: BackendSettings = Field(default_factory=BackendSettings, description="Groq models settings.")
    local: BackendSettings = Field(default_factory=BackendSettings, description="Local models settings.")
    minimax: BackendSettings = Field(default_factory=BackendSettings, description="Minimax models settings.")
    mistral: BackendSettings = Field(default_factory=BackendSettings, description="Mistral models settings.")
    moonshot: BackendSettings = Field(default_factory=BackendSettings, description="Moonshot models settings.")
    openai: BackendSettings = Field(default_factory=BackendSettings, description="OpenAI models settings.")
    qwen: BackendSettings = Field(default_factory=BackendSettings, description="Qwen models settings.")
    yi: BackendSettings = Field(default_factory=BackendSettings, description="Yi models settings.")
    zhipuai: BackendSettings = Field(default_factory=BackendSettings, description="Zhipuai models settings.")
    baichuan: BackendSettings = Field(default_factory=BackendSettings, description="Baichuan models settings.")
    stepfun: BackendSettings = Field(default_factory=BackendSettings, description="StepFun models settings.")
    xai: BackendSettings = Field(default_factory=BackendSettings, description="XAI models settings.")
58
+
59
+
60
class Settings(BaseModel):
    """Top-level SDK settings supporting both configuration formats.

    V2 groups every per-backend config under the single ``backends`` field;
    the deprecated V1 format keeps each backend as its own top-level field.
    ``__init__`` normalizes whichever format it receives before pydantic
    validation runs.
    """

    VERSION: Optional[str] = Field(
        default="2", description="Configuration version. If provided, will use the corresponding format."
    )
    endpoints: List[EndpointSetting] = Field(
        default_factory=list, description="Available endpoints for the LLM service."
    )
    token_server: Optional[Server] = Field(default=None, description="Token server address. Format: host:port")
    rate_limit: Optional[RateLimitConfig] = Field(default=None, description="Rate limit settings.")

    # V2 format: all model backend configs in a single dictionary
    backends: Optional[Backends] = Field(default=None, description="All model backends in one place (V2 format).")

    # V1 format: each model backend config
    anthropic: Optional[BackendSettings] = Field(
        default_factory=BackendSettings, description="Anthropic models settings."
    )
    deepseek: Optional[BackendSettings] = Field(
        default_factory=BackendSettings, description="Deepseek models settings."
    )
    gemini: Optional[BackendSettings] = Field(default_factory=BackendSettings, description="Gemini models settings.")
    groq: Optional[BackendSettings] = Field(default_factory=BackendSettings, description="Groq models settings.")
    local: Optional[BackendSettings] = Field(default_factory=BackendSettings, description="Local models settings.")
    minimax: Optional[BackendSettings] = Field(default_factory=BackendSettings, description="Minimax models settings.")
    mistral: Optional[BackendSettings] = Field(default_factory=BackendSettings, description="Mistral models settings.")
    moonshot: Optional[BackendSettings] = Field(
        default_factory=BackendSettings, description="Moonshot models settings."
    )
    openai: Optional[BackendSettings] = Field(default_factory=BackendSettings, description="OpenAI models settings.")
    qwen: Optional[BackendSettings] = Field(default_factory=BackendSettings, description="Qwen models settings.")
    yi: Optional[BackendSettings] = Field(default_factory=BackendSettings, description="Yi models settings.")
    zhipuai: Optional[BackendSettings] = Field(default_factory=BackendSettings, description="Zhipuai models settings.")
    baichuan: Optional[BackendSettings] = Field(
        default_factory=BackendSettings, description="Baichuan models settings."
    )
    stepfun: Optional[BackendSettings] = Field(default_factory=BackendSettings, description="StepFun models settings.")
    xai: Optional[BackendSettings] = Field(default_factory=BackendSettings, description="XAI models settings.")

    def __init__(self, **data):
        """Normalize raw settings ``data`` (V1 or V2 dict) and validate it.

        NOTE(review): this mutates the caller's ``data`` in place (and, for
        V2, the nested ``backends`` dict) — confirm callers do not reuse the
        dict afterwards.
        """
        # Default model tables used to fill in any backend the user omitted.
        model_types = {
            "anthropic": defs.ANTHROPIC_MODELS,
            "deepseek": defs.DEEPSEEK_MODELS,
            "gemini": defs.GEMINI_MODELS,
            "groq": defs.GROQ_MODELS,
            "local": {},
            "minimax": defs.MINIMAX_MODELS,
            "mistral": defs.MISTRAL_MODELS,
            "moonshot": defs.MOONSHOT_MODELS,
            "openai": defs.OPENAI_MODELS,
            "qwen": defs.QWEN_MODELS,
            "yi": defs.YI_MODELS,
            "zhipuai": defs.ZHIPUAI_MODELS,
            "baichuan": defs.BAICHUAN_MODELS,
            "stepfun": defs.STEPFUN_MODELS,
            "xai": defs.XAI_MODELS,
        }

        version = data.get("VERSION")

        # No data at all: build an empty V2 config so pure defaults apply.
        if len(data) == 0:
            version = "2"
            data["backends"] = {}

        # If V2 format, model configs are in the backends dictionary
        if version == "2":
            if "backends" not in data:
                raise ValueError("backends is required in V2 format.")

            backends = data["backends"]
        else:
            # V1: backend configs live at the top level of ``data`` itself.
            # NOTE(review): warn() has no category/stacklevel, so this emits a
            # plain UserWarning pointing at this line rather than the caller.
            backends = data
            warnings.warn("You're using vectorvein's deprecated V1 format. Please use V2 format.")

        # Merge any user-provided models over the defaults for every backend;
        # backends the user omitted get the default model table as-is.
        for model_type, default_models in model_types.items():
            if model_type in backends:
                model_settings = BackendSettings()
                user_models = backends[model_type].get("models", {})
                model_settings.update_models(default_models, user_models)
                backends[model_type] = model_settings
            else:
                backends[model_type] = BackendSettings(models=default_models)

        # Gemini's OpenAI-compatible API lives under .../v1beta/openai/ —
        # rewrite bare v1beta endpoints so requests hit the right path.
        for endpoint in data.get("endpoints", []):
            if not endpoint.get("api_base"):
                continue
            api_base = endpoint["api_base"]
            if api_base.startswith("https://generativelanguage.googleapis.com/v1beta"):
                if not api_base.endswith("openai/"):
                    endpoint["api_base"] = api_base.strip("/") + "/openai/"

        super().__init__(**data)

    def load(self, settings_dict: SettingsDict):
        """Re-initialize this instance in place from ``settings_dict``."""
        self.__init__(**settings_dict)

    def get_endpoint(self, endpoint_id: str) -> EndpointSetting:
        """Return the endpoint whose ``id`` equals ``endpoint_id``.

        Raises:
            ValueError: if no configured endpoint has that id.
        """
        for endpoint in self.endpoints:
            if endpoint.id == endpoint_id:
                return endpoint
        raise ValueError(f"Endpoint {endpoint_id} not found.")

    def get_backend(self, backend: BackendType) -> BackendSettings:
        """Return the settings for ``backend``, preferring the V2 container."""
        backend_name = backend.value.lower()

        # Use VERSION 2 format backends field first
        if self.VERSION == "2" and self.backends is not None:
            return getattr(self.backends, backend_name)

        # Compatible with VERSION 1 format
        return getattr(self, backend_name)

    def export(self):
        """Dump the model, omitting whichever format's fields are redundant."""
        if self.VERSION == "2":
            # V2 export: drop the flat V1 per-backend fields.
            return super().model_dump(
                exclude={
                    "anthropic",
                    "deepseek",
                    "gemini",
                    "groq",
                    "local",
                    "minimax",
                    "mistral",
                    "moonshot",
                    "openai",
                    "qwen",
                    "yi",
                    "zhipuai",
                    "baichuan",
                    "stepfun",
                    "xai",
                }
            )
        else:
            # V1 export: drop the V2 ``backends`` container.
            return super().model_dump(exclude={"backends"})
194
+
195
+
196
# Module-level singleton; callers import this and reconfigure it in place
# via ``settings.load(...)``.
settings = Settings()
@@ -61,6 +61,8 @@ from .settings import (
61
61
  BackendSettingsDict,
62
62
  EndpointSettingDict,
63
63
  SettingsDict,
64
+ SettingsV1Dict,
65
+ SettingsV2Dict,
64
66
  )
65
67
 
66
68
 
@@ -126,4 +128,6 @@ __all__ = [
126
128
  "BackendSettingsDict",
127
129
  "EndpointSettingDict",
128
130
  "SettingsDict",
131
+ "SettingsV1Dict",
132
+ "SettingsV2Dict",
129
133
  ]
@@ -48,7 +48,7 @@ class EndpointOptionDict(TypedDict):
48
48
  class ModelConfigDict(TypedDict):
49
49
  """TypedDict representing the model configuration structure."""
50
50
 
51
- id: NotRequired[str]
51
+ id: str
52
52
  endpoints: List[Union[str, EndpointOptionDict]]
53
53
  function_call_available: NotRequired[bool]
54
54
  response_format_available: NotRequired[bool]
@@ -81,13 +81,34 @@ class EndpointSettingDict(TypedDict):
81
81
  proxy: NotRequired[str]
82
82
 
83
83
 
84
- class SettingsDict(TypedDict):
84
class BackendsDict(TypedDict):
    """TypedDict representing all model backends in a single dictionary.

    Every key is optional; each maps a provider name to that provider's
    backend-settings dictionary.
    """

    anthropic: NotRequired[BackendSettingsDict]
    deepseek: NotRequired[BackendSettingsDict]
    gemini: NotRequired[BackendSettingsDict]
    groq: NotRequired[BackendSettingsDict]
    local: NotRequired[BackendSettingsDict]
    minimax: NotRequired[BackendSettingsDict]
    mistral: NotRequired[BackendSettingsDict]
    moonshot: NotRequired[BackendSettingsDict]
    openai: NotRequired[BackendSettingsDict]
    qwen: NotRequired[BackendSettingsDict]
    yi: NotRequired[BackendSettingsDict]
    zhipuai: NotRequired[BackendSettingsDict]
    baichuan: NotRequired[BackendSettingsDict]
    stepfun: NotRequired[BackendSettingsDict]
    xai: NotRequired[BackendSettingsDict]
102
+
103
+
104
+ class SettingsV1Dict(TypedDict):
85
105
  """TypedDict representing the expected structure of the settings dictionary."""
86
106
 
87
107
  endpoints: List[EndpointSettingDict]
88
108
  token_server: NotRequired[ServerDict]
89
109
  rate_limit: NotRequired[RateLimitConfigDict]
90
- # 各模型后端配置
110
+
111
+ # V1 format: each model backend config
91
112
  anthropic: NotRequired[BackendSettingsDict]
92
113
  deepseek: NotRequired[BackendSettingsDict]
93
114
  gemini: NotRequired[BackendSettingsDict]
@@ -103,3 +124,18 @@ class SettingsDict(TypedDict):
103
124
  baichuan: NotRequired[BackendSettingsDict]
104
125
  stepfun: NotRequired[BackendSettingsDict]
105
126
  xai: NotRequired[BackendSettingsDict]
127
+
128
+
129
class SettingsV2Dict(TypedDict):
    """TypedDict for the V2 settings format, where all model backend configs
    are grouped under a single ``backends`` mapping."""

    VERSION: NotRequired[str]
    endpoints: List[EndpointSettingDict]
    token_server: NotRequired[ServerDict]
    rate_limit: NotRequired[RateLimitConfigDict]

    # V2 format: all model backend configs in a single dictionary
    backends: NotRequired[BackendsDict]
139
+
140
+
141
# A settings dict may follow either the legacy V1 shape or the V2 shape.
SettingsDict = Union[SettingsV1Dict, SettingsV2Dict]
@@ -1,110 +0,0 @@
1
- # @Author: Bi Ying
2
- # @Date: 2024-07-27 00:30:56
3
- from typing import List, Dict, Optional, Literal
4
-
5
- from pydantic import BaseModel, Field
6
-
7
- from ..types import defaults as defs
8
- from ..types.enums import BackendType
9
- from ..types.llm_parameters import BackendSettings, EndpointSetting
10
-
11
-
12
- class RedisConfig(BaseModel):
13
- host: str = "localhost"
14
- port: int = 6379
15
- db: int = 0
16
-
17
-
18
- class DiskCacheConfig(BaseModel):
19
- cache_dir: str = ".rate_limit_cache"
20
-
21
-
22
- class RateLimitConfig(BaseModel):
23
- enabled: bool = False
24
-
25
- backend: Literal["memory", "redis", "diskcache"] = "memory"
26
- redis: Optional[RedisConfig] = Field(default=None)
27
- diskcache: Optional[DiskCacheConfig] = Field(default=None)
28
- default_rpm: int = 60
29
- default_tpm: int = 1000000
30
-
31
-
32
- class Server(BaseModel):
33
- host: str
34
- port: int
35
- url: Optional[str]
36
-
37
-
38
- class Settings(BaseModel):
39
- endpoints: List[EndpointSetting] = Field(
40
- default_factory=list, description="Available endpoints for the LLM service."
41
- )
42
- token_server: Optional[Server] = Field(default=None, description="Token server address. Format: host:port")
43
- rate_limit: Optional[RateLimitConfig] = Field(default=None, description="Rate limit settings.")
44
-
45
- anthropic: BackendSettings = Field(default_factory=BackendSettings, description="Anthropic models settings.")
46
- deepseek: BackendSettings = Field(default_factory=BackendSettings, description="Deepseek models settings.")
47
- gemini: BackendSettings = Field(default_factory=BackendSettings, description="Gemini models settings.")
48
- groq: BackendSettings = Field(default_factory=BackendSettings, description="Groq models settings.")
49
- local: BackendSettings = Field(default_factory=BackendSettings, description="Local models settings.")
50
- minimax: BackendSettings = Field(default_factory=BackendSettings, description="Minimax models settings.")
51
- mistral: BackendSettings = Field(default_factory=BackendSettings, description="Mistral models settings.")
52
- moonshot: BackendSettings = Field(default_factory=BackendSettings, description="Moonshot models settings.")
53
- openai: BackendSettings = Field(default_factory=BackendSettings, description="OpenAI models settings.")
54
- qwen: BackendSettings = Field(default_factory=BackendSettings, description="Qwen models settings.")
55
- yi: BackendSettings = Field(default_factory=BackendSettings, description="Yi models settings.")
56
- zhipuai: BackendSettings = Field(default_factory=BackendSettings, description="Zhipuai models settings.")
57
- baichuan: BackendSettings = Field(default_factory=BackendSettings, description="Baichuan models settings.")
58
- stepfun: BackendSettings = Field(default_factory=BackendSettings, description="StepFun models settings.")
59
- xai: BackendSettings = Field(default_factory=BackendSettings, description="XAI models settings.")
60
-
61
- def __init__(self, **data):
62
- model_types = {
63
- "anthropic": defs.ANTHROPIC_MODELS,
64
- "deepseek": defs.DEEPSEEK_MODELS,
65
- "gemini": defs.GEMINI_MODELS,
66
- "groq": defs.GROQ_MODELS,
67
- "local": {},
68
- "minimax": defs.MINIMAX_MODELS,
69
- "mistral": defs.MISTRAL_MODELS,
70
- "moonshot": defs.MOONSHOT_MODELS,
71
- "openai": defs.OPENAI_MODELS,
72
- "qwen": defs.QWEN_MODELS,
73
- "yi": defs.YI_MODELS,
74
- "zhipuai": defs.ZHIPUAI_MODELS,
75
- "baichuan": defs.BAICHUAN_MODELS,
76
- "stepfun": defs.STEPFUN_MODELS,
77
- }
78
-
79
- for model_type, default_models in model_types.items():
80
- if model_type in data:
81
- model_settings = BackendSettings()
82
- model_settings.update_models(default_models, data[model_type].get("models", {}))
83
- data[model_type] = model_settings
84
- else:
85
- data[model_type] = BackendSettings(models=default_models)
86
-
87
- for endpoint in data.get("endpoints", []):
88
- if not endpoint.get("api_base"):
89
- continue
90
- api_base = endpoint["api_base"]
91
- if api_base.startswith("https://generativelanguage.googleapis.com/v1beta"):
92
- if not api_base.endswith("openai/"):
93
- endpoint["api_base"] = api_base.strip("/") + "/openai/"
94
-
95
- super().__init__(**data)
96
-
97
- def load(self, settings_dict: Dict):
98
- self.__init__(**settings_dict)
99
-
100
- def get_endpoint(self, endpoint_id: str) -> EndpointSetting:
101
- for endpoint in self.endpoints:
102
- if endpoint.id == endpoint_id:
103
- return endpoint
104
- raise ValueError(f"Endpoint {endpoint_id} not found.")
105
-
106
- def get_backend(self, backend: BackendType) -> BackendSettings:
107
- return getattr(self, backend.value.lower())
108
-
109
-
110
- settings = Settings()
File without changes