vectorvein-0.2.12-py3-none-any.whl → vectorvein-0.2.13-py3-none-any.whl

This diff shows the changes between two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
--- vectorvein/settings/__init__.py
+++ vectorvein/settings/__init__.py
@@ -1,11 +1,13 @@
  # @Author: Bi Ying
  # @Date: 2024-07-27 00:30:56
- from typing import List, Dict, Optional, Literal
+ import warnings
+ from typing import List, Optional, Literal

  from pydantic import BaseModel, Field

  from ..types import defaults as defs
  from ..types.enums import BackendType
+ from ..types.settings import SettingsDict
  from ..types.llm_parameters import BackendSettings, EndpointSetting


@@ -35,12 +37,8 @@ class Server(BaseModel):
      url: Optional[str]


- class Settings(BaseModel):
-     endpoints: List[EndpointSetting] = Field(
-         default_factory=list, description="Available endpoints for the LLM service."
-     )
-     token_server: Optional[Server] = Field(default=None, description="Token server address. Format: host:port")
-     rate_limit: Optional[RateLimitConfig] = Field(default=None, description="Rate limit settings.")
+ class Backends(BaseModel):
+     """Model containing all backend configurations in one place."""

      anthropic: BackendSettings = Field(default_factory=BackendSettings, description="Anthropic models settings.")
      deepseek: BackendSettings = Field(default_factory=BackendSettings, description="Deepseek models settings.")
@@ -58,6 +56,45 @@ class Settings(BaseModel):
      stepfun: BackendSettings = Field(default_factory=BackendSettings, description="StepFun models settings.")
      xai: BackendSettings = Field(default_factory=BackendSettings, description="XAI models settings.")

+
+ class Settings(BaseModel):
+     VERSION: Optional[str] = Field(
+         default=None, description="Configuration version. If provided, will use the corresponding format."
+     )
+     endpoints: List[EndpointSetting] = Field(
+         default_factory=list, description="Available endpoints for the LLM service."
+     )
+     token_server: Optional[Server] = Field(default=None, description="Token server address. Format: host:port")
+     rate_limit: Optional[RateLimitConfig] = Field(default=None, description="Rate limit settings.")
+
+     # V2 format: all model backend configs in a single dictionary
+     backends: Optional[Backends] = Field(default=None, description="All model backends in one place (V2 format).")
+
+     # V1 format: each model backend config
+     anthropic: Optional[BackendSettings] = Field(
+         default_factory=BackendSettings, description="Anthropic models settings."
+     )
+     deepseek: Optional[BackendSettings] = Field(
+         default_factory=BackendSettings, description="Deepseek models settings."
+     )
+     gemini: Optional[BackendSettings] = Field(default_factory=BackendSettings, description="Gemini models settings.")
+     groq: Optional[BackendSettings] = Field(default_factory=BackendSettings, description="Groq models settings.")
+     local: Optional[BackendSettings] = Field(default_factory=BackendSettings, description="Local models settings.")
+     minimax: Optional[BackendSettings] = Field(default_factory=BackendSettings, description="Minimax models settings.")
+     mistral: Optional[BackendSettings] = Field(default_factory=BackendSettings, description="Mistral models settings.")
+     moonshot: Optional[BackendSettings] = Field(
+         default_factory=BackendSettings, description="Moonshot models settings."
+     )
+     openai: Optional[BackendSettings] = Field(default_factory=BackendSettings, description="OpenAI models settings.")
+     qwen: Optional[BackendSettings] = Field(default_factory=BackendSettings, description="Qwen models settings.")
+     yi: Optional[BackendSettings] = Field(default_factory=BackendSettings, description="Yi models settings.")
+     zhipuai: Optional[BackendSettings] = Field(default_factory=BackendSettings, description="Zhipuai models settings.")
+     baichuan: Optional[BackendSettings] = Field(
+         default_factory=BackendSettings, description="Baichuan models settings."
+     )
+     stepfun: Optional[BackendSettings] = Field(default_factory=BackendSettings, description="StepFun models settings.")
+     xai: Optional[BackendSettings] = Field(default_factory=BackendSettings, description="XAI models settings.")
+
      def __init__(self, **data):
          model_types = {
              "anthropic": defs.ANTHROPIC_MODELS,
@@ -74,15 +111,30 @@ class Settings(BaseModel):
              "zhipuai": defs.ZHIPUAI_MODELS,
              "baichuan": defs.BAICHUAN_MODELS,
              "stepfun": defs.STEPFUN_MODELS,
+             "xai": defs.XAI_MODELS,
          }

+         version = data.get("VERSION")
+
+         # If V2 format, model configs are in the backends dictionary
+         if version == "2":
+             if "backends" not in data:
+                 raise ValueError("backends is required in V2 format.")
+
+             backends = data["backends"]
+         else:
+             backends = data
+             if len(data) > 0:
+                 warnings.warn("You're using vectorvein's deprecated V1 format. Please use V2 format.")
+
          for model_type, default_models in model_types.items():
-             if model_type in data:
+             if model_type in backends:
                  model_settings = BackendSettings()
-                 model_settings.update_models(default_models, data[model_type].get("models", {}))
-                 data[model_type] = model_settings
+                 user_models = backends[model_type].get("models", {})
+                 model_settings.update_models(default_models, user_models)
+                 backends[model_type] = model_settings
              else:
-                 data[model_type] = BackendSettings(models=default_models)
+                 backends[model_type] = BackendSettings(models=default_models)

          for endpoint in data.get("endpoints", []):
              if not endpoint.get("api_base"):
@@ -94,7 +146,7 @@ class Settings(BaseModel):

          super().__init__(**data)

-     def load(self, settings_dict: Dict):
+     def load(self, settings_dict: SettingsDict):
          self.__init__(**settings_dict)

      def get_endpoint(self, endpoint_id: str) -> EndpointSetting:
@@ -104,7 +156,14 @@ class Settings(BaseModel):
          raise ValueError(f"Endpoint {endpoint_id} not found.")

      def get_backend(self, backend: BackendType) -> BackendSettings:
-         return getattr(self, backend.value.lower())
+         backend_name = backend.value.lower()
+
+         # Use VERSION 2 format backends field first
+         if self.VERSION == "2" and self.backends is not None:
+             return getattr(self.backends, backend_name)
+
+         # Compatible with VERSION 1 format
+         return getattr(self, backend_name)


  settings = Settings()
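
For readers comparing the two layouts, the following is a minimal usage sketch of the V2 format against the API shown in the hunks above. The VERSION key, the backends mapping, Settings.load, and get_backend all appear in this diff; the endpoint id, API base URL, model name, and the BackendType.OpenAI member are placeholder assumptions, not values taken from the package.

# Illustrative sketch only -- not part of the 0.2.13 diff.
from vectorvein.settings import settings          # module-level Settings() instance shown above
from vectorvein.types.enums import BackendType    # imported by the settings module in this diff

settings.load(
    {
        "VERSION": "2",  # opt in to the V2 layout; omitting it triggers the deprecated-V1 warning path
        "endpoints": [
            {"id": "default", "api_base": "https://example.com/v1"},  # placeholder endpoint
        ],
        "backends": {
            # per-backend configs now live under a single "backends" mapping
            "openai": {"models": {"my-model": {"id": "my-model", "endpoints": ["default"]}}},
        },
    }
)

# get_backend() reads from the backends field when VERSION == "2",
# and falls back to the V1 top-level attributes otherwise.
openai_backend = settings.get_backend(BackendType.OpenAI)  # assumes BackendType defines an OpenAI member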
--- vectorvein/types/settings.py
+++ vectorvein/types/settings.py
@@ -48,7 +48,7 @@ class EndpointOptionDict(TypedDict):
  class ModelConfigDict(TypedDict):
      """TypedDict representing the model configuration structure."""

-     id: NotRequired[str]
+     id: str
      endpoints: List[Union[str, EndpointOptionDict]]
      function_call_available: NotRequired[bool]
      response_format_available: NotRequired[bool]
@@ -81,13 +81,38 @@ class EndpointSettingDict(TypedDict):
      proxy: NotRequired[str]


+ class BackendsDict(TypedDict):
+     """TypedDict representing all model backends in a single dictionary."""
+
+     anthropic: NotRequired[BackendSettingsDict]
+     deepseek: NotRequired[BackendSettingsDict]
+     gemini: NotRequired[BackendSettingsDict]
+     groq: NotRequired[BackendSettingsDict]
+     local: NotRequired[BackendSettingsDict]
+     minimax: NotRequired[BackendSettingsDict]
+     mistral: NotRequired[BackendSettingsDict]
+     moonshot: NotRequired[BackendSettingsDict]
+     openai: NotRequired[BackendSettingsDict]
+     qwen: NotRequired[BackendSettingsDict]
+     yi: NotRequired[BackendSettingsDict]
+     zhipuai: NotRequired[BackendSettingsDict]
+     baichuan: NotRequired[BackendSettingsDict]
+     stepfun: NotRequired[BackendSettingsDict]
+     xai: NotRequired[BackendSettingsDict]
+
+
  class SettingsDict(TypedDict):
      """TypedDict representing the expected structure of the settings dictionary."""

+     VERSION: NotRequired[str]
      endpoints: List[EndpointSettingDict]
      token_server: NotRequired[ServerDict]
      rate_limit: NotRequired[RateLimitConfigDict]
-     # 各模型后端配置
+
+     # V2 format: all model backend configs in a single dictionary
+     backends: NotRequired[BackendsDict]
+
+     # V1 format: each model backend config
      anthropic: NotRequired[BackendSettingsDict]
      deepseek: NotRequired[BackendSettingsDict]
      gemini: NotRequired[BackendSettingsDict]
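
Because SettingsDict now carries the VERSION and backends keys, and ModelConfigDict makes id required, a configuration can be checked statically before it is passed to Settings.load. A minimal sketch follows; the concrete model and backend entries are placeholders, and the "models" key of a backend entry is assumed from the get("models", {}) call in the settings code above.

# Illustrative sketch only -- placeholder values, not package defaults.
from vectorvein.types.settings import ModelConfigDict, SettingsDict

# "id" is now a required key of ModelConfigDict (it was NotRequired in 0.2.12).
model: ModelConfigDict = {"id": "my-model", "endpoints": ["default"]}

# SettingsDict accepts the optional VERSION marker and the V2 "backends" mapping.
config: SettingsDict = {
    "VERSION": "2",
    "endpoints": [],  # EndpointSettingDict entries omitted here for brevity
    "backends": {"openai": {"models": {"my-model": model}}},
}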
--- vectorvein-0.2.12.dist-info/METADATA
+++ vectorvein-0.2.13.dist-info/METADATA
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: vectorvein
- Version: 0.2.12
+ Version: 0.2.13
  Summary: VectorVein python SDK
  Author-Email: Anderson <andersonby@163.com>
  License: MIT
--- vectorvein-0.2.12.dist-info/RECORD
+++ vectorvein-0.2.13.dist-info/RECORD
@@ -1,6 +1,6 @@
- vectorvein-0.2.12.dist-info/METADATA,sha256=tceoZCMvKLrKIQMxR0iZlKF1WHIHxgYS5Kqmozm0b8U,4414
- vectorvein-0.2.12.dist-info/WHEEL,sha256=thaaA2w1JzcGC48WYufAs8nrYZjJm8LqNfnXFOFyCC4,90
- vectorvein-0.2.12.dist-info/entry_points.txt,sha256=6OYgBcLyFCUgeqLgnvMyOJxPCWzgy7se4rLPKtNonMs,34
+ vectorvein-0.2.13.dist-info/METADATA,sha256=sEZT2h2XwDUIkIvdKzXomeSuniLUSr-HWlj88MmHRew,4414
+ vectorvein-0.2.13.dist-info/WHEEL,sha256=thaaA2w1JzcGC48WYufAs8nrYZjJm8LqNfnXFOFyCC4,90
+ vectorvein-0.2.13.dist-info/entry_points.txt,sha256=6OYgBcLyFCUgeqLgnvMyOJxPCWzgy7se4rLPKtNonMs,34
  vectorvein/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  vectorvein/api/__init__.py,sha256=lfY-XA46fgD2iIZTU0VYP8i07AwA03Egj4Qua0vUKrQ,738
  vectorvein/api/client.py,sha256=xF-leKDQzVyyy9FnIRaz0k4eElYW1XbbzeRLcpnyk90,33047
@@ -28,7 +28,7 @@ vectorvein/chat_clients/yi_client.py,sha256=RNf4CRuPJfixrwLZ3-DEc3t25QDe1mvZeb9s
  vectorvein/chat_clients/zhipuai_client.py,sha256=Ys5DSeLCuedaDXr3PfG1EW2zKXopt-awO2IylWSwY0s,519
  vectorvein/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  vectorvein/server/token_server.py,sha256=36F9PKSNOX8ZtYBXY_l-76GQTpUSmQ2Y8EMy1H7wtdQ,1353
- vectorvein/settings/__init__.py,sha256=ecGyrE_6YfX9z6Igb1rDCu1Q-qMTcVozWF3WEl_hiKA,4871
+ vectorvein/settings/__init__.py,sha256=oBOLwG61RN9dq012Bgq3OfZyxyrJ5j0BYNmHbThhpjo,8097
  vectorvein/settings/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  vectorvein/types/__init__.py,sha256=ie7H3rTMq_Fg836vOmy96m3wzjDkqfekQecPXXEDbcM,3005
  vectorvein/types/defaults.py,sha256=VrkQoyHqC_eK3g1b6egpPYLLo0ltwMHqxDscCX4y-N0,27417
@@ -36,7 +36,7 @@ vectorvein/types/enums.py,sha256=7KTJSVtQueImmbr1fSwv3rQVtc0RyMWXJmoE2tDOaso,166
  vectorvein/types/exception.py,sha256=gnW4GnJ76jND6UGnodk9xmqkcbeS7Cz2rvncA2HpD5E,69
  vectorvein/types/llm_parameters.py,sha256=4SxDbJKVb9oGYymyxQtNZ66YZmUQd9_CpYYg81_Inkk,7650
  vectorvein/types/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- vectorvein/types/settings.py,sha256=71D94qqG4PVY59NDp1M29ZGcINOByilGcnCrNs5rbX8,3214
+ vectorvein/types/settings.py,sha256=096mcaqtuZYCeJQYzF4ILqrLp_z5-upJAxwLrRZ5WQk,4165
  vectorvein/utilities/media_processing.py,sha256=7KtbLFzOYIn1e9QTN9G6C76NH8CBlV9kfAgiRKEIeXY,6263
  vectorvein/utilities/rate_limiter.py,sha256=dwolIUVw2wP83Odqpx0AAaE77de1GzxkYDGH4tM_u_4,10300
  vectorvein/utilities/retry.py,sha256=6KFS9R2HdhqM3_9jkjD4F36ZSpEx2YNFGOVlpOsUetM,2208
@@ -61,4 +61,4 @@ vectorvein/workflow/nodes/vector_db.py,sha256=t6I17q6iR3yQreiDHpRrksMdWDPIvgqJs0
  vectorvein/workflow/nodes/video_generation.py,sha256=qmdg-t_idpxq1veukd-jv_ChICMOoInKxprV9Z4Vi2w,4118
  vectorvein/workflow/nodes/web_crawlers.py,sha256=LsqomfXfqrXfHJDO1cl0Ox48f4St7X_SL12DSbAMSOw,5415
  vectorvein/workflow/utils/json_to_code.py,sha256=F7dhDy8kGc8ndOeihGLRLGFGlquoxVlb02ENtxnQ0C8,5914
- vectorvein-0.2.12.dist-info/RECORD,,
+ vectorvein-0.2.13.dist-info/RECORD,,