vectorvein-0.2.13-py3-none-any.whl → vectorvein-0.2.15-py3-none-any.whl

This diff compares the contents of two package versions publicly released to one of the supported registries. It is provided for informational purposes only and reflects the packages exactly as they appear in their public registry.
vectorvein/settings/__init__.py

@@ -1,6 +1,7 @@
 # @Author: Bi Ying
 # @Date: 2024-07-27 00:30:56
 import warnings
+from copy import deepcopy
 from typing import List, Optional, Literal
 
 from pydantic import BaseModel, Field
@@ -59,7 +60,7 @@ class Backends(BaseModel):
 
 class Settings(BaseModel):
     VERSION: Optional[str] = Field(
-        default=None, description="Configuration version. If provided, will use the corresponding format."
+        default="2", description="Configuration version. If provided, will use the corresponding format."
     )
     endpoints: List[EndpointSetting] = Field(
         default_factory=list, description="Available endpoints for the LLM service."
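With the default changed from `None` to `"2"`, a `Settings` object built without an explicit version now reports the V2 format. A minimal sketch of the visible change (assuming `Settings` is imported from `vectorvein.settings`, where the RECORD below places it):

```python
from vectorvein.settings import Settings

s = Settings()
# 0.2.13: s.VERSION was None unless the input dict set it explicitly.
# 0.2.15: the field defaults to the V2 format marker.
assert s.VERSION == "2"
```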
@@ -114,8 +115,14 @@ class Settings(BaseModel):
             "xai": defs.XAI_MODELS,
         }
 
+        data = deepcopy(data)
+
         version = data.get("VERSION")
 
+        if len(data) == 0:
+            version = "2"
+            data["backends"] = {}
+
         # If V2 format, model configs are in the backends dictionary
         if version == "2":
             if "backends" not in data:
@@ -124,13 +131,12 @@ class Settings(BaseModel):
             backends = data["backends"]
         else:
             backends = data
-            if len(data) > 0:
-                warnings.warn("You're using vectorvein's deprecated V1 format. Please use V2 format.")
+            warnings.warn("You're using vectorvein's deprecated V1 format. Please use V2 format.")
 
         for model_type, default_models in model_types.items():
             if model_type in backends:
-                model_settings = BackendSettings()
                 user_models = backends[model_type].get("models", {})
+                model_settings = BackendSettings()
                 model_settings.update_models(default_models, user_models)
                 backends[model_type] = model_settings
             else:
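Two changes meet here. An empty settings dict is now routed to the V2 branch before the format check, so the old `len(data) > 0` guard (whose only job was to keep an empty dict from warning) is gone, and anything that still reaches the V1 branch warns unconditionally. A sketch of the resulting warning behavior:

```python
import warnings

from vectorvein.settings import Settings

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    Settings()                       # empty input: treated as V2, no warning
    Settings(openai={"models": {}})  # V1-style per-backend key: warns

messages = [str(w.message) for w in caught]
assert any("deprecated V1 format" in m for m in messages)
```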
@@ -149,6 +155,10 @@ class Settings(BaseModel):
     def load(self, settings_dict: SettingsDict):
         self.__init__(**settings_dict)
 
+    @classmethod
+    def load_from_dict(cls, settings_dict: SettingsDict):
+        return cls(**settings_dict)
+
     def get_endpoint(self, endpoint_id: str) -> EndpointSetting:
         for endpoint in self.endpoints:
             if endpoint.id == endpoint_id:
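The new `load_from_dict` classmethod is a constructor-style counterpart to `load`: `load` re-runs `__init__` on an existing object (typically the module-level singleton), while `load_from_dict` returns a fresh, independent instance. A brief usage sketch:

```python
from vectorvein.settings import Settings, settings

cfg = {"VERSION": "2", "backends": {}}

settings.load(cfg)                    # reconfigures the shared instance in place
fresh = Settings.load_from_dict(cfg)  # builds a separate Settings object
assert fresh is not settings
```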
@@ -165,5 +175,29 @@ class Settings(BaseModel):
         # Compatible with VERSION 1 format
         return getattr(self, backend_name)
 
+    def export(self):
+        if self.VERSION == "2":
+            return super().model_dump(
+                exclude={
+                    "anthropic",
+                    "deepseek",
+                    "gemini",
+                    "groq",
+                    "local",
+                    "minimax",
+                    "mistral",
+                    "moonshot",
+                    "openai",
+                    "qwen",
+                    "yi",
+                    "zhipuai",
+                    "baichuan",
+                    "stepfun",
+                    "xai",
+                }
+            )
+        else:
+            return super().model_dump(exclude={"backends"})
+
 
 settings = Settings()
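`export` dumps only the fields that belong to the stored format: on V2 it keeps the consolidated `backends` key and excludes the fifteen per-backend V1 fields, on V1 it excludes `backends`. A sketch of the V2 shape (whether the dump reloads byte-for-byte depends on `BackendSettings` serialization, which this diff doesn't show):

```python
from vectorvein.settings import Settings

s = Settings(VERSION="2", backends={})
dumped = s.export()

assert dumped["VERSION"] == "2"
assert "backends" in dumped
assert "openai" not in dumped  # per-backend V1 fields are excluded
```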
vectorvein/types/__init__.py

@@ -61,8 +61,13 @@ from .settings import (
     BackendSettingsDict,
     EndpointSettingDict,
     SettingsDict,
+    SettingsV1Dict,
+    SettingsV2Dict,
 )
 
+from anthropic.types import ThinkingConfigParam, ThinkingConfigEnabledParam
+from openai.types.chat.completion_create_params import ResponseFormat
+
 
 __all__ = [
     "CONTEXT_LENGTH_CONTROL",
@@ -126,4 +131,9 @@ __all__ = [
     "BackendSettingsDict",
     "EndpointSettingDict",
     "SettingsDict",
+    "SettingsV1Dict",
+    "SettingsV2Dict",
+    "ThinkingConfigParam",
+    "ThinkingConfigEnabledParam",
+    "ResponseFormat",
 ]
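The three new non-settings names are TypedDicts re-exported from the upstream `anthropic` and `openai` SDKs, so values are written as plain dict literals. A sketch of typical values (the shapes come from those SDKs; how vectorvein's chat clients consume them is not part of this diff):

```python
from vectorvein.types import ResponseFormat, ThinkingConfigEnabledParam

# Anthropic extended-thinking config: enabled, with a token budget.
thinking: ThinkingConfigEnabledParam = {"type": "enabled", "budget_tokens": 2048}

# OpenAI chat-completions response format: request a JSON-object reply.
response_format: ResponseFormat = {"type": "json_object"}
```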
vectorvein/types/defaults.py

@@ -176,62 +176,6 @@ GROQ_MODELS: Final[Dict[str, Dict[str, Any]]] = {
 # Qwen models
 QWEN_DEFAULT_MODEL: Final[str] = "qwen2.5-72b-instruct"
 QWEN_MODELS: Final[Dict[str, Dict[str, Any]]] = {
-    "qwen1.5-1.8b-chat": {
-        "id": "qwen1.5-1.8b-chat",
-        "context_length": 30000,
-        "max_output_tokens": 4096,
-        "function_call_available": False,
-        "response_format_available": True,
-    },
-    "qwen1.5-4b-chat": {
-        "id": "qwen1.5-4b-chat",
-        "context_length": 30000,
-        "max_output_tokens": 4096,
-        "function_call_available": False,
-        "response_format_available": True,
-    },
-    "qwen1.5-7b-chat": {
-        "id": "qwen1.5-7b-chat",
-        "context_length": 30000,
-        "max_output_tokens": 4096,
-        "function_call_available": False,
-        "response_format_available": True,
-    },
-    "qwen1.5-14b-chat": {
-        "id": "qwen1.5-14b-chat",
-        "context_length": 30000,
-        "max_output_tokens": 4096,
-        "function_call_available": False,
-        "response_format_available": True,
-    },
-    "qwen1.5-32b-chat": {
-        "id": "qwen1.5-32b-chat",
-        "context_length": 30000,
-        "max_output_tokens": 4096,
-        "function_call_available": False,
-        "response_format_available": True,
-    },
-    "qwen1.5-72b-chat": {
-        "id": "qwen1.5-72b-chat",
-        "context_length": 30000,
-        "max_output_tokens": 4096,
-        "function_call_available": False,
-        "response_format_available": True,
-    },
-    "qwen1.5-110b-chat": {
-        "id": "qwen1.5-110b-chat",
-        "context_length": 30000,
-        "max_output_tokens": 4096,
-        "function_call_available": False,
-        "response_format_available": True,
-    },
-    "qwen2-72b-instruct": {
-        "id": "qwen2-72b-instruct",
-        "context_length": 30000,
-        "max_output_tokens": 4096,
-        "function_call_available": False,
-        "response_format_available": True,
-    },
     "qwen2.5-7b-instruct": {
         "id": "qwen2.5-7b-instruct",
         "context_length": 131072,
@@ -282,6 +226,30 @@ QWEN_MODELS: Final[Dict[str, Dict[str, Any]]] = {
         "response_format_available": False,
         "native_multimodal": True,
     },
+    "qwen2.5-vl-72b-instruct": {
+        "id": "qwen2.5-vl-72b-instruct",
+        "context_length": 131072,
+        "max_output_tokens": 8192,
+        "function_call_available": False,
+        "response_format_available": False,
+        "native_multimodal": True,
+    },
+    "qwen2.5-vl-7b-instruct": {
+        "id": "qwen2.5-vl-7b-instruct",
+        "context_length": 131072,
+        "max_output_tokens": 8192,
+        "function_call_available": False,
+        "response_format_available": False,
+        "native_multimodal": True,
+    },
+    "qwen2.5-vl-3b-instruct": {
+        "id": "qwen2.5-vl-3b-instruct",
+        "context_length": 131072,
+        "max_output_tokens": 8192,
+        "function_call_available": False,
+        "response_format_available": False,
+        "native_multimodal": True,
+    },
     "qwen-max": {
         "id": "qwen-max",
         "context_length": 8096,
vectorvein/types/settings.py

@@ -101,17 +101,13 @@ class BackendsDict(TypedDict):
     xai: NotRequired[BackendSettingsDict]
 
 
-class SettingsDict(TypedDict):
+class SettingsV1Dict(TypedDict):
     """TypedDict representing the expected structure of the settings dictionary."""
 
-    VERSION: NotRequired[str]
     endpoints: List[EndpointSettingDict]
     token_server: NotRequired[ServerDict]
     rate_limit: NotRequired[RateLimitConfigDict]
 
-    # V2 format: all model backend configs in a single dictionary
-    backends: NotRequired[BackendsDict]
-
     # V1 format: each model backend config
     anthropic: NotRequired[BackendSettingsDict]
     deepseek: NotRequired[BackendSettingsDict]
@@ -128,3 +124,18 @@ class SettingsDict(TypedDict):
     baichuan: NotRequired[BackendSettingsDict]
     stepfun: NotRequired[BackendSettingsDict]
     xai: NotRequired[BackendSettingsDict]
+
+
+class SettingsV2Dict(TypedDict):
+    """TypedDict representing the expected structure of the settings dictionary."""
+
+    VERSION: NotRequired[str]
+    endpoints: List[EndpointSettingDict]
+    token_server: NotRequired[ServerDict]
+    rate_limit: NotRequired[RateLimitConfigDict]
+
+    # V2 format: all model backend configs in a single dictionary
+    backends: NotRequired[BackendsDict]
+
+
+SettingsDict = Union[SettingsV1Dict, SettingsV2Dict]
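Splitting the old catch-all `SettingsDict` into a `Union` of two TypedDicts lets a type checker validate each format on its own instead of accepting a mix of V1 and V2 keys in one shape. A minimal V2 literal (`endpoints` is the one required key in both variants):

```python
from vectorvein.types import SettingsV2Dict

config: SettingsV2Dict = {
    "VERSION": "2",
    "endpoints": [],  # required in both the V1 and V2 variants
    "backends": {},
}
```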
vectorvein-0.2.13.dist-info/METADATA → vectorvein-0.2.15.dist-info/METADATA

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: vectorvein
-Version: 0.2.13
+Version: 0.2.15
 Summary: VectorVein python SDK
 Author-Email: Anderson <andersonby@163.com>
 License: MIT
vectorvein-0.2.13.dist-info/RECORD → vectorvein-0.2.15.dist-info/RECORD

@@ -1,6 +1,6 @@
-vectorvein-0.2.13.dist-info/METADATA,sha256=sEZT2h2XwDUIkIvdKzXomeSuniLUSr-HWlj88MmHRew,4414
-vectorvein-0.2.13.dist-info/WHEEL,sha256=thaaA2w1JzcGC48WYufAs8nrYZjJm8LqNfnXFOFyCC4,90
-vectorvein-0.2.13.dist-info/entry_points.txt,sha256=6OYgBcLyFCUgeqLgnvMyOJxPCWzgy7se4rLPKtNonMs,34
+vectorvein-0.2.15.dist-info/METADATA,sha256=meMymU-CMwxurlkRGFjRPsQZkrSEFDLy8kfns0k7OZQ,4414
+vectorvein-0.2.15.dist-info/WHEEL,sha256=thaaA2w1JzcGC48WYufAs8nrYZjJm8LqNfnXFOFyCC4,90
+vectorvein-0.2.15.dist-info/entry_points.txt,sha256=6OYgBcLyFCUgeqLgnvMyOJxPCWzgy7se4rLPKtNonMs,34
 vectorvein/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 vectorvein/api/__init__.py,sha256=lfY-XA46fgD2iIZTU0VYP8i07AwA03Egj4Qua0vUKrQ,738
 vectorvein/api/client.py,sha256=xF-leKDQzVyyy9FnIRaz0k4eElYW1XbbzeRLcpnyk90,33047
@@ -28,15 +28,15 @@ vectorvein/chat_clients/yi_client.py,sha256=RNf4CRuPJfixrwLZ3-DEc3t25QDe1mvZeb9s
 vectorvein/chat_clients/zhipuai_client.py,sha256=Ys5DSeLCuedaDXr3PfG1EW2zKXopt-awO2IylWSwY0s,519
 vectorvein/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 vectorvein/server/token_server.py,sha256=36F9PKSNOX8ZtYBXY_l-76GQTpUSmQ2Y8EMy1H7wtdQ,1353
-vectorvein/settings/__init__.py,sha256=oBOLwG61RN9dq012Bgq3OfZyxyrJ5j0BYNmHbThhpjo,8097
+vectorvein/settings/__init__.py,sha256=x1V0zHMLmafoqpLlEr4_LSZ0nxQj_LDoGRB1pKUiMO4,9030
 vectorvein/settings/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-vectorvein/types/__init__.py,sha256=ie7H3rTMq_Fg836vOmy96m3wzjDkqfekQecPXXEDbcM,3005
-vectorvein/types/defaults.py,sha256=VrkQoyHqC_eK3g1b6egpPYLLo0ltwMHqxDscCX4y-N0,27417
+vectorvein/types/__init__.py,sha256=DJYGhlshgUQgzXPfMfKW5sTpBClZUCLhqmCqF44lVuU,3329
+vectorvein/types/defaults.py,sha256=hdCEY1bEPmPbsh335bXd4LWMeJXLWD3_EulURaWs4EU,26430
 vectorvein/types/enums.py,sha256=7KTJSVtQueImmbr1fSwv3rQVtc0RyMWXJmoE2tDOaso,1667
 vectorvein/types/exception.py,sha256=gnW4GnJ76jND6UGnodk9xmqkcbeS7Cz2rvncA2HpD5E,69
 vectorvein/types/llm_parameters.py,sha256=4SxDbJKVb9oGYymyxQtNZ66YZmUQd9_CpYYg81_Inkk,7650
 vectorvein/types/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-vectorvein/types/settings.py,sha256=096mcaqtuZYCeJQYzF4ILqrLp_z5-upJAxwLrRZ5WQk,4165
+vectorvein/types/settings.py,sha256=B8eVJsJRrv-op8Enn-Rbour2b9vD01TLXz3xlXEF9vo,4485
 vectorvein/utilities/media_processing.py,sha256=7KtbLFzOYIn1e9QTN9G6C76NH8CBlV9kfAgiRKEIeXY,6263
 vectorvein/utilities/rate_limiter.py,sha256=dwolIUVw2wP83Odqpx0AAaE77de1GzxkYDGH4tM_u_4,10300
 vectorvein/utilities/retry.py,sha256=6KFS9R2HdhqM3_9jkjD4F36ZSpEx2YNFGOVlpOsUetM,2208
@@ -61,4 +61,4 @@ vectorvein/workflow/nodes/vector_db.py,sha256=t6I17q6iR3yQreiDHpRrksMdWDPIvgqJs0
 vectorvein/workflow/nodes/video_generation.py,sha256=qmdg-t_idpxq1veukd-jv_ChICMOoInKxprV9Z4Vi2w,4118
 vectorvein/workflow/nodes/web_crawlers.py,sha256=LsqomfXfqrXfHJDO1cl0Ox48f4St7X_SL12DSbAMSOw,5415
 vectorvein/workflow/utils/json_to_code.py,sha256=F7dhDy8kGc8ndOeihGLRLGFGlquoxVlb02ENtxnQ0C8,5914
-vectorvein-0.2.13.dist-info/RECORD,,
+vectorvein-0.2.15.dist-info/RECORD,,